xref: /openbmc/qemu/linux-user/syscall.c (revision 5ebdd774)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef CONFIG_EVENTFD
63 #include <sys/eventfd.h>
64 #endif
65 #ifdef CONFIG_EPOLL
66 #include <sys/epoll.h>
67 #endif
68 #ifdef CONFIG_ATTR
69 #include "qemu/xattr.h"
70 #endif
71 #ifdef CONFIG_SENDFILE
72 #include <sys/sendfile.h>
73 #endif
74 
75 #define termios host_termios
76 #define winsize host_winsize
77 #define termio host_termio
78 #define sgttyb host_sgttyb /* same as target */
79 #define tchars host_tchars /* same as target */
80 #define ltchars host_ltchars /* same as target */
81 
82 #include <linux/termios.h>
83 #include <linux/unistd.h>
84 #include <linux/cdrom.h>
85 #include <linux/hdreg.h>
86 #include <linux/soundcard.h>
87 #include <linux/kd.h>
88 #include <linux/mtio.h>
89 #include <linux/fs.h>
90 #if defined(CONFIG_FIEMAP)
91 #include <linux/fiemap.h>
92 #endif
93 #include <linux/fb.h>
94 #if defined(CONFIG_USBFS)
95 #include <linux/usbdevice_fs.h>
96 #include <linux/usb/ch9.h>
97 #endif
98 #include <linux/vt.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <netpacket/packet.h>
105 #include <linux/netlink.h>
106 #include "linux_loop.h"
107 #include "uname.h"
108 
109 #include "qemu.h"
110 #include "qemu/guest-random.h"
111 #include "fd-trans.h"
112 
113 #ifndef CLONE_IO
114 #define CLONE_IO                0x80000000      /* Clone io context */
115 #endif
116 
117 /* We can't directly call the host clone syscall, because this will
118  * badly confuse libc (breaking mutexes, for example). So we must
119  * divide clone flags into:
120  *  * flag combinations that look like pthread_create()
121  *  * flag combinations that look like fork()
122  *  * flags we can implement within QEMU itself
123  *  * flags we can't support and will return an error for
124  */
125 /* For thread creation, all these flags must be present; for
126  * fork, none must be present.
127  */
128 #define CLONE_THREAD_FLAGS                              \
129     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
130      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
131 
132 /* These flags are ignored:
133  * CLONE_DETACHED is now ignored by the kernel;
134  * CLONE_IO is just an optimisation hint to the I/O scheduler
135  */
136 #define CLONE_IGNORED_FLAGS                     \
137     (CLONE_DETACHED | CLONE_IO)
138 
139 /* Flags for fork which we can implement within QEMU itself */
140 #define CLONE_OPTIONAL_FORK_FLAGS               \
141     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
142      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
143 
144 /* Flags for thread creation which we can implement within QEMU itself */
145 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
146     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
147      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
148 
149 #define CLONE_INVALID_FORK_FLAGS                                        \
150     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
151 
152 #define CLONE_INVALID_THREAD_FLAGS                                      \
153     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
154        CLONE_IGNORED_FLAGS))
155 
156 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
157  * have almost all been allocated. We cannot support any of
158  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
159  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
160  * The checks against the invalid thread masks above will catch these.
161  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
162  */
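/* Example (not from this file, just for illustration): glibc's
 * pthread_create() passes CLONE_VM | CLONE_FS | CLONE_FILES |
 * CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS |
 * CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID, i.e. all of
 * CLONE_THREAD_FLAGS plus a subset of CLONE_OPTIONAL_THREAD_FLAGS,
 * so it is accepted by the thread-creation path described above.
 */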
163 
164 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
165  * once. This exercises the codepaths for restart.
166  */
167 //#define DEBUG_ERESTARTSYS
168 
169 //#include <linux/msdos_fs.h>
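/* The two VFAT readdir ioctl numbers below mirror the definitions in
 * <linux/msdos_fs.h>; they are defined locally rather than pulling in
 * that header (note the commented-out include above).
 */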
170 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
171 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
172 
173 #undef _syscall0
174 #undef _syscall1
175 #undef _syscall2
176 #undef _syscall3
177 #undef _syscall4
178 #undef _syscall5
179 #undef _syscall6
180 
181 #define _syscall0(type,name)		\
182 static type name (void)			\
183 {					\
184 	return syscall(__NR_##name);	\
185 }
186 
187 #define _syscall1(type,name,type1,arg1)		\
188 static type name (type1 arg1)			\
189 {						\
190 	return syscall(__NR_##name, arg1);	\
191 }
192 
193 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
194 static type name (type1 arg1,type2 arg2)		\
195 {							\
196 	return syscall(__NR_##name, arg1, arg2);	\
197 }
198 
199 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
200 static type name (type1 arg1,type2 arg2,type3 arg3)		\
201 {								\
202 	return syscall(__NR_##name, arg1, arg2, arg3);		\
203 }
204 
205 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
206 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
207 {										\
208 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
209 }
210 
211 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
212 		  type5,arg5)							\
213 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
214 {										\
215 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
216 }
217 
218 
219 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
220 		  type5,arg5,type6,arg6)					\
221 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
222                   type6 arg6)							\
223 {										\
224 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
225 }
226 
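/* Each _syscallN() use below expands to a thin static wrapper that
 * invokes the raw host syscall, bypassing any libc wrapper.  For
 * example, _syscall0(int, sys_gettid) expands (roughly) to:
 *
 *     static int sys_gettid(void)
 *     {
 *         return syscall(__NR_sys_gettid);
 *     }
 *
 * with __NR_sys_gettid aliased to the host's __NR_gettid below.
 */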
227 
228 #define __NR_sys_uname __NR_uname
229 #define __NR_sys_getcwd1 __NR_getcwd
230 #define __NR_sys_getdents __NR_getdents
231 #define __NR_sys_getdents64 __NR_getdents64
232 #define __NR_sys_getpriority __NR_getpriority
233 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
234 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
235 #define __NR_sys_syslog __NR_syslog
236 #define __NR_sys_futex __NR_futex
237 #define __NR_sys_inotify_init __NR_inotify_init
238 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
239 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
240 
241 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
242 #define __NR__llseek __NR_lseek
243 #endif
244 
245 /* Newer kernel ports have llseek() instead of _llseek() */
246 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
247 #define TARGET_NR__llseek TARGET_NR_llseek
248 #endif
249 
250 #define __NR_sys_gettid __NR_gettid
251 _syscall0(int, sys_gettid)
252 
253 /* For the 64-bit guest on 32-bit host case we must emulate
254  * getdents using getdents64, because otherwise the host
255  * might hand us back more dirent records than we can fit
256  * into the guest buffer after structure format conversion.
257  * In all other cases we emulate getdents with the host getdents if it exists.
258  */
259 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
260 #define EMULATE_GETDENTS_WITH_GETDENTS
261 #endif
262 
263 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
264 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
265 #endif
266 #if (defined(TARGET_NR_getdents) && \
267       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
268     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
269 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
270 #endif
271 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
272 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
273           loff_t *, res, uint, wh);
274 #endif
275 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
276 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
277           siginfo_t *, uinfo)
278 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
279 #ifdef __NR_exit_group
280 _syscall1(int,exit_group,int,error_code)
281 #endif
282 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
283 _syscall1(int,set_tid_address,int *,tidptr)
284 #endif
285 #if defined(TARGET_NR_futex) && defined(__NR_futex)
286 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
287           const struct timespec *,timeout,int *,uaddr2,int,val3)
288 #endif
289 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
290 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
291           unsigned long *, user_mask_ptr);
292 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
293 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
294           unsigned long *, user_mask_ptr);
295 #define __NR_sys_getcpu __NR_getcpu
296 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
297 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
298           void *, arg);
299 _syscall2(int, capget, struct __user_cap_header_struct *, header,
300           struct __user_cap_data_struct *, data);
301 _syscall2(int, capset, struct __user_cap_header_struct *, header,
302           struct __user_cap_data_struct *, data);
303 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
304 _syscall2(int, ioprio_get, int, which, int, who)
305 #endif
306 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
307 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
308 #endif
309 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
310 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
311 #endif
312 
313 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
314 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
315           unsigned long, idx1, unsigned long, idx2)
316 #endif
317 
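/* Translation table consumed by the target_to_host_bitmask() /
 * host_to_target_bitmask() helpers: each row is
 * { target_mask, target_bits, host_mask, host_bits }, so for example a
 * guest O_WRONLY (within O_ACCMODE) is rewritten to the host O_WRONLY.
 */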
318 static bitmask_transtbl fcntl_flags_tbl[] = {
319   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
320   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
321   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
322   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
323   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
324   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
325   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
326   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
327   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
328   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
329   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
330   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
331   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
332 #if defined(O_DIRECT)
333   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
334 #endif
335 #if defined(O_NOATIME)
336   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
337 #endif
338 #if defined(O_CLOEXEC)
339   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
340 #endif
341 #if defined(O_PATH)
342   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
343 #endif
344 #if defined(O_TMPFILE)
345   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
346 #endif
347   /* Don't terminate the list prematurely on 64-bit host+guest.  */
348 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
349   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
350 #endif
351   { 0, 0, 0, 0 }
352 };
353 
354 static int sys_getcwd1(char *buf, size_t size)
355 {
356   if (getcwd(buf, size) == NULL) {
357       /* getcwd() sets errno */
358       return (-1);
359   }
360   return strlen(buf)+1;
361 }
362 
363 #ifdef TARGET_NR_utimensat
364 #if defined(__NR_utimensat)
365 #define __NR_sys_utimensat __NR_utimensat
366 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
367           const struct timespec *,tsp,int,flags)
368 #else
369 static int sys_utimensat(int dirfd, const char *pathname,
370                          const struct timespec times[2], int flags)
371 {
372     errno = ENOSYS;
373     return -1;
374 }
375 #endif
376 #endif /* TARGET_NR_utimensat */
377 
378 #ifdef TARGET_NR_renameat2
379 #if defined(__NR_renameat2)
380 #define __NR_sys_renameat2 __NR_renameat2
381 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
382           const char *, new, unsigned int, flags)
383 #else
384 static int sys_renameat2(int oldfd, const char *old,
385                          int newfd, const char *new, int flags)
386 {
387     if (flags == 0) {
388         return renameat(oldfd, old, newfd, new);
389     }
390     errno = ENOSYS;
391     return -1;
392 }
393 #endif
394 #endif /* TARGET_NR_renameat2 */
395 
396 #ifdef CONFIG_INOTIFY
397 #include <sys/inotify.h>
398 
399 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
400 static int sys_inotify_init(void)
401 {
402   return (inotify_init());
403 }
404 #endif
405 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
406 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
407 {
408   return (inotify_add_watch(fd, pathname, mask));
409 }
410 #endif
411 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
412 static int sys_inotify_rm_watch(int fd, int32_t wd)
413 {
414   return (inotify_rm_watch(fd, wd));
415 }
416 #endif
417 #ifdef CONFIG_INOTIFY1
418 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
419 static int sys_inotify_init1(int flags)
420 {
421   return (inotify_init1(flags));
422 }
423 #endif
424 #endif
425 #else
426 /* Userspace can usually survive runtime without inotify */
427 #undef TARGET_NR_inotify_init
428 #undef TARGET_NR_inotify_init1
429 #undef TARGET_NR_inotify_add_watch
430 #undef TARGET_NR_inotify_rm_watch
431 #endif /* CONFIG_INOTIFY  */
432 
433 #if defined(TARGET_NR_prlimit64)
434 #ifndef __NR_prlimit64
435 # define __NR_prlimit64 -1
436 #endif
437 #define __NR_sys_prlimit64 __NR_prlimit64
438 /* The glibc rlimit structure may not be the one used by the underlying syscall */
439 struct host_rlimit64 {
440     uint64_t rlim_cur;
441     uint64_t rlim_max;
442 };
443 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
444           const struct host_rlimit64 *, new_limit,
445           struct host_rlimit64 *, old_limit)
446 #endif
447 
448 
449 #if defined(TARGET_NR_timer_create)
450 /* Maximum of 32 active POSIX timers allowed at any one time. */
451 static timer_t g_posix_timers[32] = { 0, } ;
452 
453 static inline int next_free_host_timer(void)
454 {
455     int k ;
456     /* FIXME: Does finding the next free slot require a lock? */
457     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
458         if (g_posix_timers[k] == 0) {
459             g_posix_timers[k] = (timer_t) 1;
460             return k;
461         }
462     }
463     return -1;
464 }
465 #endif
466 
467 /* ARM EABI and MIPS expect 64-bit types to be passed in aligned (even-numbered) register pairs */
468 #ifdef TARGET_ARM
469 static inline int regpairs_aligned(void *cpu_env, int num)
470 {
471     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
472 }
473 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
474 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
475 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
476 /* The SysV ABI for PPC32 expects 64-bit parameters to be passed in odd/even
477  * register pairs, which works out the same as ARM/MIPS because argument
478  * passing starts at r3 (arg1). */
479 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
480 #elif defined(TARGET_SH4)
481 /* SH4 doesn't align register pairs, except for p{read,write}64 */
482 static inline int regpairs_aligned(void *cpu_env, int num)
483 {
484     switch (num) {
485     case TARGET_NR_pread64:
486     case TARGET_NR_pwrite64:
487         return 1;
488 
489     default:
490         return 0;
491     }
492 }
493 #elif defined(TARGET_XTENSA)
494 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
495 #else
496 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
497 #endif
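/* For example, with ARM EABI alignment pread64(fd, buf, count, offset)
 * passes the 64-bit offset in the (r4, r5) pair rather than (r3, r4),
 * leaving r3 as a skipped dummy argument; regpairs_aligned() lets the
 * syscall code further down account for that padding register.
 */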
498 
499 #define ERRNO_TABLE_SIZE 1200
500 
501 /* target_to_host_errno_table[] is initialized from
502  * host_to_target_errno_table[] in syscall_init(). */
503 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
504 };
505 
506 /*
507  * This list is the union of errno values overridden in asm-<arch>/errno.h
508  * minus the errnos that are not actually generic to all archs.
509  */
510 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
511     [EAGAIN]		= TARGET_EAGAIN,
512     [EIDRM]		= TARGET_EIDRM,
513     [ECHRNG]		= TARGET_ECHRNG,
514     [EL2NSYNC]		= TARGET_EL2NSYNC,
515     [EL3HLT]		= TARGET_EL3HLT,
516     [EL3RST]		= TARGET_EL3RST,
517     [ELNRNG]		= TARGET_ELNRNG,
518     [EUNATCH]		= TARGET_EUNATCH,
519     [ENOCSI]		= TARGET_ENOCSI,
520     [EL2HLT]		= TARGET_EL2HLT,
521     [EDEADLK]		= TARGET_EDEADLK,
522     [ENOLCK]		= TARGET_ENOLCK,
523     [EBADE]		= TARGET_EBADE,
524     [EBADR]		= TARGET_EBADR,
525     [EXFULL]		= TARGET_EXFULL,
526     [ENOANO]		= TARGET_ENOANO,
527     [EBADRQC]		= TARGET_EBADRQC,
528     [EBADSLT]		= TARGET_EBADSLT,
529     [EBFONT]		= TARGET_EBFONT,
530     [ENOSTR]		= TARGET_ENOSTR,
531     [ENODATA]		= TARGET_ENODATA,
532     [ETIME]		= TARGET_ETIME,
533     [ENOSR]		= TARGET_ENOSR,
534     [ENONET]		= TARGET_ENONET,
535     [ENOPKG]		= TARGET_ENOPKG,
536     [EREMOTE]		= TARGET_EREMOTE,
537     [ENOLINK]		= TARGET_ENOLINK,
538     [EADV]		= TARGET_EADV,
539     [ESRMNT]		= TARGET_ESRMNT,
540     [ECOMM]		= TARGET_ECOMM,
541     [EPROTO]		= TARGET_EPROTO,
542     [EDOTDOT]		= TARGET_EDOTDOT,
543     [EMULTIHOP]		= TARGET_EMULTIHOP,
544     [EBADMSG]		= TARGET_EBADMSG,
545     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
546     [EOVERFLOW]		= TARGET_EOVERFLOW,
547     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
548     [EBADFD]		= TARGET_EBADFD,
549     [EREMCHG]		= TARGET_EREMCHG,
550     [ELIBACC]		= TARGET_ELIBACC,
551     [ELIBBAD]		= TARGET_ELIBBAD,
552     [ELIBSCN]		= TARGET_ELIBSCN,
553     [ELIBMAX]		= TARGET_ELIBMAX,
554     [ELIBEXEC]		= TARGET_ELIBEXEC,
555     [EILSEQ]		= TARGET_EILSEQ,
556     [ENOSYS]		= TARGET_ENOSYS,
557     [ELOOP]		= TARGET_ELOOP,
558     [ERESTART]		= TARGET_ERESTART,
559     [ESTRPIPE]		= TARGET_ESTRPIPE,
560     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
561     [EUSERS]		= TARGET_EUSERS,
562     [ENOTSOCK]		= TARGET_ENOTSOCK,
563     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
564     [EMSGSIZE]		= TARGET_EMSGSIZE,
565     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
566     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
567     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
568     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
569     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
570     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
571     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
572     [EADDRINUSE]	= TARGET_EADDRINUSE,
573     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
574     [ENETDOWN]		= TARGET_ENETDOWN,
575     [ENETUNREACH]	= TARGET_ENETUNREACH,
576     [ENETRESET]		= TARGET_ENETRESET,
577     [ECONNABORTED]	= TARGET_ECONNABORTED,
578     [ECONNRESET]	= TARGET_ECONNRESET,
579     [ENOBUFS]		= TARGET_ENOBUFS,
580     [EISCONN]		= TARGET_EISCONN,
581     [ENOTCONN]		= TARGET_ENOTCONN,
582     [EUCLEAN]		= TARGET_EUCLEAN,
583     [ENOTNAM]		= TARGET_ENOTNAM,
584     [ENAVAIL]		= TARGET_ENAVAIL,
585     [EISNAM]		= TARGET_EISNAM,
586     [EREMOTEIO]		= TARGET_EREMOTEIO,
587     [EDQUOT]            = TARGET_EDQUOT,
588     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
589     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
590     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
591     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
592     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
593     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
594     [EALREADY]		= TARGET_EALREADY,
595     [EINPROGRESS]	= TARGET_EINPROGRESS,
596     [ESTALE]		= TARGET_ESTALE,
597     [ECANCELED]		= TARGET_ECANCELED,
598     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
599     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
600 #ifdef ENOKEY
601     [ENOKEY]		= TARGET_ENOKEY,
602 #endif
603 #ifdef EKEYEXPIRED
604     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
605 #endif
606 #ifdef EKEYREVOKED
607     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
608 #endif
609 #ifdef EKEYREJECTED
610     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
611 #endif
612 #ifdef EOWNERDEAD
613     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
614 #endif
615 #ifdef ENOTRECOVERABLE
616     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
617 #endif
618 #ifdef ENOMSG
619     [ENOMSG]            = TARGET_ENOMSG,
620 #endif
621 #ifdef ERFKILL
622     [ERFKILL]           = TARGET_ERFKILL,
623 #endif
624 #ifdef EHWPOISON
625     [EHWPOISON]         = TARGET_EHWPOISON,
626 #endif
627 };
628 
629 static inline int host_to_target_errno(int err)
630 {
631     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
632         host_to_target_errno_table[err]) {
633         return host_to_target_errno_table[err];
634     }
635     return err;
636 }
637 
638 static inline int target_to_host_errno(int err)
639 {
640     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
641         target_to_host_errno_table[err]) {
642         return target_to_host_errno_table[err];
643     }
644     return err;
645 }
646 
647 static inline abi_long get_errno(abi_long ret)
648 {
649     if (ret == -1)
650         return -host_to_target_errno(errno);
651     else
652         return ret;
653 }
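/* Typical use: ret = get_errno(host_call(...)); on success this is the
 * host return value, on failure it is the negated *target* errno, which
 * is what ends up in the guest's syscall return register.
 */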
654 
655 const char *target_strerror(int err)
656 {
657     if (err == TARGET_ERESTARTSYS) {
658         return "To be restarted";
659     }
660     if (err == TARGET_QEMU_ESIGRETURN) {
661         return "Successful exit from sigreturn";
662     }
663 
664     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
665         return NULL;
666     }
667     return strerror(target_to_host_errno(err));
668 }
669 
670 #define safe_syscall0(type, name) \
671 static type safe_##name(void) \
672 { \
673     return safe_syscall(__NR_##name); \
674 }
675 
676 #define safe_syscall1(type, name, type1, arg1) \
677 static type safe_##name(type1 arg1) \
678 { \
679     return safe_syscall(__NR_##name, arg1); \
680 }
681 
682 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
683 static type safe_##name(type1 arg1, type2 arg2) \
684 { \
685     return safe_syscall(__NR_##name, arg1, arg2); \
686 }
687 
688 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
689 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
690 { \
691     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
692 }
693 
694 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
695     type4, arg4) \
696 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
697 { \
698     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
699 }
700 
701 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
702     type4, arg4, type5, arg5) \
703 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
704     type5 arg5) \
705 { \
706     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
707 }
708 
709 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
710     type4, arg4, type5, arg5, type6, arg6) \
711 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
712     type5 arg5, type6 arg6) \
713 { \
714     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
715 }
716 
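/* The safe_* wrappers below call safe_syscall() rather than syscall().
 * safe_syscall() (implemented per-host in safe-syscall.inc.S) is written
 * so that a guest signal arriving while the host syscall would block
 * makes the call fail with errno TARGET_ERESTARTSYS instead of being
 * delayed or lost, allowing the main loop to deliver the signal and
 * then restart the syscall.
 */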
717 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
718 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
719 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
720               int, flags, mode_t, mode)
721 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
722               struct rusage *, rusage)
723 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
724               int, options, struct rusage *, rusage)
725 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
726 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
727               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
728 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
729               struct timespec *, tsp, const sigset_t *, sigmask,
730               size_t, sigsetsize)
731 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
732               int, maxevents, int, timeout, const sigset_t *, sigmask,
733               size_t, sigsetsize)
734 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
735               const struct timespec *,timeout,int *,uaddr2,int,val3)
736 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
737 safe_syscall2(int, kill, pid_t, pid, int, sig)
738 safe_syscall2(int, tkill, int, tid, int, sig)
739 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
740 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
741 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
742 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
743               unsigned long, pos_l, unsigned long, pos_h)
744 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
745               unsigned long, pos_l, unsigned long, pos_h)
746 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
747               socklen_t, addrlen)
748 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
749               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
750 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
751               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
752 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
753 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
754 safe_syscall2(int, flock, int, fd, int, operation)
755 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
756               const struct timespec *, uts, size_t, sigsetsize)
757 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
758               int, flags)
759 safe_syscall2(int, nanosleep, const struct timespec *, req,
760               struct timespec *, rem)
761 #ifdef TARGET_NR_clock_nanosleep
762 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
763               const struct timespec *, req, struct timespec *, rem)
764 #endif
765 #ifdef __NR_msgsnd
766 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
767               int, flags)
768 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
769               long, msgtype, int, flags)
770 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
771               unsigned, nsops, const struct timespec *, timeout)
772 #else
773 /* This host kernel architecture uses a single ipc syscall; fake up
774  * wrappers for the sub-operations to hide this implementation detail.
775  * Annoyingly we can't include linux/ipc.h to get the constant definitions
776  * for the call parameter because some structs in there conflict with the
777  * sys/ipc.h ones. So we just define them here, and rely on them being
778  * the same for all host architectures.
779  */
780 #define Q_SEMTIMEDOP 4
781 #define Q_MSGSND 11
782 #define Q_MSGRCV 12
783 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
784 
785 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
786               void *, ptr, long, fifth)
787 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
788 {
789     return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
790 }
791 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
792 {
793     return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
794 }
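/* The version field of 1 in Q_IPCCALL(1, Q_MSGRCV) selects the kernel's
 * newer MSGRCV calling convention, where ptr is the message buffer and
 * fifth carries the message type, rather than the legacy form (version 0)
 * that packs both into a struct ipc_kludge.
 */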
795 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
796                            const struct timespec *timeout)
797 {
798     return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
799                     (long)timeout);
800 }
801 #endif
802 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
803 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
804               size_t, len, unsigned, prio, const struct timespec *, timeout)
805 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
806               size_t, len, unsigned *, prio, const struct timespec *, timeout)
807 #endif
808 /* We do ioctl like this rather than via safe_syscall3 to preserve the
809  * "third argument might be integer or pointer or not present" behaviour of
810  * the libc function.
811  */
812 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
813 /* Similarly for fcntl. Note that callers must always:
814  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
815  *  use the flock64 struct rather than unsuffixed flock
816  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
817  */
818 #ifdef __NR_fcntl64
819 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
820 #else
821 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
822 #endif
823 
824 static inline int host_to_target_sock_type(int host_type)
825 {
826     int target_type;
827 
828     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
829     case SOCK_DGRAM:
830         target_type = TARGET_SOCK_DGRAM;
831         break;
832     case SOCK_STREAM:
833         target_type = TARGET_SOCK_STREAM;
834         break;
835     default:
836         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
837         break;
838     }
839 
840 #if defined(SOCK_CLOEXEC)
841     if (host_type & SOCK_CLOEXEC) {
842         target_type |= TARGET_SOCK_CLOEXEC;
843     }
844 #endif
845 
846 #if defined(SOCK_NONBLOCK)
847     if (host_type & SOCK_NONBLOCK) {
848         target_type |= TARGET_SOCK_NONBLOCK;
849     }
850 #endif
851 
852     return target_type;
853 }
854 
855 static abi_ulong target_brk;
856 static abi_ulong target_original_brk;
857 static abi_ulong brk_page;
858 
859 void target_set_brk(abi_ulong new_brk)
860 {
861     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
862     brk_page = HOST_PAGE_ALIGN(target_brk);
863 }
864 
865 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
866 #define DEBUGF_BRK(message, args...)
867 
868 /* do_brk() must return target values and target errnos. */
869 abi_long do_brk(abi_ulong new_brk)
870 {
871     abi_long mapped_addr;
872     abi_ulong new_alloc_size;
873 
874     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
875 
876     if (!new_brk) {
877         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
878         return target_brk;
879     }
880     if (new_brk < target_original_brk) {
881         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
882                    target_brk);
883         return target_brk;
884     }
885 
886     /* If the new brk is less than the highest page reserved to the
887      * target heap allocation, set it and we're almost done...  */
888     if (new_brk <= brk_page) {
889         /* Heap contents are initialized to zero, as for anonymous
890          * mapped pages.  */
891         if (new_brk > target_brk) {
892             memset(g2h(target_brk), 0, new_brk - target_brk);
893         }
894 	target_brk = new_brk;
895         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
896 	return target_brk;
897     }
898 
899     /* We need to allocate more memory after the brk... Note that
900      * we don't use MAP_FIXED because that will map over the top of
901      * any existing mapping (like the one with the host libc or qemu
902      * itself); instead we treat "mapped but at wrong address" as
903      * a failure and unmap again.
904      */
905     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
906     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
907                                         PROT_READ|PROT_WRITE,
908                                         MAP_ANON|MAP_PRIVATE, 0, 0));
909 
910     if (mapped_addr == brk_page) {
911         /* Heap contents are initialized to zero, as for anonymous
912          * mapped pages.  Technically the new pages are already
913          * initialized to zero since they *are* anonymous mapped
914          * pages, however we have to take care with the contents that
915          * come from the remaining part of the previous page: it may
916      * contain garbage data from a previous heap usage (grown
917      * then shrunk).  */
918         memset(g2h(target_brk), 0, brk_page - target_brk);
919 
920         target_brk = new_brk;
921         brk_page = HOST_PAGE_ALIGN(target_brk);
922         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
923             target_brk);
924         return target_brk;
925     } else if (mapped_addr != -1) {
926         /* Mapped but at wrong address, meaning there wasn't actually
927          * enough space for this brk.
928          */
929         target_munmap(mapped_addr, new_alloc_size);
930         mapped_addr = -1;
931         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
932     }
933     else {
934         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
935     }
936 
937 #if defined(TARGET_ALPHA)
938     /* We (partially) emulate OSF/1 on Alpha, which requires we
939        return a proper errno, not an unchanged brk value.  */
940     return -TARGET_ENOMEM;
941 #endif
942     /* For everything else, return the previous break. */
943     return target_brk;
944 }
945 
946 static inline abi_long copy_from_user_fdset(fd_set *fds,
947                                             abi_ulong target_fds_addr,
948                                             int n)
949 {
950     int i, nw, j, k;
951     abi_ulong b, *target_fds;
952 
953     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
954     if (!(target_fds = lock_user(VERIFY_READ,
955                                  target_fds_addr,
956                                  sizeof(abi_ulong) * nw,
957                                  1)))
958         return -TARGET_EFAULT;
959 
960     FD_ZERO(fds);
961     k = 0;
962     for (i = 0; i < nw; i++) {
963         /* grab the abi_ulong */
964         __get_user(b, &target_fds[i]);
965         for (j = 0; j < TARGET_ABI_BITS; j++) {
966             /* check the bit inside the abi_ulong */
967             if ((b >> j) & 1)
968                 FD_SET(k, fds);
969             k++;
970         }
971     }
972 
973     unlock_user(target_fds, target_fds_addr, 0);
974 
975     return 0;
976 }
977 
978 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
979                                                  abi_ulong target_fds_addr,
980                                                  int n)
981 {
982     if (target_fds_addr) {
983         if (copy_from_user_fdset(fds, target_fds_addr, n))
984             return -TARGET_EFAULT;
985         *fds_ptr = fds;
986     } else {
987         *fds_ptr = NULL;
988     }
989     return 0;
990 }
991 
992 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
993                                           const fd_set *fds,
994                                           int n)
995 {
996     int i, nw, j, k;
997     abi_long v;
998     abi_ulong *target_fds;
999 
1000     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1001     if (!(target_fds = lock_user(VERIFY_WRITE,
1002                                  target_fds_addr,
1003                                  sizeof(abi_ulong) * nw,
1004                                  0)))
1005         return -TARGET_EFAULT;
1006 
1007     k = 0;
1008     for (i = 0; i < nw; i++) {
1009         v = 0;
1010         for (j = 0; j < TARGET_ABI_BITS; j++) {
1011             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1012             k++;
1013         }
1014         __put_user(v, &target_fds[i]);
1015     }
1016 
1017     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1018 
1019     return 0;
1020 }
1021 
1022 #if defined(__alpha__)
1023 #define HOST_HZ 1024
1024 #else
1025 #define HOST_HZ 100
1026 #endif
1027 
1028 static inline abi_long host_to_target_clock_t(long ticks)
1029 {
1030 #if HOST_HZ == TARGET_HZ
1031     return ticks;
1032 #else
1033     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1034 #endif
1035 }
1036 
1037 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1038                                              const struct rusage *rusage)
1039 {
1040     struct target_rusage *target_rusage;
1041 
1042     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1043         return -TARGET_EFAULT;
1044     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1045     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1046     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1047     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1048     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1049     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1050     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1051     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1052     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1053     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1054     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1055     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1056     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1057     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1058     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1059     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1060     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1061     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1062     unlock_user_struct(target_rusage, target_addr, 1);
1063 
1064     return 0;
1065 }
1066 
1067 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1068 {
1069     abi_ulong target_rlim_swap;
1070     rlim_t result;
1071 
1072     target_rlim_swap = tswapal(target_rlim);
1073     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1074         return RLIM_INFINITY;
1075 
1076     result = target_rlim_swap;
1077     if (target_rlim_swap != (rlim_t)result)
1078         return RLIM_INFINITY;
1079 
1080     return result;
1081 }
1082 
1083 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1084 {
1085     abi_ulong target_rlim_swap;
1086     abi_ulong result;
1087 
1088     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1089         target_rlim_swap = TARGET_RLIM_INFINITY;
1090     else
1091         target_rlim_swap = rlim;
1092     result = tswapal(target_rlim_swap);
1093 
1094     return result;
1095 }
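/* Host limits that cannot be represented in the guest's abi_ulong are
 * reported as TARGET_RLIM_INFINITY above, mirroring what the kernel's
 * compat getrlimit path does for 32-bit processes on a 64-bit kernel.
 */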
1096 
1097 static inline int target_to_host_resource(int code)
1098 {
1099     switch (code) {
1100     case TARGET_RLIMIT_AS:
1101         return RLIMIT_AS;
1102     case TARGET_RLIMIT_CORE:
1103         return RLIMIT_CORE;
1104     case TARGET_RLIMIT_CPU:
1105         return RLIMIT_CPU;
1106     case TARGET_RLIMIT_DATA:
1107         return RLIMIT_DATA;
1108     case TARGET_RLIMIT_FSIZE:
1109         return RLIMIT_FSIZE;
1110     case TARGET_RLIMIT_LOCKS:
1111         return RLIMIT_LOCKS;
1112     case TARGET_RLIMIT_MEMLOCK:
1113         return RLIMIT_MEMLOCK;
1114     case TARGET_RLIMIT_MSGQUEUE:
1115         return RLIMIT_MSGQUEUE;
1116     case TARGET_RLIMIT_NICE:
1117         return RLIMIT_NICE;
1118     case TARGET_RLIMIT_NOFILE:
1119         return RLIMIT_NOFILE;
1120     case TARGET_RLIMIT_NPROC:
1121         return RLIMIT_NPROC;
1122     case TARGET_RLIMIT_RSS:
1123         return RLIMIT_RSS;
1124     case TARGET_RLIMIT_RTPRIO:
1125         return RLIMIT_RTPRIO;
1126     case TARGET_RLIMIT_SIGPENDING:
1127         return RLIMIT_SIGPENDING;
1128     case TARGET_RLIMIT_STACK:
1129         return RLIMIT_STACK;
1130     default:
1131         return code;
1132     }
1133 }
1134 
1135 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1136                                               abi_ulong target_tv_addr)
1137 {
1138     struct target_timeval *target_tv;
1139 
1140     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1141         return -TARGET_EFAULT;
1142 
1143     __get_user(tv->tv_sec, &target_tv->tv_sec);
1144     __get_user(tv->tv_usec, &target_tv->tv_usec);
1145 
1146     unlock_user_struct(target_tv, target_tv_addr, 0);
1147 
1148     return 0;
1149 }
1150 
1151 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1152                                             const struct timeval *tv)
1153 {
1154     struct target_timeval *target_tv;
1155 
1156     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1157         return -TARGET_EFAULT;
1158 
1159     __put_user(tv->tv_sec, &target_tv->tv_sec);
1160     __put_user(tv->tv_usec, &target_tv->tv_usec);
1161 
1162     unlock_user_struct(target_tv, target_tv_addr, 1);
1163 
1164     return 0;
1165 }
1166 
1167 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1168                                                abi_ulong target_tz_addr)
1169 {
1170     struct target_timezone *target_tz;
1171 
1172     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1173         return -TARGET_EFAULT;
1174     }
1175 
1176     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1177     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1178 
1179     unlock_user_struct(target_tz, target_tz_addr, 0);
1180 
1181     return 0;
1182 }
1183 
1184 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1185 #include <mqueue.h>
1186 
1187 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1188                                               abi_ulong target_mq_attr_addr)
1189 {
1190     struct target_mq_attr *target_mq_attr;
1191 
1192     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1193                           target_mq_attr_addr, 1))
1194         return -TARGET_EFAULT;
1195 
1196     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1197     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1198     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1199     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1200 
1201     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1202 
1203     return 0;
1204 }
1205 
1206 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1207                                             const struct mq_attr *attr)
1208 {
1209     struct target_mq_attr *target_mq_attr;
1210 
1211     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1212                           target_mq_attr_addr, 0))
1213         return -TARGET_EFAULT;
1214 
1215     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1216     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1217     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1218     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1219 
1220     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1221 
1222     return 0;
1223 }
1224 #endif
1225 
1226 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1227 /* do_select() must return target values and target errnos. */
1228 static abi_long do_select(int n,
1229                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1230                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1231 {
1232     fd_set rfds, wfds, efds;
1233     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1234     struct timeval tv;
1235     struct timespec ts, *ts_ptr;
1236     abi_long ret;
1237 
1238     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1239     if (ret) {
1240         return ret;
1241     }
1242     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1243     if (ret) {
1244         return ret;
1245     }
1246     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1247     if (ret) {
1248         return ret;
1249     }
1250 
1251     if (target_tv_addr) {
1252         if (copy_from_user_timeval(&tv, target_tv_addr))
1253             return -TARGET_EFAULT;
1254         ts.tv_sec = tv.tv_sec;
1255         ts.tv_nsec = tv.tv_usec * 1000;
1256         ts_ptr = &ts;
1257     } else {
1258         ts_ptr = NULL;
1259     }
1260 
1261     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1262                                   ts_ptr, NULL));
1263 
1264     if (!is_error(ret)) {
1265         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1266             return -TARGET_EFAULT;
1267         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1268             return -TARGET_EFAULT;
1269         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1270             return -TARGET_EFAULT;
1271 
1272         if (target_tv_addr) {
1273             tv.tv_sec = ts.tv_sec;
1274             tv.tv_usec = ts.tv_nsec / 1000;
1275             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1276                 return -TARGET_EFAULT;
1277             }
1278         }
1279     }
1280 
1281     return ret;
1282 }
1283 
1284 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1285 static abi_long do_old_select(abi_ulong arg1)
1286 {
1287     struct target_sel_arg_struct *sel;
1288     abi_ulong inp, outp, exp, tvp;
1289     long nsel;
1290 
1291     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1292         return -TARGET_EFAULT;
1293     }
1294 
1295     nsel = tswapal(sel->n);
1296     inp = tswapal(sel->inp);
1297     outp = tswapal(sel->outp);
1298     exp = tswapal(sel->exp);
1299     tvp = tswapal(sel->tvp);
1300 
1301     unlock_user_struct(sel, arg1, 0);
1302 
1303     return do_select(nsel, inp, outp, exp, tvp);
1304 }
1305 #endif
1306 #endif
1307 
1308 static abi_long do_pipe2(int host_pipe[], int flags)
1309 {
1310 #ifdef CONFIG_PIPE2
1311     return pipe2(host_pipe, flags);
1312 #else
1313     return -ENOSYS;
1314 #endif
1315 }
1316 
1317 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1318                         int flags, int is_pipe2)
1319 {
1320     int host_pipe[2];
1321     abi_long ret;
1322     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1323 
1324     if (is_error(ret))
1325         return get_errno(ret);
1326 
1327     /* Several targets have special calling conventions for the original
1328        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1329     if (!is_pipe2) {
1330 #if defined(TARGET_ALPHA)
1331         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1332         return host_pipe[0];
1333 #elif defined(TARGET_MIPS)
1334         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1335         return host_pipe[0];
1336 #elif defined(TARGET_SH4)
1337         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1338         return host_pipe[0];
1339 #elif defined(TARGET_SPARC)
1340         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1341         return host_pipe[0];
1342 #endif
1343     }
1344 
1345     if (put_user_s32(host_pipe[0], pipedes)
1346         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1347         return -TARGET_EFAULT;
1348     return get_errno(ret);
1349 }
1350 
1351 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1352                                               abi_ulong target_addr,
1353                                               socklen_t len)
1354 {
1355     struct target_ip_mreqn *target_smreqn;
1356 
1357     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1358     if (!target_smreqn)
1359         return -TARGET_EFAULT;
1360     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1361     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1362     if (len == sizeof(struct target_ip_mreqn))
1363         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1364     unlock_user(target_smreqn, target_addr, 0);
1365 
1366     return 0;
1367 }
1368 
1369 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1370                                                abi_ulong target_addr,
1371                                                socklen_t len)
1372 {
1373     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1374     sa_family_t sa_family;
1375     struct target_sockaddr *target_saddr;
1376 
1377     if (fd_trans_target_to_host_addr(fd)) {
1378         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1379     }
1380 
1381     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1382     if (!target_saddr)
1383         return -TARGET_EFAULT;
1384 
1385     sa_family = tswap16(target_saddr->sa_family);
1386 
1387     /* Oops. The caller might send an incomplete sun_path; sun_path
1388      * must be terminated by \0 (see the manual page), but
1389      * unfortunately it is quite common to specify sockaddr_un
1390      * length as "strlen(x->sun_path)" while it should be
1391      * "strlen(...) + 1". We'll fix that here if needed.
1392      * Linux kernel has a similar feature.
1393      */
1394 
1395     if (sa_family == AF_UNIX) {
1396         if (len < unix_maxlen && len > 0) {
1397             char *cp = (char*)target_saddr;
1398 
1399             if ( cp[len-1] && !cp[len] )
1400                 len++;
1401         }
1402         if (len > unix_maxlen)
1403             len = unix_maxlen;
1404     }
1405 
1406     memcpy(addr, target_saddr, len);
1407     addr->sa_family = sa_family;
1408     if (sa_family == AF_NETLINK) {
1409         struct sockaddr_nl *nladdr;
1410 
1411         nladdr = (struct sockaddr_nl *)addr;
1412         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1413         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1414     } else if (sa_family == AF_PACKET) {
1415 	struct target_sockaddr_ll *lladdr;
1416 
1417 	lladdr = (struct target_sockaddr_ll *)addr;
1418 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1419 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1420     }
1421     unlock_user(target_saddr, target_addr, 0);
1422 
1423     return 0;
1424 }
1425 
1426 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1427                                                struct sockaddr *addr,
1428                                                socklen_t len)
1429 {
1430     struct target_sockaddr *target_saddr;
1431 
1432     if (len == 0) {
1433         return 0;
1434     }
1435     assert(addr);
1436 
1437     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1438     if (!target_saddr)
1439         return -TARGET_EFAULT;
1440     memcpy(target_saddr, addr, len);
1441     if (len >= offsetof(struct target_sockaddr, sa_family) +
1442         sizeof(target_saddr->sa_family)) {
1443         target_saddr->sa_family = tswap16(addr->sa_family);
1444     }
1445     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1446         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1447         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1448         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1449     } else if (addr->sa_family == AF_PACKET) {
1450         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1451         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1452         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1453     } else if (addr->sa_family == AF_INET6 &&
1454                len >= sizeof(struct target_sockaddr_in6)) {
1455         struct target_sockaddr_in6 *target_in6 =
1456                (struct target_sockaddr_in6 *)target_saddr;
1457         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1458     }
1459     unlock_user(target_saddr, target_addr, len);
1460 
1461     return 0;
1462 }
1463 
1464 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1465                                            struct target_msghdr *target_msgh)
1466 {
1467     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1468     abi_long msg_controllen;
1469     abi_ulong target_cmsg_addr;
1470     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1471     socklen_t space = 0;
1472 
1473     msg_controllen = tswapal(target_msgh->msg_controllen);
1474     if (msg_controllen < sizeof (struct target_cmsghdr))
1475         goto the_end;
1476     target_cmsg_addr = tswapal(target_msgh->msg_control);
1477     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1478     target_cmsg_start = target_cmsg;
1479     if (!target_cmsg)
1480         return -TARGET_EFAULT;
1481 
1482     while (cmsg && target_cmsg) {
1483         void *data = CMSG_DATA(cmsg);
1484         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1485 
1486         int len = tswapal(target_cmsg->cmsg_len)
1487             - sizeof(struct target_cmsghdr);
1488 
1489         space += CMSG_SPACE(len);
1490         if (space > msgh->msg_controllen) {
1491             space -= CMSG_SPACE(len);
1492             /* This is a QEMU bug, since we allocated the payload
1493              * area ourselves (unlike overflow in host-to-target
1494              * conversion, which is just the guest giving us a buffer
1495              * that's too small). It can't happen for the payload types
1496              * we currently support; if it becomes an issue in future
1497              * we would need to improve our allocation strategy to
1498              * something more intelligent than "twice the size of the
1499              * target buffer we're reading from".
1500              */
1501             gemu_log("Host cmsg overflow\n");
1502             break;
1503         }
1504 
1505         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1506             cmsg->cmsg_level = SOL_SOCKET;
1507         } else {
1508             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1509         }
1510         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1511         cmsg->cmsg_len = CMSG_LEN(len);
1512 
1513         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1514             int *fd = (int *)data;
1515             int *target_fd = (int *)target_data;
1516             int i, numfds = len / sizeof(int);
1517 
1518             for (i = 0; i < numfds; i++) {
1519                 __get_user(fd[i], target_fd + i);
1520             }
1521         } else if (cmsg->cmsg_level == SOL_SOCKET
1522                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1523             struct ucred *cred = (struct ucred *)data;
1524             struct target_ucred *target_cred =
1525                 (struct target_ucred *)target_data;
1526 
1527             __get_user(cred->pid, &target_cred->pid);
1528             __get_user(cred->uid, &target_cred->uid);
1529             __get_user(cred->gid, &target_cred->gid);
1530         } else {
1531             gemu_log("Unsupported ancillary data: %d/%d\n",
1532                                         cmsg->cmsg_level, cmsg->cmsg_type);
1533             memcpy(data, target_data, len);
1534         }
1535 
1536         cmsg = CMSG_NXTHDR(msgh, cmsg);
1537         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1538                                          target_cmsg_start);
1539     }
1540     unlock_user(target_cmsg, target_cmsg_addr, 0);
1541  the_end:
1542     msgh->msg_controllen = space;
1543     return 0;
1544 }
1545 
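     /* Copy and convert the ancillary data received from the host back
      * into the guest msghdr: byte-swap the cmsg headers, convert the
      * payloads we know about (SCM_RIGHTS, SCM_CREDENTIALS, SO_TIMESTAMP,
      * TTL/hop limit and RECVERR data) and report any truncation to the
      * guest via MSG_CTRUNC.
      */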
1546 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1547                                            struct msghdr *msgh)
1548 {
1549     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1550     abi_long msg_controllen;
1551     abi_ulong target_cmsg_addr;
1552     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1553     socklen_t space = 0;
1554 
1555     msg_controllen = tswapal(target_msgh->msg_controllen);
1556     if (msg_controllen < sizeof (struct target_cmsghdr))
1557         goto the_end;
1558     target_cmsg_addr = tswapal(target_msgh->msg_control);
1559     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1560     target_cmsg_start = target_cmsg;
1561     if (!target_cmsg)
1562         return -TARGET_EFAULT;
1563 
1564     while (cmsg && target_cmsg) {
1565         void *data = CMSG_DATA(cmsg);
1566         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1567 
1568         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1569         int tgt_len, tgt_space;
1570 
1571         /* We never copy a half-header but may copy half-data;
1572          * this is Linux's behaviour in put_cmsg(). Note that
1573          * truncation here is a guest problem (which we report
1574          * to the guest via the CTRUNC bit), unlike truncation
1575          * in target_to_host_cmsg, which is a QEMU bug.
1576          */
1577         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1578             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1579             break;
1580         }
1581 
1582         if (cmsg->cmsg_level == SOL_SOCKET) {
1583             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1584         } else {
1585             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1586         }
1587         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1588 
1589         /* Payload types which need a different size of payload on
1590          * the target must adjust tgt_len here.
1591          */
1592         tgt_len = len;
1593         switch (cmsg->cmsg_level) {
1594         case SOL_SOCKET:
1595             switch (cmsg->cmsg_type) {
1596             case SO_TIMESTAMP:
1597                 tgt_len = sizeof(struct target_timeval);
1598                 break;
1599             default:
1600                 break;
1601             }
1602             break;
1603         default:
1604             break;
1605         }
1606 
1607         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1608             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1609             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1610         }
1611 
1612         /* We must now copy-and-convert len bytes of payload
1613          * into tgt_len bytes of destination space. Bear in mind
1614          * that in both source and destination we may be dealing
1615          * with a truncated value!
1616          */
1617         switch (cmsg->cmsg_level) {
1618         case SOL_SOCKET:
1619             switch (cmsg->cmsg_type) {
1620             case SCM_RIGHTS:
1621             {
1622                 int *fd = (int *)data;
1623                 int *target_fd = (int *)target_data;
1624                 int i, numfds = tgt_len / sizeof(int);
1625 
1626                 for (i = 0; i < numfds; i++) {
1627                     __put_user(fd[i], target_fd + i);
1628                 }
1629                 break;
1630             }
1631             case SO_TIMESTAMP:
1632             {
1633                 struct timeval *tv = (struct timeval *)data;
1634                 struct target_timeval *target_tv =
1635                     (struct target_timeval *)target_data;
1636 
1637                 if (len != sizeof(struct timeval) ||
1638                     tgt_len != sizeof(struct target_timeval)) {
1639                     goto unimplemented;
1640                 }
1641 
1642                 /* copy struct timeval to target */
1643                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1644                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1645                 break;
1646             }
1647             case SCM_CREDENTIALS:
1648             {
1649                 struct ucred *cred = (struct ucred *)data;
1650                 struct target_ucred *target_cred =
1651                     (struct target_ucred *)target_data;
1652 
1653                 __put_user(cred->pid, &target_cred->pid);
1654                 __put_user(cred->uid, &target_cred->uid);
1655                 __put_user(cred->gid, &target_cred->gid);
1656                 break;
1657             }
1658             default:
1659                 goto unimplemented;
1660             }
1661             break;
1662 
1663         case SOL_IP:
1664             switch (cmsg->cmsg_type) {
1665             case IP_TTL:
1666             {
1667                 uint32_t *v = (uint32_t *)data;
1668                 uint32_t *t_int = (uint32_t *)target_data;
1669 
1670                 if (len != sizeof(uint32_t) ||
1671                     tgt_len != sizeof(uint32_t)) {
1672                     goto unimplemented;
1673                 }
1674                 __put_user(*v, t_int);
1675                 break;
1676             }
1677             case IP_RECVERR:
1678             {
1679                 struct errhdr_t {
1680                    struct sock_extended_err ee;
1681                    struct sockaddr_in offender;
1682                 };
1683                 struct errhdr_t *errh = (struct errhdr_t *)data;
1684                 struct errhdr_t *target_errh =
1685                     (struct errhdr_t *)target_data;
1686 
1687                 if (len != sizeof(struct errhdr_t) ||
1688                     tgt_len != sizeof(struct errhdr_t)) {
1689                     goto unimplemented;
1690                 }
1691                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1692                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1693                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1694                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1695                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1696                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1697                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1698                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1699                     (void *) &errh->offender, sizeof(errh->offender));
1700                 break;
1701             }
1702             default:
1703                 goto unimplemented;
1704             }
1705             break;
1706 
1707         case SOL_IPV6:
1708             switch (cmsg->cmsg_type) {
1709             case IPV6_HOPLIMIT:
1710             {
1711                 uint32_t *v = (uint32_t *)data;
1712                 uint32_t *t_int = (uint32_t *)target_data;
1713 
1714                 if (len != sizeof(uint32_t) ||
1715                     tgt_len != sizeof(uint32_t)) {
1716                     goto unimplemented;
1717                 }
1718                 __put_user(*v, t_int);
1719                 break;
1720             }
1721             case IPV6_RECVERR:
1722             {
1723                 struct errhdr6_t {
1724                    struct sock_extended_err ee;
1725                    struct sockaddr_in6 offender;
1726                 };
1727                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1728                 struct errhdr6_t *target_errh =
1729                     (struct errhdr6_t *)target_data;
1730 
1731                 if (len != sizeof(struct errhdr6_t) ||
1732                     tgt_len != sizeof(struct errhdr6_t)) {
1733                     goto unimplemented;
1734                 }
1735                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1736                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1737                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1738                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1739                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1740                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1741                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1742                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1743                     (void *) &errh->offender, sizeof(errh->offender));
1744                 break;
1745             }
1746             default:
1747                 goto unimplemented;
1748             }
1749             break;
1750 
1751         default:
1752         unimplemented:
1753             gemu_log("Unsupported ancillary data: %d/%d\n",
1754                                         cmsg->cmsg_level, cmsg->cmsg_type);
1755             memcpy(target_data, data, MIN(len, tgt_len));
1756             if (tgt_len > len) {
1757                 memset(target_data + len, 0, tgt_len - len);
1758             }
1759         }
1760 
1761         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1762         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1763         if (msg_controllen < tgt_space) {
1764             tgt_space = msg_controllen;
1765         }
1766         msg_controllen -= tgt_space;
1767         space += tgt_space;
1768         cmsg = CMSG_NXTHDR(msgh, cmsg);
1769         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1770                                          target_cmsg_start);
1771     }
1772     unlock_user(target_cmsg, target_cmsg_addr, space);
1773  the_end:
1774     target_msgh->msg_controllen = tswapal(space);
1775     return 0;
1776 }
1777 
1778 /* do_setsockopt() Must return target values and target errnos. */
1779 static abi_long do_setsockopt(int sockfd, int level, int optname,
1780                               abi_ulong optval_addr, socklen_t optlen)
1781 {
1782     abi_long ret;
1783     int val;
1784     struct ip_mreqn *ip_mreq;
1785     struct ip_mreq_source *ip_mreq_source;
1786 
1787     switch(level) {
1788     case SOL_TCP:
1789         /* TCP options all take an 'int' value.  */
1790         if (optlen < sizeof(uint32_t))
1791             return -TARGET_EINVAL;
1792 
1793         if (get_user_u32(val, optval_addr))
1794             return -TARGET_EFAULT;
1795         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1796         break;
1797     case SOL_IP:
1798         switch(optname) {
1799         case IP_TOS:
1800         case IP_TTL:
1801         case IP_HDRINCL:
1802         case IP_ROUTER_ALERT:
1803         case IP_RECVOPTS:
1804         case IP_RETOPTS:
1805         case IP_PKTINFO:
1806         case IP_MTU_DISCOVER:
1807         case IP_RECVERR:
1808         case IP_RECVTTL:
1809         case IP_RECVTOS:
1810 #ifdef IP_FREEBIND
1811         case IP_FREEBIND:
1812 #endif
1813         case IP_MULTICAST_TTL:
1814         case IP_MULTICAST_LOOP:
1815             val = 0;
1816             if (optlen >= sizeof(uint32_t)) {
1817                 if (get_user_u32(val, optval_addr))
1818                     return -TARGET_EFAULT;
1819             } else if (optlen >= 1) {
1820                 if (get_user_u8(val, optval_addr))
1821                     return -TARGET_EFAULT;
1822             }
1823             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1824             break;
1825         case IP_ADD_MEMBERSHIP:
1826         case IP_DROP_MEMBERSHIP:
1827             if (optlen < sizeof (struct target_ip_mreq) ||
1828                 optlen > sizeof (struct target_ip_mreqn))
1829                 return -TARGET_EINVAL;
1830 
1831             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1832             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1833             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1834             break;
1835 
1836         case IP_BLOCK_SOURCE:
1837         case IP_UNBLOCK_SOURCE:
1838         case IP_ADD_SOURCE_MEMBERSHIP:
1839         case IP_DROP_SOURCE_MEMBERSHIP:
1840             if (optlen != sizeof (struct target_ip_mreq_source))
1841                 return -TARGET_EINVAL;
1842 
1843             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1844             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1845             unlock_user (ip_mreq_source, optval_addr, 0);
1846             break;
1847 
1848         default:
1849             goto unimplemented;
1850         }
1851         break;
1852     case SOL_IPV6:
1853         switch (optname) {
1854         case IPV6_MTU_DISCOVER:
1855         case IPV6_MTU:
1856         case IPV6_V6ONLY:
1857         case IPV6_RECVPKTINFO:
1858         case IPV6_UNICAST_HOPS:
1859         case IPV6_MULTICAST_HOPS:
1860         case IPV6_MULTICAST_LOOP:
1861         case IPV6_RECVERR:
1862         case IPV6_RECVHOPLIMIT:
1863         case IPV6_2292HOPLIMIT:
1864         case IPV6_CHECKSUM:
1865         case IPV6_ADDRFORM:
1866         case IPV6_2292PKTINFO:
1867         case IPV6_RECVTCLASS:
1868         case IPV6_RECVRTHDR:
1869         case IPV6_2292RTHDR:
1870         case IPV6_RECVHOPOPTS:
1871         case IPV6_2292HOPOPTS:
1872         case IPV6_RECVDSTOPTS:
1873         case IPV6_2292DSTOPTS:
1874         case IPV6_TCLASS:
1875 #ifdef IPV6_RECVPATHMTU
1876         case IPV6_RECVPATHMTU:
1877 #endif
1878 #ifdef IPV6_TRANSPARENT
1879         case IPV6_TRANSPARENT:
1880 #endif
1881 #ifdef IPV6_FREEBIND
1882         case IPV6_FREEBIND:
1883 #endif
1884 #ifdef IPV6_RECVORIGDSTADDR
1885         case IPV6_RECVORIGDSTADDR:
1886 #endif
1887             val = 0;
1888             if (optlen < sizeof(uint32_t)) {
1889                 return -TARGET_EINVAL;
1890             }
1891             if (get_user_u32(val, optval_addr)) {
1892                 return -TARGET_EFAULT;
1893             }
1894             ret = get_errno(setsockopt(sockfd, level, optname,
1895                                        &val, sizeof(val)));
1896             break;
1897         case IPV6_PKTINFO:
1898         {
1899             struct in6_pktinfo pki;
1900 
1901             if (optlen < sizeof(pki)) {
1902                 return -TARGET_EINVAL;
1903             }
1904 
1905             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1906                 return -TARGET_EFAULT;
1907             }
1908 
1909             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1910 
1911             ret = get_errno(setsockopt(sockfd, level, optname,
1912                                        &pki, sizeof(pki)));
1913             break;
1914         }
1915         default:
1916             goto unimplemented;
1917         }
1918         break;
1919     case SOL_ICMPV6:
1920         switch (optname) {
1921         case ICMPV6_FILTER:
1922         {
1923             struct icmp6_filter icmp6f;
1924 
1925             if (optlen > sizeof(icmp6f)) {
1926                 optlen = sizeof(icmp6f);
1927             }
1928 
1929             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
1930                 return -TARGET_EFAULT;
1931             }
1932 
1933             for (val = 0; val < 8; val++) {
1934                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
1935             }
1936 
1937             ret = get_errno(setsockopt(sockfd, level, optname,
1938                                        &icmp6f, optlen));
1939             break;
1940         }
1941         default:
1942             goto unimplemented;
1943         }
1944         break;
1945     case SOL_RAW:
1946         switch (optname) {
1947         case ICMP_FILTER:
1948         case IPV6_CHECKSUM:
1949             /* These options take a u32 value. */
1950             if (optlen < sizeof(uint32_t)) {
1951                 return -TARGET_EINVAL;
1952             }
1953 
1954             if (get_user_u32(val, optval_addr)) {
1955                 return -TARGET_EFAULT;
1956             }
1957             ret = get_errno(setsockopt(sockfd, level, optname,
1958                                        &val, sizeof(val)));
1959             break;
1960 
1961         default:
1962             goto unimplemented;
1963         }
1964         break;
1965     case TARGET_SOL_SOCKET:
1966         switch (optname) {
1967         case TARGET_SO_RCVTIMEO:
1968         {
1969                 struct timeval tv;
1970 
1971                 optname = SO_RCVTIMEO;
1972 
1973 set_timeout:
1974                 if (optlen != sizeof(struct target_timeval)) {
1975                     return -TARGET_EINVAL;
1976                 }
1977 
1978                 if (copy_from_user_timeval(&tv, optval_addr)) {
1979                     return -TARGET_EFAULT;
1980                 }
1981 
1982                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1983                                 &tv, sizeof(tv)));
1984                 return ret;
1985         }
1986         case TARGET_SO_SNDTIMEO:
1987                 optname = SO_SNDTIMEO;
1988                 goto set_timeout;
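             /* SO_ATTACH_FILTER passes a sock_fprog whose instruction
              * array lives in guest memory: copy the program and
              * byte-swap each BPF instruction before handing it to the
              * host kernel.
              */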
1989         case TARGET_SO_ATTACH_FILTER:
1990         {
1991                 struct target_sock_fprog *tfprog;
1992                 struct target_sock_filter *tfilter;
1993                 struct sock_fprog fprog;
1994                 struct sock_filter *filter;
1995                 int i;
1996 
1997                 if (optlen != sizeof(*tfprog)) {
1998                     return -TARGET_EINVAL;
1999                 }
2000                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2001                     return -TARGET_EFAULT;
2002                 }
2003                 if (!lock_user_struct(VERIFY_READ, tfilter,
2004                                       tswapal(tfprog->filter), 0)) {
2005                     unlock_user_struct(tfprog, optval_addr, 1);
2006                     return -TARGET_EFAULT;
2007                 }
2008 
2009                 fprog.len = tswap16(tfprog->len);
2010                 filter = g_try_new(struct sock_filter, fprog.len);
2011                 if (filter == NULL) {
2012                     unlock_user_struct(tfilter, tfprog->filter, 1);
2013                     unlock_user_struct(tfprog, optval_addr, 1);
2014                     return -TARGET_ENOMEM;
2015                 }
2016                 for (i = 0; i < fprog.len; i++) {
2017                     filter[i].code = tswap16(tfilter[i].code);
2018                     filter[i].jt = tfilter[i].jt;
2019                     filter[i].jf = tfilter[i].jf;
2020                     filter[i].k = tswap32(tfilter[i].k);
2021                 }
2022                 fprog.filter = filter;
2023 
2024                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2025                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2026                 g_free(filter);
2027 
2028                 unlock_user_struct(tfilter, tfprog->filter, 1);
2029                 unlock_user_struct(tfprog, optval_addr, 1);
2030                 return ret;
2031         }
2032         case TARGET_SO_BINDTODEVICE:
2033         {
2034                 char *dev_ifname, *addr_ifname;
2035 
2036                 if (optlen > IFNAMSIZ - 1) {
2037                     optlen = IFNAMSIZ - 1;
2038                 }
2039                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2040                 if (!dev_ifname) {
2041                     return -TARGET_EFAULT;
2042                 }
2043                 optname = SO_BINDTODEVICE;
2044                 addr_ifname = alloca(IFNAMSIZ);
2045                 memcpy(addr_ifname, dev_ifname, optlen);
2046                 addr_ifname[optlen] = 0;
2047                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2048                                            addr_ifname, optlen));
2049                 unlock_user(dev_ifname, optval_addr, 0);
2050                 return ret;
2051         }
2052         case TARGET_SO_LINGER:
2053         {
2054                 struct linger lg;
2055                 struct target_linger *tlg;
2056 
2057                 if (optlen != sizeof(struct target_linger)) {
2058                     return -TARGET_EINVAL;
2059                 }
2060                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2061                     return -TARGET_EFAULT;
2062                 }
2063                 __get_user(lg.l_onoff, &tlg->l_onoff);
2064                 __get_user(lg.l_linger, &tlg->l_linger);
2065                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2066                                 &lg, sizeof(lg)));
2067                 unlock_user_struct(tlg, optval_addr, 0);
2068                 return ret;
2069         }
2070         /* Options with 'int' argument.  */
2071         case TARGET_SO_DEBUG:
2072                 optname = SO_DEBUG;
2073                 break;
2074         case TARGET_SO_REUSEADDR:
2075                 optname = SO_REUSEADDR;
2076                 break;
2077 #ifdef SO_REUSEPORT
2078         case TARGET_SO_REUSEPORT:
2079                 optname = SO_REUSEPORT;
2080                 break;
2081 #endif
2082         case TARGET_SO_TYPE:
2083                 optname = SO_TYPE;
2084                 break;
2085         case TARGET_SO_ERROR:
2086                 optname = SO_ERROR;
2087                 break;
2088         case TARGET_SO_DONTROUTE:
2089                 optname = SO_DONTROUTE;
2090                 break;
2091         case TARGET_SO_BROADCAST:
2092                 optname = SO_BROADCAST;
2093                 break;
2094         case TARGET_SO_SNDBUF:
2095                 optname = SO_SNDBUF;
2096                 break;
2097         case TARGET_SO_SNDBUFFORCE:
2098                 optname = SO_SNDBUFFORCE;
2099                 break;
2100         case TARGET_SO_RCVBUF:
2101                 optname = SO_RCVBUF;
2102                 break;
2103         case TARGET_SO_RCVBUFFORCE:
2104                 optname = SO_RCVBUFFORCE;
2105                 break;
2106         case TARGET_SO_KEEPALIVE:
2107                 optname = SO_KEEPALIVE;
2108                 break;
2109         case TARGET_SO_OOBINLINE:
2110                 optname = SO_OOBINLINE;
2111                 break;
2112         case TARGET_SO_NO_CHECK:
2113                 optname = SO_NO_CHECK;
2114                 break;
2115         case TARGET_SO_PRIORITY:
2116                 optname = SO_PRIORITY;
2117                 break;
2118 #ifdef SO_BSDCOMPAT
2119         case TARGET_SO_BSDCOMPAT:
2120                 optname = SO_BSDCOMPAT;
2121                 break;
2122 #endif
2123         case TARGET_SO_PASSCRED:
2124                 optname = SO_PASSCRED;
2125                 break;
2126         case TARGET_SO_PASSSEC:
2127                 optname = SO_PASSSEC;
2128                 break;
2129         case TARGET_SO_TIMESTAMP:
2130                 optname = SO_TIMESTAMP;
2131                 break;
2132         case TARGET_SO_RCVLOWAT:
2133                 optname = SO_RCVLOWAT;
2134                 break;
2135         default:
2136             goto unimplemented;
2137         }
2138         if (optlen < sizeof(uint32_t))
2139             return -TARGET_EINVAL;
2140 
2141         if (get_user_u32(val, optval_addr))
2142             return -TARGET_EFAULT;
2143         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2144         break;
2145     default:
2146     unimplemented:
2147         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2148         ret = -TARGET_ENOPROTOOPT;
2149     }
2150     return ret;
2151 }
2152 
2153 /* do_getsockopt() Must return target values and target errnos. */
2154 static abi_long do_getsockopt(int sockfd, int level, int optname,
2155                               abi_ulong optval_addr, abi_ulong optlen)
2156 {
2157     abi_long ret;
2158     int len, val;
2159     socklen_t lv;
2160 
2161     switch(level) {
2162     case TARGET_SOL_SOCKET:
2163         level = SOL_SOCKET;
2164         switch (optname) {
2165         /* These don't just return a single integer */
2166         case TARGET_SO_RCVTIMEO:
2167         case TARGET_SO_SNDTIMEO:
2168         case TARGET_SO_PEERNAME:
2169             goto unimplemented;
2170         case TARGET_SO_PEERCRED: {
2171             struct ucred cr;
2172             socklen_t crlen;
2173             struct target_ucred *tcr;
2174 
2175             if (get_user_u32(len, optlen)) {
2176                 return -TARGET_EFAULT;
2177             }
2178             if (len < 0) {
2179                 return -TARGET_EINVAL;
2180             }
2181 
2182             crlen = sizeof(cr);
2183             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2184                                        &cr, &crlen));
2185             if (ret < 0) {
2186                 return ret;
2187             }
2188             if (len > crlen) {
2189                 len = crlen;
2190             }
2191             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2192                 return -TARGET_EFAULT;
2193             }
2194             __put_user(cr.pid, &tcr->pid);
2195             __put_user(cr.uid, &tcr->uid);
2196             __put_user(cr.gid, &tcr->gid);
2197             unlock_user_struct(tcr, optval_addr, 1);
2198             if (put_user_u32(len, optlen)) {
2199                 return -TARGET_EFAULT;
2200             }
2201             break;
2202         }
2203         case TARGET_SO_LINGER:
2204         {
2205             struct linger lg;
2206             socklen_t lglen;
2207             struct target_linger *tlg;
2208 
2209             if (get_user_u32(len, optlen)) {
2210                 return -TARGET_EFAULT;
2211             }
2212             if (len < 0) {
2213                 return -TARGET_EINVAL;
2214             }
2215 
2216             lglen = sizeof(lg);
2217             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2218                                        &lg, &lglen));
2219             if (ret < 0) {
2220                 return ret;
2221             }
2222             if (len > lglen) {
2223                 len = lglen;
2224             }
2225             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2226                 return -TARGET_EFAULT;
2227             }
2228             __put_user(lg.l_onoff, &tlg->l_onoff);
2229             __put_user(lg.l_linger, &tlg->l_linger);
2230             unlock_user_struct(tlg, optval_addr, 1);
2231             if (put_user_u32(len, optlen)) {
2232                 return -TARGET_EFAULT;
2233             }
2234             break;
2235         }
2236         /* Options with 'int' argument.  */
2237         case TARGET_SO_DEBUG:
2238             optname = SO_DEBUG;
2239             goto int_case;
2240         case TARGET_SO_REUSEADDR:
2241             optname = SO_REUSEADDR;
2242             goto int_case;
2243 #ifdef SO_REUSEPORT
2244         case TARGET_SO_REUSEPORT:
2245             optname = SO_REUSEPORT;
2246             goto int_case;
2247 #endif
2248         case TARGET_SO_TYPE:
2249             optname = SO_TYPE;
2250             goto int_case;
2251         case TARGET_SO_ERROR:
2252             optname = SO_ERROR;
2253             goto int_case;
2254         case TARGET_SO_DONTROUTE:
2255             optname = SO_DONTROUTE;
2256             goto int_case;
2257         case TARGET_SO_BROADCAST:
2258             optname = SO_BROADCAST;
2259             goto int_case;
2260         case TARGET_SO_SNDBUF:
2261             optname = SO_SNDBUF;
2262             goto int_case;
2263         case TARGET_SO_RCVBUF:
2264             optname = SO_RCVBUF;
2265             goto int_case;
2266         case TARGET_SO_KEEPALIVE:
2267             optname = SO_KEEPALIVE;
2268             goto int_case;
2269         case TARGET_SO_OOBINLINE:
2270             optname = SO_OOBINLINE;
2271             goto int_case;
2272         case TARGET_SO_NO_CHECK:
2273             optname = SO_NO_CHECK;
2274             goto int_case;
2275         case TARGET_SO_PRIORITY:
2276             optname = SO_PRIORITY;
2277             goto int_case;
2278 #ifdef SO_BSDCOMPAT
2279         case TARGET_SO_BSDCOMPAT:
2280             optname = SO_BSDCOMPAT;
2281             goto int_case;
2282 #endif
2283         case TARGET_SO_PASSCRED:
2284             optname = SO_PASSCRED;
2285             goto int_case;
2286         case TARGET_SO_TIMESTAMP:
2287             optname = SO_TIMESTAMP;
2288             goto int_case;
2289         case TARGET_SO_RCVLOWAT:
2290             optname = SO_RCVLOWAT;
2291             goto int_case;
2292         case TARGET_SO_ACCEPTCONN:
2293             optname = SO_ACCEPTCONN;
2294             goto int_case;
2295         default:
2296             goto int_case;
2297         }
2298         break;
2299     case SOL_TCP:
2300         /* TCP options all take an 'int' value.  */
2301     int_case:
2302         if (get_user_u32(len, optlen))
2303             return -TARGET_EFAULT;
2304         if (len < 0)
2305             return -TARGET_EINVAL;
2306         lv = sizeof(lv);
2307         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2308         if (ret < 0)
2309             return ret;
2310         if (optname == SO_TYPE) {
2311             val = host_to_target_sock_type(val);
2312         }
2313         if (len > lv)
2314             len = lv;
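             /* If the guest buffer cannot hold a full int, write back
              * only the low byte of the value.
              */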
2315         if (len == 4) {
2316             if (put_user_u32(val, optval_addr))
2317                 return -TARGET_EFAULT;
2318         } else {
2319             if (put_user_u8(val, optval_addr))
2320                 return -TARGET_EFAULT;
2321         }
2322         if (put_user_u32(len, optlen))
2323             return -TARGET_EFAULT;
2324         break;
2325     case SOL_IP:
2326         switch(optname) {
2327         case IP_TOS:
2328         case IP_TTL:
2329         case IP_HDRINCL:
2330         case IP_ROUTER_ALERT:
2331         case IP_RECVOPTS:
2332         case IP_RETOPTS:
2333         case IP_PKTINFO:
2334         case IP_MTU_DISCOVER:
2335         case IP_RECVERR:
2336         case IP_RECVTOS:
2337 #ifdef IP_FREEBIND
2338         case IP_FREEBIND:
2339 #endif
2340         case IP_MULTICAST_TTL:
2341         case IP_MULTICAST_LOOP:
2342             if (get_user_u32(len, optlen))
2343                 return -TARGET_EFAULT;
2344             if (len < 0)
2345                 return -TARGET_EINVAL;
2346             lv = sizeof(lv);
2347             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2348             if (ret < 0)
2349                 return ret;
2350             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2351                 len = 1;
2352                 if (put_user_u32(len, optlen)
2353                     || put_user_u8(val, optval_addr))
2354                     return -TARGET_EFAULT;
2355             } else {
2356                 if (len > sizeof(int))
2357                     len = sizeof(int);
2358                 if (put_user_u32(len, optlen)
2359                     || put_user_u32(val, optval_addr))
2360                     return -TARGET_EFAULT;
2361             }
2362             break;
2363         default:
2364             ret = -TARGET_ENOPROTOOPT;
2365             break;
2366         }
2367         break;
2368     case SOL_IPV6:
2369         switch (optname) {
2370         case IPV6_MTU_DISCOVER:
2371         case IPV6_MTU:
2372         case IPV6_V6ONLY:
2373         case IPV6_RECVPKTINFO:
2374         case IPV6_UNICAST_HOPS:
2375         case IPV6_MULTICAST_HOPS:
2376         case IPV6_MULTICAST_LOOP:
2377         case IPV6_RECVERR:
2378         case IPV6_RECVHOPLIMIT:
2379         case IPV6_2292HOPLIMIT:
2380         case IPV6_CHECKSUM:
2381         case IPV6_ADDRFORM:
2382         case IPV6_2292PKTINFO:
2383         case IPV6_RECVTCLASS:
2384         case IPV6_RECVRTHDR:
2385         case IPV6_2292RTHDR:
2386         case IPV6_RECVHOPOPTS:
2387         case IPV6_2292HOPOPTS:
2388         case IPV6_RECVDSTOPTS:
2389         case IPV6_2292DSTOPTS:
2390         case IPV6_TCLASS:
2391 #ifdef IPV6_RECVPATHMTU
2392         case IPV6_RECVPATHMTU:
2393 #endif
2394 #ifdef IPV6_TRANSPARENT
2395         case IPV6_TRANSPARENT:
2396 #endif
2397 #ifdef IPV6_FREEBIND
2398         case IPV6_FREEBIND:
2399 #endif
2400 #ifdef IPV6_RECVORIGDSTADDR
2401         case IPV6_RECVORIGDSTADDR:
2402 #endif
2403             if (get_user_u32(len, optlen))
2404                 return -TARGET_EFAULT;
2405             if (len < 0)
2406                 return -TARGET_EINVAL;
2407             lv = sizeof(lv);
2408             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2409             if (ret < 0)
2410                 return ret;
2411             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2412                 len = 1;
2413                 if (put_user_u32(len, optlen)
2414                     || put_user_u8(val, optval_addr))
2415                     return -TARGET_EFAULT;
2416             } else {
2417                 if (len > sizeof(int))
2418                     len = sizeof(int);
2419                 if (put_user_u32(len, optlen)
2420                     || put_user_u32(val, optval_addr))
2421                     return -TARGET_EFAULT;
2422             }
2423             break;
2424         default:
2425             ret = -TARGET_ENOPROTOOPT;
2426             break;
2427         }
2428         break;
2429     default:
2430     unimplemented:
2431         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2432                  level, optname);
2433         ret = -TARGET_EOPNOTSUPP;
2434         break;
2435     }
2436     return ret;
2437 }
2438 
2439 /* Convert target low/high pair representing file offset into the host
2440  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2441  * as the kernel doesn't handle them either.
2442  */
2443 static void target_to_host_low_high(abi_ulong tlow,
2444                                     abi_ulong thigh,
2445                                     unsigned long *hlow,
2446                                     unsigned long *hhigh)
2447 {
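         /* Shifting by the full width of a type is undefined behaviour in
          * C, so each shift is split into two halves; this keeps the code
          * safe when the target (or host) long is 64 bits wide.
          */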
2448     uint64_t off = tlow |
2449         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2450         TARGET_LONG_BITS / 2;
2451 
2452     *hlow = off;
2453     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2454 }
2455 
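     /* Build a host iovec array from the guest iovec array at target_addr,
      * locking each guest buffer into host memory.  Returns NULL with
      * errno set on error (errno is zero for a zero count); the result
      * must be released with unlock_iovec().
      */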
2456 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2457                                 abi_ulong count, int copy)
2458 {
2459     struct target_iovec *target_vec;
2460     struct iovec *vec;
2461     abi_ulong total_len, max_len;
2462     int i;
2463     int err = 0;
2464     bool bad_address = false;
2465 
2466     if (count == 0) {
2467         errno = 0;
2468         return NULL;
2469     }
2470     if (count > IOV_MAX) {
2471         errno = EINVAL;
2472         return NULL;
2473     }
2474 
2475     vec = g_try_new0(struct iovec, count);
2476     if (vec == NULL) {
2477         errno = ENOMEM;
2478         return NULL;
2479     }
2480 
2481     target_vec = lock_user(VERIFY_READ, target_addr,
2482                            count * sizeof(struct target_iovec), 1);
2483     if (target_vec == NULL) {
2484         err = EFAULT;
2485         goto fail2;
2486     }
2487 
2488     /* ??? If host page size > target page size, this will result in a
2489        value larger than what we can actually support.  */
2490     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2491     total_len = 0;
2492 
2493     for (i = 0; i < count; i++) {
2494         abi_ulong base = tswapal(target_vec[i].iov_base);
2495         abi_long len = tswapal(target_vec[i].iov_len);
2496 
2497         if (len < 0) {
2498             err = EINVAL;
2499             goto fail;
2500         } else if (len == 0) {
2501             /* Zero length pointer is ignored.  */
2502             vec[i].iov_base = 0;
2503         } else {
2504             vec[i].iov_base = lock_user(type, base, len, copy);
2505             /* If the first buffer pointer is bad, this is a fault.  But
2506              * subsequent bad buffers will result in a partial write; this
2507              * is realized by filling the vector with null pointers and
2508              * zero lengths. */
2509             if (!vec[i].iov_base) {
2510                 if (i == 0) {
2511                     err = EFAULT;
2512                     goto fail;
2513                 } else {
2514                     bad_address = true;
2515                 }
2516             }
2517             if (bad_address) {
2518                 len = 0;
2519             }
2520             if (len > max_len - total_len) {
2521                 len = max_len - total_len;
2522             }
2523         }
2524         vec[i].iov_len = len;
2525         total_len += len;
2526     }
2527 
2528     unlock_user(target_vec, target_addr, 0);
2529     return vec;
2530 
2531  fail:
2532     while (--i >= 0) {
2533         if (tswapal(target_vec[i].iov_len) > 0) {
2534             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2535         }
2536     }
2537     unlock_user(target_vec, target_addr, 0);
2538  fail2:
2539     g_free(vec);
2540     errno = err;
2541     return NULL;
2542 }
2543 
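     /* Release an iovec array obtained from lock_iovec(), copying buffer
      * contents back to the guest when 'copy' is set, and free the host
      * array.
      */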
2544 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2545                          abi_ulong count, int copy)
2546 {
2547     struct target_iovec *target_vec;
2548     int i;
2549 
2550     target_vec = lock_user(VERIFY_READ, target_addr,
2551                            count * sizeof(struct target_iovec), 1);
2552     if (target_vec) {
2553         for (i = 0; i < count; i++) {
2554             abi_ulong base = tswapal(target_vec[i].iov_base);
2555             abi_long len = tswapal(target_vec[i].iov_len);
2556             if (len < 0) {
2557                 break;
2558             }
2559             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2560         }
2561         unlock_user(target_vec, target_addr, 0);
2562     }
2563 
2564     g_free(vec);
2565 }
2566 
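     /* Translate a guest SOCK_* type (including the SOCK_CLOEXEC and
      * SOCK_NONBLOCK flags) into host values.  Returns -TARGET_EINVAL if
      * a requested flag cannot be represented on this host.
      */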
2567 static inline int target_to_host_sock_type(int *type)
2568 {
2569     int host_type = 0;
2570     int target_type = *type;
2571 
2572     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2573     case TARGET_SOCK_DGRAM:
2574         host_type = SOCK_DGRAM;
2575         break;
2576     case TARGET_SOCK_STREAM:
2577         host_type = SOCK_STREAM;
2578         break;
2579     default:
2580         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2581         break;
2582     }
2583     if (target_type & TARGET_SOCK_CLOEXEC) {
2584 #if defined(SOCK_CLOEXEC)
2585         host_type |= SOCK_CLOEXEC;
2586 #else
2587         return -TARGET_EINVAL;
2588 #endif
2589     }
2590     if (target_type & TARGET_SOCK_NONBLOCK) {
2591 #if defined(SOCK_NONBLOCK)
2592         host_type |= SOCK_NONBLOCK;
2593 #elif !defined(O_NONBLOCK)
2594         return -TARGET_EINVAL;
2595 #endif
2596     }
2597     *type = host_type;
2598     return 0;
2599 }
2600 
2601 /* Try to emulate socket type flags after socket creation.  */
2602 static int sock_flags_fixup(int fd, int target_type)
2603 {
2604 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2605     if (target_type & TARGET_SOCK_NONBLOCK) {
2606         int flags = fcntl(fd, F_GETFL);
2607         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2608             close(fd);
2609             return -TARGET_EINVAL;
2610         }
2611     }
2612 #endif
2613     return fd;
2614 }
2615 
2616 /* do_socket() Must return target values and target errnos. */
2617 static abi_long do_socket(int domain, int type, int protocol)
2618 {
2619     int target_type = type;
2620     int ret;
2621 
2622     ret = target_to_host_sock_type(&type);
2623     if (ret) {
2624         return ret;
2625     }
2626 
2627     if (domain == PF_NETLINK && !(
2628 #ifdef CONFIG_RTNETLINK
2629          protocol == NETLINK_ROUTE ||
2630 #endif
2631          protocol == NETLINK_KOBJECT_UEVENT ||
2632          protocol == NETLINK_AUDIT)) {
2633         return -EPFNOSUPPORT;
2634     }
2635 
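         /* Packet sockets take a 16-bit protocol number in network byte
          * order; byte-swap it so the host kernel sees the same protocol
          * the guest requested.
          */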
2636     if (domain == AF_PACKET ||
2637         (domain == AF_INET && type == SOCK_PACKET)) {
2638         protocol = tswap16(protocol);
2639     }
2640 
2641     ret = get_errno(socket(domain, type, protocol));
2642     if (ret >= 0) {
2643         ret = sock_flags_fixup(ret, target_type);
2644         if (type == SOCK_PACKET) {
2645             /* Handle the obsolete SOCK_PACKET case: these sockets
2646              * are bound by device name rather than by address.
2647              */
2648             fd_trans_register(ret, &target_packet_trans);
2649         } else if (domain == PF_NETLINK) {
2650             switch (protocol) {
2651 #ifdef CONFIG_RTNETLINK
2652             case NETLINK_ROUTE:
2653                 fd_trans_register(ret, &target_netlink_route_trans);
2654                 break;
2655 #endif
2656             case NETLINK_KOBJECT_UEVENT:
2657                 /* nothing to do: messages are strings */
2658                 break;
2659             case NETLINK_AUDIT:
2660                 fd_trans_register(ret, &target_netlink_audit_trans);
2661                 break;
2662             default:
2663                 g_assert_not_reached();
2664             }
2665         }
2666     }
2667     return ret;
2668 }
2669 
2670 /* do_bind() Must return target values and target errnos. */
2671 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2672                         socklen_t addrlen)
2673 {
2674     void *addr;
2675     abi_long ret;
2676 
2677     if ((int)addrlen < 0) {
2678         return -TARGET_EINVAL;
2679     }
2680 
2681     addr = alloca(addrlen+1);
2682 
2683     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2684     if (ret)
2685         return ret;
2686 
2687     return get_errno(bind(sockfd, addr, addrlen));
2688 }
2689 
2690 /* do_connect() Must return target values and target errnos. */
2691 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2692                            socklen_t addrlen)
2693 {
2694     void *addr;
2695     abi_long ret;
2696 
2697     if ((int)addrlen < 0) {
2698         return -TARGET_EINVAL;
2699     }
2700 
2701     addr = alloca(addrlen+1);
2702 
2703     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2704     if (ret)
2705         return ret;
2706 
2707     return get_errno(safe_connect(sockfd, addr, addrlen));
2708 }
2709 
2710 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2711 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2712                                       int flags, int send)
2713 {
2714     abi_long ret, len;
2715     struct msghdr msg;
2716     abi_ulong count;
2717     struct iovec *vec;
2718     abi_ulong target_vec;
2719 
2720     if (msgp->msg_name) {
2721         msg.msg_namelen = tswap32(msgp->msg_namelen);
2722         msg.msg_name = alloca(msg.msg_namelen+1);
2723         ret = target_to_host_sockaddr(fd, msg.msg_name,
2724                                       tswapal(msgp->msg_name),
2725                                       msg.msg_namelen);
2726         if (ret == -TARGET_EFAULT) {
2727             /* For connected sockets msg_name and msg_namelen must
2728              * be ignored, so returning EFAULT immediately is wrong.
2729              * Instead, pass a bad msg_name to the host kernel, and
2730              * let it decide whether to return EFAULT or not.
2731              */
2732             msg.msg_name = (void *)-1;
2733         } else if (ret) {
2734             goto out2;
2735         }
2736     } else {
2737         msg.msg_name = NULL;
2738         msg.msg_namelen = 0;
2739     }
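         /* Host control messages may need more room than the guest's (see
          * the conversion in target_to_host_cmsg), so allocate twice the
          * size of the guest's control buffer.
          */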
2740     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2741     msg.msg_control = alloca(msg.msg_controllen);
2742     memset(msg.msg_control, 0, msg.msg_controllen);
2743 
2744     msg.msg_flags = tswap32(msgp->msg_flags);
2745 
2746     count = tswapal(msgp->msg_iovlen);
2747     target_vec = tswapal(msgp->msg_iov);
2748 
2749     if (count > IOV_MAX) {
2750         /* sendrcvmsg returns a different errno for this condition than
2751          * readv/writev, so we must catch it here before lock_iovec() does.
2752          */
2753         ret = -TARGET_EMSGSIZE;
2754         goto out2;
2755     }
2756 
2757     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2758                      target_vec, count, send);
2759     if (vec == NULL) {
2760         ret = -host_to_target_errno(errno);
2761         goto out2;
2762     }
2763     msg.msg_iovlen = count;
2764     msg.msg_iov = vec;
2765 
2766     if (send) {
2767         if (fd_trans_target_to_host_data(fd)) {
2768             void *host_msg;
2769 
2770             host_msg = g_malloc(msg.msg_iov->iov_len);
2771             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2772             ret = fd_trans_target_to_host_data(fd)(host_msg,
2773                                                    msg.msg_iov->iov_len);
2774             if (ret >= 0) {
2775                 msg.msg_iov->iov_base = host_msg;
2776                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2777             }
2778             g_free(host_msg);
2779         } else {
2780             ret = target_to_host_cmsg(&msg, msgp);
2781             if (ret == 0) {
2782                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2783             }
2784         }
2785     } else {
2786         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2787         if (!is_error(ret)) {
2788             len = ret;
2789             if (fd_trans_host_to_target_data(fd)) {
2790                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2791                                                MIN(msg.msg_iov->iov_len, len));
2792             } else {
2793                 ret = host_to_target_cmsg(msgp, &msg);
2794             }
2795             if (!is_error(ret)) {
2796                 msgp->msg_namelen = tswap32(msg.msg_namelen);
2797                 msgp->msg_flags = tswap32(msg.msg_flags);
2798                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2799                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2800                                     msg.msg_name, msg.msg_namelen);
2801                     if (ret) {
2802                         goto out;
2803                     }
2804                 }
2805 
2806                 ret = len;
2807             }
2808         }
2809     }
2810 
2811 out:
2812     unlock_iovec(vec, target_vec, count, !send);
2813 out2:
2814     return ret;
2815 }
2816 
2817 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2818                                int flags, int send)
2819 {
2820     abi_long ret;
2821     struct target_msghdr *msgp;
2822 
2823     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2824                           msgp,
2825                           target_msg,
2826                           send ? 1 : 0)) {
2827         return -TARGET_EFAULT;
2828     }
2829     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2830     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2831     return ret;
2832 }
2833 
2834 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2835  * so it might not have this *mmsg-specific flag either.
2836  */
2837 #ifndef MSG_WAITFORONE
2838 #define MSG_WAITFORONE 0x10000
2839 #endif
2840 
2841 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2842                                 unsigned int vlen, unsigned int flags,
2843                                 int send)
2844 {
2845     struct target_mmsghdr *mmsgp;
2846     abi_long ret = 0;
2847     int i;
2848 
2849     if (vlen > UIO_MAXIOV) {
2850         vlen = UIO_MAXIOV;
2851     }
2852 
2853     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2854     if (!mmsgp) {
2855         return -TARGET_EFAULT;
2856     }
2857 
2858     for (i = 0; i < vlen; i++) {
2859         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2860         if (is_error(ret)) {
2861             break;
2862         }
2863         mmsgp[i].msg_len = tswap32(ret);
2864         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2865         if (flags & MSG_WAITFORONE) {
2866             flags |= MSG_DONTWAIT;
2867         }
2868     }
2869 
2870     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2871 
2872     /* Return number of datagrams sent if we sent any at all;
2873      * otherwise return the error.
2874      */
2875     if (i) {
2876         return i;
2877     }
2878     return ret;
2879 }
2880 
2881 /* do_accept4() Must return target values and target errnos. */
2882 static abi_long do_accept4(int fd, abi_ulong target_addr,
2883                            abi_ulong target_addrlen_addr, int flags)
2884 {
2885     socklen_t addrlen, ret_addrlen;
2886     void *addr;
2887     abi_long ret;
2888     int host_flags;
2889 
2890     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2891 
2892     if (target_addr == 0) {
2893         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2894     }
2895 
2896     /* Linux returns EINVAL if the addrlen pointer is invalid */
2897     if (get_user_u32(addrlen, target_addrlen_addr))
2898         return -TARGET_EINVAL;
2899 
2900     if ((int)addrlen < 0) {
2901         return -TARGET_EINVAL;
2902     }
2903 
2904     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2905         return -TARGET_EINVAL;
2906 
2907     addr = alloca(addrlen);
2908 
2909     ret_addrlen = addrlen;
2910     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
2911     if (!is_error(ret)) {
2912         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2913         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2914             ret = -TARGET_EFAULT;
2915         }
2916     }
2917     return ret;
2918 }
2919 
2920 /* do_getpeername() Must return target values and target errnos. */
2921 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2922                                abi_ulong target_addrlen_addr)
2923 {
2924     socklen_t addrlen, ret_addrlen;
2925     void *addr;
2926     abi_long ret;
2927 
2928     if (get_user_u32(addrlen, target_addrlen_addr))
2929         return -TARGET_EFAULT;
2930 
2931     if ((int)addrlen < 0) {
2932         return -TARGET_EINVAL;
2933     }
2934 
2935     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2936         return -TARGET_EFAULT;
2937 
2938     addr = alloca(addrlen);
2939 
2940     ret_addrlen = addrlen;
2941     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
2942     if (!is_error(ret)) {
2943         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2944         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2945             ret = -TARGET_EFAULT;
2946         }
2947     }
2948     return ret;
2949 }
2950 
2951 /* do_getsockname() Must return target values and target errnos. */
2952 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2953                                abi_ulong target_addrlen_addr)
2954 {
2955     socklen_t addrlen, ret_addrlen;
2956     void *addr;
2957     abi_long ret;
2958 
2959     if (get_user_u32(addrlen, target_addrlen_addr))
2960         return -TARGET_EFAULT;
2961 
2962     if ((int)addrlen < 0) {
2963         return -TARGET_EINVAL;
2964     }
2965 
2966     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2967         return -TARGET_EFAULT;
2968 
2969     addr = alloca(addrlen);
2970 
2971     ret_addrlen = addrlen;
2972     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
2973     if (!is_error(ret)) {
2974         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2975         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2976             ret = -TARGET_EFAULT;
2977         }
2978     }
2979     return ret;
2980 }
2981 
2982 /* do_socketpair() Must return target values and target errnos. */
2983 static abi_long do_socketpair(int domain, int type, int protocol,
2984                               abi_ulong target_tab_addr)
2985 {
2986     int tab[2];
2987     abi_long ret;
2988 
2989     target_to_host_sock_type(&type);
2990 
2991     ret = get_errno(socketpair(domain, type, protocol, tab));
2992     if (!is_error(ret)) {
2993         if (put_user_s32(tab[0], target_tab_addr)
2994             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2995             ret = -TARGET_EFAULT;
2996     }
2997     return ret;
2998 }
2999 
3000 /* do_sendto() Must return target values and target errnos. */
3001 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3002                           abi_ulong target_addr, socklen_t addrlen)
3003 {
3004     void *addr;
3005     void *host_msg;
3006     void *copy_msg = NULL;
3007     abi_long ret;
3008 
3009     if ((int)addrlen < 0) {
3010         return -TARGET_EINVAL;
3011     }
3012 
3013     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3014     if (!host_msg)
3015         return -TARGET_EFAULT;
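         /* If this fd has a registered data translator (for example a
          * netlink socket), run the outgoing payload through it on a
          * private copy of the message first.
          */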
3016     if (fd_trans_target_to_host_data(fd)) {
3017         copy_msg = host_msg;
3018         host_msg = g_malloc(len);
3019         memcpy(host_msg, copy_msg, len);
3020         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3021         if (ret < 0) {
3022             goto fail;
3023         }
3024     }
3025     if (target_addr) {
3026         addr = alloca(addrlen+1);
3027         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3028         if (ret) {
3029             goto fail;
3030         }
3031         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3032     } else {
3033         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3034     }
3035 fail:
3036     if (copy_msg) {
3037         g_free(host_msg);
3038         host_msg = copy_msg;
3039     }
3040     unlock_user(host_msg, msg, 0);
3041     return ret;
3042 }
3043 
3044 /* do_recvfrom() Must return target values and target errnos. */
3045 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3046                             abi_ulong target_addr,
3047                             abi_ulong target_addrlen)
3048 {
3049     socklen_t addrlen, ret_addrlen;
3050     void *addr;
3051     void *host_msg;
3052     abi_long ret;
3053 
3054     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3055     if (!host_msg)
3056         return -TARGET_EFAULT;
3057     if (target_addr) {
3058         if (get_user_u32(addrlen, target_addrlen)) {
3059             ret = -TARGET_EFAULT;
3060             goto fail;
3061         }
3062         if ((int)addrlen < 0) {
3063             ret = -TARGET_EINVAL;
3064             goto fail;
3065         }
3066         addr = alloca(addrlen);
3067         ret_addrlen = addrlen;
3068         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3069                                       addr, &ret_addrlen));
3070     } else {
3071         addr = NULL; /* To keep compiler quiet.  */
3072         addrlen = 0; /* To keep compiler quiet.  */
3073         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3074     }
3075     if (!is_error(ret)) {
3076         if (fd_trans_host_to_target_data(fd)) {
3077             abi_long trans;
3078             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3079             if (is_error(trans)) {
3080                 ret = trans;
3081                 goto fail;
3082             }
3083         }
3084         if (target_addr) {
3085             host_to_target_sockaddr(target_addr, addr,
3086                                     MIN(addrlen, ret_addrlen));
3087             if (put_user_u32(ret_addrlen, target_addrlen)) {
3088                 ret = -TARGET_EFAULT;
3089                 goto fail;
3090             }
3091         }
3092         unlock_user(host_msg, msg, len);
3093     } else {
3094 fail:
3095         unlock_user(host_msg, msg, 0);
3096     }
3097     return ret;
3098 }
3099 
3100 #ifdef TARGET_NR_socketcall
3101 /* do_socketcall() must return target values and target errnos. */
3102 static abi_long do_socketcall(int num, abi_ulong vptr)
3103 {
3104     static const unsigned nargs[] = { /* number of arguments per operation */
3105         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3106         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3107         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3108         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3109         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3110         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3111         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3112         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3113         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3114         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3115         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3116         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3117         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3118         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3119         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3120         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3121         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3122         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3123         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3124         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3125     };
3126     abi_long a[6]; /* max 6 args */
3127     unsigned i;
3128 
3129     /* check the range of the first argument num */
3130     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3131     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3132         return -TARGET_EINVAL;
3133     }
3134     /* ensure we have space for args */
3135     if (nargs[num] > ARRAY_SIZE(a)) {
3136         return -TARGET_EINVAL;
3137     }
3138     /* collect the arguments in a[] according to nargs[] */
3139     for (i = 0; i < nargs[num]; ++i) {
3140         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3141             return -TARGET_EFAULT;
3142         }
3143     }
3144     /* now when we have the args, invoke the appropriate underlying function */
3145     switch (num) {
3146     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3147         return do_socket(a[0], a[1], a[2]);
3148     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3149         return do_bind(a[0], a[1], a[2]);
3150     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3151         return do_connect(a[0], a[1], a[2]);
3152     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3153         return get_errno(listen(a[0], a[1]));
3154     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3155         return do_accept4(a[0], a[1], a[2], 0);
3156     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3157         return do_getsockname(a[0], a[1], a[2]);
3158     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3159         return do_getpeername(a[0], a[1], a[2]);
3160     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3161         return do_socketpair(a[0], a[1], a[2], a[3]);
3162     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3163         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3164     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3165         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3166     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3167         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3168     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3169         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3170     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3171         return get_errno(shutdown(a[0], a[1]));
3172     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3173         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3174     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3175         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3176     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3177         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3178     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3179         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3180     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3181         return do_accept4(a[0], a[1], a[2], a[3]);
3182     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3183         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3184     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3185         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3186     default:
3187         gemu_log("Unsupported socketcall: %d\n", num);
3188         return -TARGET_EINVAL;
3189     }
3190 }
3191 #endif
3192 
3193 #define N_SHM_REGIONS	32
3194 
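/* Bookkeeping for guest shmat() attachments, so that do_shmdt() can find
 * the segment size and clear the guest page flags when it is detached. */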
3195 static struct shm_region {
3196     abi_ulong start;
3197     abi_ulong size;
3198     bool in_use;
3199 } shm_regions[N_SHM_REGIONS];
3200 
3201 #ifndef TARGET_SEMID64_DS
3202 /* asm-generic version of this struct */
3203 struct target_semid64_ds
3204 {
3205   struct target_ipc_perm sem_perm;
3206   abi_ulong sem_otime;
3207 #if TARGET_ABI_BITS == 32
3208   abi_ulong __unused1;
3209 #endif
3210   abi_ulong sem_ctime;
3211 #if TARGET_ABI_BITS == 32
3212   abi_ulong __unused2;
3213 #endif
3214   abi_ulong sem_nsems;
3215   abi_ulong __unused3;
3216   abi_ulong __unused4;
3217 };
3218 #endif
3219 
3220 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3221                                                abi_ulong target_addr)
3222 {
3223     struct target_ipc_perm *target_ip;
3224     struct target_semid64_ds *target_sd;
3225 
3226     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3227         return -TARGET_EFAULT;
3228     target_ip = &(target_sd->sem_perm);
3229     host_ip->__key = tswap32(target_ip->__key);
3230     host_ip->uid = tswap32(target_ip->uid);
3231     host_ip->gid = tswap32(target_ip->gid);
3232     host_ip->cuid = tswap32(target_ip->cuid);
3233     host_ip->cgid = tswap32(target_ip->cgid);
3234 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3235     host_ip->mode = tswap32(target_ip->mode);
3236 #else
3237     host_ip->mode = tswap16(target_ip->mode);
3238 #endif
3239 #if defined(TARGET_PPC)
3240     host_ip->__seq = tswap32(target_ip->__seq);
3241 #else
3242     host_ip->__seq = tswap16(target_ip->__seq);
3243 #endif
3244     unlock_user_struct(target_sd, target_addr, 0);
3245     return 0;
3246 }
3247 
3248 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3249                                                struct ipc_perm *host_ip)
3250 {
3251     struct target_ipc_perm *target_ip;
3252     struct target_semid64_ds *target_sd;
3253 
3254     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3255         return -TARGET_EFAULT;
3256     target_ip = &(target_sd->sem_perm);
3257     target_ip->__key = tswap32(host_ip->__key);
3258     target_ip->uid = tswap32(host_ip->uid);
3259     target_ip->gid = tswap32(host_ip->gid);
3260     target_ip->cuid = tswap32(host_ip->cuid);
3261     target_ip->cgid = tswap32(host_ip->cgid);
3262 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3263     target_ip->mode = tswap32(host_ip->mode);
3264 #else
3265     target_ip->mode = tswap16(host_ip->mode);
3266 #endif
3267 #if defined(TARGET_PPC)
3268     target_ip->__seq = tswap32(host_ip->__seq);
3269 #else
3270     target_ip->__seq = tswap16(host_ip->__seq);
3271 #endif
3272     unlock_user_struct(target_sd, target_addr, 1);
3273     return 0;
3274 }
3275 
3276 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3277                                                abi_ulong target_addr)
3278 {
3279     struct target_semid64_ds *target_sd;
3280 
3281     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3282         return -TARGET_EFAULT;
3283     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3284         return -TARGET_EFAULT;
3285     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3286     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3287     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3288     unlock_user_struct(target_sd, target_addr, 0);
3289     return 0;
3290 }
3291 
3292 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3293                                                struct semid_ds *host_sd)
3294 {
3295     struct target_semid64_ds *target_sd;
3296 
3297     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3298         return -TARGET_EFAULT;
3299     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3300         return -TARGET_EFAULT;
3301     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3302     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3303     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3304     unlock_user_struct(target_sd, target_addr, 1);
3305     return 0;
3306 }
3307 
3308 struct target_seminfo {
3309     int semmap;
3310     int semmni;
3311     int semmns;
3312     int semmnu;
3313     int semmsl;
3314     int semopm;
3315     int semume;
3316     int semusz;
3317     int semvmx;
3318     int semaem;
3319 };
3320 
3321 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3322                                               struct seminfo *host_seminfo)
3323 {
3324     struct target_seminfo *target_seminfo;
3325     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3326         return -TARGET_EFAULT;
3327     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3328     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3329     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3330     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3331     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3332     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3333     __put_user(host_seminfo->semume, &target_seminfo->semume);
3334     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3335     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3336     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3337     unlock_user_struct(target_seminfo, target_addr, 1);
3338     return 0;
3339 }
3340 
3341 union semun {
3342 	int val;
3343 	struct semid_ds *buf;
3344 	unsigned short *array;
3345 	struct seminfo *__buf;
3346 };
3347 
3348 union target_semun {
3349 	int val;
3350 	abi_ulong buf;
3351 	abi_ulong array;
3352 	abi_ulong __buf;
3353 };
3354 
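/* GETALL/SETALL operate on an array of nsems values; both helpers below
 * query the semaphore set with IPC_STAT first to learn nsems. */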
3355 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3356                                                abi_ulong target_addr)
3357 {
3358     int nsems;
3359     unsigned short *array;
3360     union semun semun;
3361     struct semid_ds semid_ds;
3362     int i, ret;
3363 
3364     semun.buf = &semid_ds;
3365 
3366     ret = semctl(semid, 0, IPC_STAT, semun);
3367     if (ret == -1)
3368         return get_errno(ret);
3369 
3370     nsems = semid_ds.sem_nsems;
3371 
3372     *host_array = g_try_new(unsigned short, nsems);
3373     if (!*host_array) {
3374         return -TARGET_ENOMEM;
3375     }
3376     array = lock_user(VERIFY_READ, target_addr,
3377                       nsems*sizeof(unsigned short), 1);
3378     if (!array) {
3379         g_free(*host_array);
3380         return -TARGET_EFAULT;
3381     }
3382 
3383     for (i = 0; i < nsems; i++) {
3384         __get_user((*host_array)[i], &array[i]);
3385     }
3386     unlock_user(array, target_addr, 0);
3387 
3388     return 0;
3389 }
3390 
3391 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3392                                                unsigned short **host_array)
3393 {
3394     int nsems;
3395     unsigned short *array;
3396     union semun semun;
3397     struct semid_ds semid_ds;
3398     int i, ret;
3399 
3400     semun.buf = &semid_ds;
3401 
3402     ret = semctl(semid, 0, IPC_STAT, semun);
3403     if (ret == -1)
3404         return get_errno(ret);
3405 
3406     nsems = semid_ds.sem_nsems;
3407 
3408     array = lock_user(VERIFY_WRITE, target_addr,
3409                       nsems*sizeof(unsigned short), 0);
3410     if (!array)
3411         return -TARGET_EFAULT;
3412 
3413     for (i = 0; i < nsems; i++) {
3414         __put_user((*host_array)[i], &array[i]);
3415     }
3416     g_free(*host_array);
3417     unlock_user(array, target_addr, 1);
3418 
3419     return 0;
3420 }
3421 
3422 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3423                                  abi_ulong target_arg)
3424 {
3425     union target_semun target_su = { .buf = target_arg };
3426     union semun arg;
3427     struct semid_ds dsarg;
3428     unsigned short *array = NULL;
3429     struct seminfo seminfo;
3430     abi_long ret = -TARGET_EINVAL;
3431     abi_long err;
3432     cmd &= 0xff;
3433 
3434     switch (cmd) {
3435         case GETVAL:
3436         case SETVAL:
3437             /* In 64-bit cross-endian situations, we will erroneously pick up
3438              * the wrong half of the union for the "val" element.  To rectify
3439              * this, the entire 8-byte structure is byteswapped, followed by
3440              * a swap of the 4-byte val field. In other cases, the data is
3441              * already in proper host byte order. */
3442             if (sizeof(target_su.val) != sizeof(target_su.buf)) {
3443                 target_su.buf = tswapal(target_su.buf);
3444                 arg.val = tswap32(target_su.val);
3445             } else {
3446                 arg.val = target_su.val;
3447             }
3448             ret = get_errno(semctl(semid, semnum, cmd, arg));
3449             break;
3450         case GETALL:
3451         case SETALL:
3452             err = target_to_host_semarray(semid, &array, target_su.array);
3453             if (err)
3454                 return err;
3455             arg.array = array;
3456             ret = get_errno(semctl(semid, semnum, cmd, arg));
3457             err = host_to_target_semarray(semid, target_su.array, &array);
3458             if (err)
3459                 return err;
3460             break;
3461         case IPC_STAT:
3462         case IPC_SET:
3463         case SEM_STAT:
3464             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3465             if (err)
3466                 return err;
3467             arg.buf = &dsarg;
3468             ret = get_errno(semctl(semid, semnum, cmd, arg));
3469             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3470             if (err)
3471                 return err;
3472             break;
3473         case IPC_INFO:
3474         case SEM_INFO:
3475             arg.__buf = &seminfo;
3476             ret = get_errno(semctl(semid, semnum, cmd, arg));
3477             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3478             if (err)
3479                 return err;
3480             break;
3481         case IPC_RMID:
3482         case GETPID:
3483         case GETNCNT:
3484         case GETZCNT:
3485             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3486             break;
3487     }
3488 
3489     return ret;
3490 }
3491 
3492 struct target_sembuf {
3493     unsigned short sem_num;
3494     short sem_op;
3495     short sem_flg;
3496 };
3497 
3498 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3499                                              abi_ulong target_addr,
3500                                              unsigned nsops)
3501 {
3502     struct target_sembuf *target_sembuf;
3503     int i;
3504 
3505     target_sembuf = lock_user(VERIFY_READ, target_addr,
3506                               nsops*sizeof(struct target_sembuf), 1);
3507     if (!target_sembuf)
3508         return -TARGET_EFAULT;
3509 
3510     for (i = 0; i < nsops; i++) {
3511         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3512         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3513         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3514     }
3515 
3516     unlock_user(target_sembuf, target_addr, 0);
3517 
3518     return 0;
3519 }
3520 
3521 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3522 {
3523     struct sembuf sops[nsops];
3524 
3525     if (target_to_host_sembuf(sops, ptr, nsops))
3526         return -TARGET_EFAULT;
3527 
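    /* semop(2) is implemented via semtimedop(2) with a NULL timeout. */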
3528     return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3529 }
3530 
3531 struct target_msqid_ds
3532 {
3533     struct target_ipc_perm msg_perm;
3534     abi_ulong msg_stime;
3535 #if TARGET_ABI_BITS == 32
3536     abi_ulong __unused1;
3537 #endif
3538     abi_ulong msg_rtime;
3539 #if TARGET_ABI_BITS == 32
3540     abi_ulong __unused2;
3541 #endif
3542     abi_ulong msg_ctime;
3543 #if TARGET_ABI_BITS == 32
3544     abi_ulong __unused3;
3545 #endif
3546     abi_ulong __msg_cbytes;
3547     abi_ulong msg_qnum;
3548     abi_ulong msg_qbytes;
3549     abi_ulong msg_lspid;
3550     abi_ulong msg_lrpid;
3551     abi_ulong __unused4;
3552     abi_ulong __unused5;
3553 };
3554 
3555 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3556                                                abi_ulong target_addr)
3557 {
3558     struct target_msqid_ds *target_md;
3559 
3560     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3561         return -TARGET_EFAULT;
3562     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3563         return -TARGET_EFAULT;
3564     host_md->msg_stime = tswapal(target_md->msg_stime);
3565     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3566     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3567     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3568     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3569     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3570     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3571     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3572     unlock_user_struct(target_md, target_addr, 0);
3573     return 0;
3574 }
3575 
3576 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3577                                                struct msqid_ds *host_md)
3578 {
3579     struct target_msqid_ds *target_md;
3580 
3581     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3582         return -TARGET_EFAULT;
3583     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3584         return -TARGET_EFAULT;
3585     target_md->msg_stime = tswapal(host_md->msg_stime);
3586     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3587     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3588     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3589     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3590     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3591     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3592     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3593     unlock_user_struct(target_md, target_addr, 1);
3594     return 0;
3595 }
3596 
3597 struct target_msginfo {
3598     int msgpool;
3599     int msgmap;
3600     int msgmax;
3601     int msgmnb;
3602     int msgmni;
3603     int msgssz;
3604     int msgtql;
3605     unsigned short int msgseg;
3606 };
3607 
3608 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3609                                               struct msginfo *host_msginfo)
3610 {
3611     struct target_msginfo *target_msginfo;
3612     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3613         return -TARGET_EFAULT;
3614     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3615     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3616     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3617     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3618     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3619     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3620     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3621     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3622     unlock_user_struct(target_msginfo, target_addr, 1);
3623     return 0;
3624 }
3625 
3626 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3627 {
3628     struct msqid_ds dsarg;
3629     struct msginfo msginfo;
3630     abi_long ret = -TARGET_EINVAL;
3631 
3632     cmd &= 0xff;
3633 
3634     switch (cmd) {
3635     case IPC_STAT:
3636     case IPC_SET:
3637     case MSG_STAT:
3638         if (target_to_host_msqid_ds(&dsarg,ptr))
3639             return -TARGET_EFAULT;
3640         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3641         if (host_to_target_msqid_ds(ptr,&dsarg))
3642             return -TARGET_EFAULT;
3643         break;
3644     case IPC_RMID:
3645         ret = get_errno(msgctl(msgid, cmd, NULL));
3646         break;
3647     case IPC_INFO:
3648     case MSG_INFO:
3649         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3650         if (host_to_target_msginfo(ptr, &msginfo))
3651             return -TARGET_EFAULT;
3652         break;
3653     }
3654 
3655     return ret;
3656 }
3657 
3658 struct target_msgbuf {
3659     abi_long mtype;
3660     char mtext[1];
3661 };
3662 
3663 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3664                                  ssize_t msgsz, int msgflg)
3665 {
3666     struct target_msgbuf *target_mb;
3667     struct msgbuf *host_mb;
3668     abi_long ret = 0;
3669 
3670     if (msgsz < 0) {
3671         return -TARGET_EINVAL;
3672     }
3673 
3674     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3675         return -TARGET_EFAULT;
3676     host_mb = g_try_malloc(msgsz + sizeof(long));
3677     if (!host_mb) {
3678         unlock_user_struct(target_mb, msgp, 0);
3679         return -TARGET_ENOMEM;
3680     }
3681     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3682     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3683     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3684     g_free(host_mb);
3685     unlock_user_struct(target_mb, msgp, 0);
3686 
3687     return ret;
3688 }
3689 
3690 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3691                                  ssize_t msgsz, abi_long msgtyp,
3692                                  int msgflg)
3693 {
3694     struct target_msgbuf *target_mb;
3695     char *target_mtext;
3696     struct msgbuf *host_mb;
3697     abi_long ret = 0;
3698 
3699     if (msgsz < 0) {
3700         return -TARGET_EINVAL;
3701     }
3702 
3703     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3704         return -TARGET_EFAULT;
3705 
3706     host_mb = g_try_malloc(msgsz + sizeof(long));
3707     if (!host_mb) {
3708         ret = -TARGET_ENOMEM;
3709         goto end;
3710     }
3711     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3712 
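    /* On success, ret is the number of bytes copied into mtext. */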
3713     if (ret > 0) {
3714         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3715         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3716         if (!target_mtext) {
3717             ret = -TARGET_EFAULT;
3718             goto end;
3719         }
3720         memcpy(target_mb->mtext, host_mb->mtext, ret);
3721         unlock_user(target_mtext, target_mtext_addr, ret);
3722     }
3723 
3724     target_mb->mtype = tswapal(host_mb->mtype);
3725 
3726 end:
3727     if (target_mb)
3728         unlock_user_struct(target_mb, msgp, 1);
3729     g_free(host_mb);
3730     return ret;
3731 }
3732 
3733 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3734                                                abi_ulong target_addr)
3735 {
3736     struct target_shmid_ds *target_sd;
3737 
3738     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3739         return -TARGET_EFAULT;
3740     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3741         return -TARGET_EFAULT;
3742     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3743     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3744     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3745     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3746     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3747     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3748     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3749     unlock_user_struct(target_sd, target_addr, 0);
3750     return 0;
3751 }
3752 
3753 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3754                                                struct shmid_ds *host_sd)
3755 {
3756     struct target_shmid_ds *target_sd;
3757 
3758     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3759         return -TARGET_EFAULT;
3760     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3761         return -TARGET_EFAULT;
3762     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3763     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3764     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3765     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3766     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3767     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3768     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3769     unlock_user_struct(target_sd, target_addr, 1);
3770     return 0;
3771 }
3772 
3773 struct target_shminfo {
3774     abi_ulong shmmax;
3775     abi_ulong shmmin;
3776     abi_ulong shmmni;
3777     abi_ulong shmseg;
3778     abi_ulong shmall;
3779 };
3780 
3781 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3782                                               struct shminfo *host_shminfo)
3783 {
3784     struct target_shminfo *target_shminfo;
3785     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3786         return -TARGET_EFAULT;
3787     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3788     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3789     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3790     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3791     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3792     unlock_user_struct(target_shminfo, target_addr, 1);
3793     return 0;
3794 }
3795 
3796 struct target_shm_info {
3797     int used_ids;
3798     abi_ulong shm_tot;
3799     abi_ulong shm_rss;
3800     abi_ulong shm_swp;
3801     abi_ulong swap_attempts;
3802     abi_ulong swap_successes;
3803 };
3804 
3805 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3806                                                struct shm_info *host_shm_info)
3807 {
3808     struct target_shm_info *target_shm_info;
3809     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3810         return -TARGET_EFAULT;
3811     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3812     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3813     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3814     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3815     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3816     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3817     unlock_user_struct(target_shm_info, target_addr, 1);
3818     return 0;
3819 }
3820 
3821 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3822 {
3823     struct shmid_ds dsarg;
3824     struct shminfo shminfo;
3825     struct shm_info shm_info;
3826     abi_long ret = -TARGET_EINVAL;
3827 
3828     cmd &= 0xff;
3829 
3830     switch(cmd) {
3831     case IPC_STAT:
3832     case IPC_SET:
3833     case SHM_STAT:
3834         if (target_to_host_shmid_ds(&dsarg, buf))
3835             return -TARGET_EFAULT;
3836         ret = get_errno(shmctl(shmid, cmd, &dsarg));
3837         if (host_to_target_shmid_ds(buf, &dsarg))
3838             return -TARGET_EFAULT;
3839         break;
3840     case IPC_INFO:
3841         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3842         if (host_to_target_shminfo(buf, &shminfo))
3843             return -TARGET_EFAULT;
3844         break;
3845     case SHM_INFO:
3846         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3847         if (host_to_target_shm_info(buf, &shm_info))
3848             return -TARGET_EFAULT;
3849         break;
3850     case IPC_RMID:
3851     case SHM_LOCK:
3852     case SHM_UNLOCK:
3853         ret = get_errno(shmctl(shmid, cmd, NULL));
3854         break;
3855     }
3856 
3857     return ret;
3858 }
3859 
3860 #ifndef TARGET_FORCE_SHMLBA
3861 /* For most architectures, SHMLBA is the same as the page size;
3862  * some architectures have larger values, in which case they should
3863  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3864  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3865  * and defining its own value for SHMLBA.
3866  *
3867  * The kernel also permits SHMLBA to be set by the architecture to a
3868  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3869  * this means that addresses are rounded to the large size if
3870  * SHM_RND is set but addresses not aligned to that size are not rejected
3871  * as long as they are at least page-aligned. Since the only architecture
3872  * which uses this is ia64 this code doesn't provide for that oddity.
3873  */
3874 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
3875 {
3876     return TARGET_PAGE_SIZE;
3877 }
3878 #endif
3879 
3880 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
3881                                  int shmid, abi_ulong shmaddr, int shmflg)
3882 {
3883     abi_long raddr;
3884     void *host_raddr;
3885     struct shmid_ds shm_info;
3886     int i, ret;
3887     abi_ulong shmlba;
3888 
3889     /* find out the length of the shared memory segment */
3890     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3891     if (is_error(ret)) {
3892         /* can't get length, bail out */
3893         return ret;
3894     }
3895 
3896     shmlba = target_shmlba(cpu_env);
3897 
3898     if (shmaddr & (shmlba - 1)) {
3899         if (shmflg & SHM_RND) {
3900             shmaddr &= ~(shmlba - 1);
3901         } else {
3902             return -TARGET_EINVAL;
3903         }
3904     }
3905     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
3906         return -TARGET_EINVAL;
3907     }
3908 
3909     mmap_lock();
3910 
3911     if (shmaddr) {
3912         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3913     } else {
3914         abi_ulong mmap_start;
3915 
3916         mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3917 
3918         if (mmap_start == -1) {
3919             errno = ENOMEM;
3920             host_raddr = (void *)-1;
3921         } else
3922             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3923     }
3924 
3925     if (host_raddr == (void *)-1) {
3926         mmap_unlock();
3927         return get_errno((long)host_raddr);
3928     }
3929     raddr = h2g((unsigned long)host_raddr);
3930 
3931     page_set_flags(raddr, raddr + shm_info.shm_segsz,
3932                    PAGE_VALID | PAGE_READ |
3933                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3934 
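    /* Remember the attachment so do_shmdt() can later clear the page flags. */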
3935     for (i = 0; i < N_SHM_REGIONS; i++) {
3936         if (!shm_regions[i].in_use) {
3937             shm_regions[i].in_use = true;
3938             shm_regions[i].start = raddr;
3939             shm_regions[i].size = shm_info.shm_segsz;
3940             break;
3941         }
3942     }
3943 
3944     mmap_unlock();
3945     return raddr;
3946 
3947 }
3948 
3949 static inline abi_long do_shmdt(abi_ulong shmaddr)
3950 {
3951     int i;
3952     abi_long rv;
3953 
3954     mmap_lock();
3955 
3956     for (i = 0; i < N_SHM_REGIONS; ++i) {
3957         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3958             shm_regions[i].in_use = false;
3959             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3960             break;
3961         }
3962     }
3963     rv = get_errno(shmdt(g2h(shmaddr)));
3964 
3965     mmap_unlock();
3966 
3967     return rv;
3968 }
3969 
3970 #ifdef TARGET_NR_ipc
3971 /* ??? This only works with linear mappings.  */
3972 /* do_ipc() must return target values and target errnos. */
3973 static abi_long do_ipc(CPUArchState *cpu_env,
3974                        unsigned int call, abi_long first,
3975                        abi_long second, abi_long third,
3976                        abi_long ptr, abi_long fifth)
3977 {
3978     int version;
3979     abi_long ret = 0;
3980 
3981     version = call >> 16;
3982     call &= 0xffff;
3983 
3984     switch (call) {
3985     case IPCOP_semop:
3986         ret = do_semop(first, ptr, second);
3987         break;
3988 
3989     case IPCOP_semget:
3990         ret = get_errno(semget(first, second, third));
3991         break;
3992 
3993     case IPCOP_semctl: {
3994         /* The semun argument to semctl is passed by value, so dereference the
3995          * ptr argument. */
3996         abi_ulong atptr;
3997         get_user_ual(atptr, ptr);
3998         ret = do_semctl(first, second, third, atptr);
3999         break;
4000     }
4001 
4002     case IPCOP_msgget:
4003         ret = get_errno(msgget(first, second));
4004         break;
4005 
4006     case IPCOP_msgsnd:
4007         ret = do_msgsnd(first, ptr, second, third);
4008         break;
4009 
4010     case IPCOP_msgctl:
4011         ret = do_msgctl(first, second, ptr);
4012         break;
4013 
4014     case IPCOP_msgrcv:
4015         switch (version) {
4016         case 0:
4017             {
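                /* The old version 0 ABI passes msgp and msgtyp indirectly,
                 * via a small structure pointed to by ptr. */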
4018                 struct target_ipc_kludge {
4019                     abi_long msgp;
4020                     abi_long msgtyp;
4021                 } *tmp;
4022 
4023                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4024                     ret = -TARGET_EFAULT;
4025                     break;
4026                 }
4027 
4028                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4029 
4030                 unlock_user_struct(tmp, ptr, 0);
4031                 break;
4032             }
4033         default:
4034             ret = do_msgrcv(first, ptr, second, fifth, third);
4035         }
4036         break;
4037 
4038     case IPCOP_shmat:
4039         switch (version) {
4040         default:
4041         {
4042             abi_ulong raddr;
4043             raddr = do_shmat(cpu_env, first, ptr, second);
4044             if (is_error(raddr))
4045                 return get_errno(raddr);
4046             if (put_user_ual(raddr, third))
4047                 return -TARGET_EFAULT;
4048             break;
4049         }
4050         case 1:
4051             ret = -TARGET_EINVAL;
4052             break;
4053         }
4054         break;
4055     case IPCOP_shmdt:
4056         ret = do_shmdt(ptr);
4057         break;
4058 
4059     case IPCOP_shmget:
4060         /* IPC_* flag values are the same on all Linux platforms */
4061         ret = get_errno(shmget(first, second, third));
4062         break;
4063 
4064     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4065     case IPCOP_shmctl:
4066         ret = do_shmctl(first, second, ptr);
4067         break;
4068     default:
4069         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4070         ret = -TARGET_ENOSYS;
4071         break;
4072     }
4073     return ret;
4074 }
4075 #endif
4076 
4077 /* kernel structure types definitions */
4078 
4079 #define STRUCT(name, ...) STRUCT_ ## name,
4080 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4081 enum {
4082 #include "syscall_types.h"
4083 STRUCT_MAX
4084 };
4085 #undef STRUCT
4086 #undef STRUCT_SPECIAL
4087 
4088 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4089 #define STRUCT_SPECIAL(name)
4090 #include "syscall_types.h"
4091 #undef STRUCT
4092 #undef STRUCT_SPECIAL
4093 
4094 typedef struct IOCTLEntry IOCTLEntry;
4095 
4096 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4097                              int fd, int cmd, abi_long arg);
4098 
4099 struct IOCTLEntry {
4100     int target_cmd;
4101     unsigned int host_cmd;
4102     const char *name;
4103     int access;
4104     do_ioctl_fn *do_ioctl;
4105     const argtype arg_type[5];
4106 };
4107 
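/* Flags describing whether an ioctl's argument data must be copied in to
 * and/or back out of the guest. */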
4108 #define IOC_R 0x0001
4109 #define IOC_W 0x0002
4110 #define IOC_RW (IOC_R | IOC_W)
4111 
4112 #define MAX_STRUCT_SIZE 4096
4113 
4114 #ifdef CONFIG_FIEMAP
4115 /* So fiemap access checks don't overflow on 32 bit systems.
4116  * This is very slightly smaller than the limit imposed by
4117  * the underlying kernel.
4118  */
4119 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4120                             / sizeof(struct fiemap_extent))
4121 
4122 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4123                                        int fd, int cmd, abi_long arg)
4124 {
4125     /* The parameter for this ioctl is a struct fiemap followed
4126      * by an array of struct fiemap_extent whose size is set
4127      * in fiemap->fm_extent_count. The array is filled in by the
4128      * ioctl.
4129      */
4130     int target_size_in, target_size_out;
4131     struct fiemap *fm;
4132     const argtype *arg_type = ie->arg_type;
4133     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4134     void *argptr, *p;
4135     abi_long ret;
4136     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4137     uint32_t outbufsz;
4138     int free_fm = 0;
4139 
4140     assert(arg_type[0] == TYPE_PTR);
4141     assert(ie->access == IOC_RW);
4142     arg_type++;
4143     target_size_in = thunk_type_size(arg_type, 0);
4144     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4145     if (!argptr) {
4146         return -TARGET_EFAULT;
4147     }
4148     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4149     unlock_user(argptr, arg, 0);
4150     fm = (struct fiemap *)buf_temp;
4151     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4152         return -TARGET_EINVAL;
4153     }
4154 
4155     outbufsz = sizeof (*fm) +
4156         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4157 
4158     if (outbufsz > MAX_STRUCT_SIZE) {
4159         /* We can't fit all the extents into the fixed size buffer.
4160          * Allocate one that is large enough and use it instead.
4161          */
4162         fm = g_try_malloc(outbufsz);
4163         if (!fm) {
4164             return -TARGET_ENOMEM;
4165         }
4166         memcpy(fm, buf_temp, sizeof(struct fiemap));
4167         free_fm = 1;
4168     }
4169     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4170     if (!is_error(ret)) {
4171         target_size_out = target_size_in;
4172         /* An extent_count of 0 means we were only counting the extents
4173          * so there are no structs to copy
4174          */
4175         if (fm->fm_extent_count != 0) {
4176             target_size_out += fm->fm_mapped_extents * extent_size;
4177         }
4178         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4179         if (!argptr) {
4180             ret = -TARGET_EFAULT;
4181         } else {
4182             /* Convert the struct fiemap */
4183             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4184             if (fm->fm_extent_count != 0) {
4185                 p = argptr + target_size_in;
4186                 /* ...and then all the struct fiemap_extents */
4187                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4188                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4189                                   THUNK_TARGET);
4190                     p += extent_size;
4191                 }
4192             }
4193             unlock_user(argptr, arg, target_size_out);
4194         }
4195     }
4196     if (free_fm) {
4197         g_free(fm);
4198     }
4199     return ret;
4200 }
4201 #endif
4202 
4203 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4204                                 int fd, int cmd, abi_long arg)
4205 {
4206     const argtype *arg_type = ie->arg_type;
4207     int target_size;
4208     void *argptr;
4209     int ret;
4210     struct ifconf *host_ifconf;
4211     uint32_t outbufsz;
4212     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4213     int target_ifreq_size;
4214     int nb_ifreq;
4215     int free_buf = 0;
4216     int i;
4217     int target_ifc_len;
4218     abi_long target_ifc_buf;
4219     int host_ifc_len;
4220     char *host_ifc_buf;
4221 
4222     assert(arg_type[0] == TYPE_PTR);
4223     assert(ie->access == IOC_RW);
4224 
4225     arg_type++;
4226     target_size = thunk_type_size(arg_type, 0);
4227 
4228     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4229     if (!argptr)
4230         return -TARGET_EFAULT;
4231     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4232     unlock_user(argptr, arg, 0);
4233 
4234     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4235     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4236     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4237 
4238     if (target_ifc_buf != 0) {
4239         target_ifc_len = host_ifconf->ifc_len;
4240         nb_ifreq = target_ifc_len / target_ifreq_size;
4241         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4242 
4243         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4244         if (outbufsz > MAX_STRUCT_SIZE) {
4245             /*
4246              * We can't fit all the ifreq entries into the fixed size buffer.
4247              * Allocate one that is large enough and use it instead.
4248              */
4249             host_ifconf = malloc(outbufsz);
4250             if (!host_ifconf) {
4251                 return -TARGET_ENOMEM;
4252             }
4253             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4254             free_buf = 1;
4255         }
4256         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4257 
4258         host_ifconf->ifc_len = host_ifc_len;
4259     } else {
4260       host_ifc_buf = NULL;
4261     }
4262     host_ifconf->ifc_buf = host_ifc_buf;
4263 
4264     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4265     if (!is_error(ret)) {
4266         /* convert host ifc_len to target ifc_len */
4267 
4268         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4269         target_ifc_len = nb_ifreq * target_ifreq_size;
4270         host_ifconf->ifc_len = target_ifc_len;
4271 
4272         /* restore target ifc_buf */
4273 
4274         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4275 
4276         /* copy struct ifconf to target user */
4277 
4278         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4279         if (!argptr)
4280             return -TARGET_EFAULT;
4281         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4282         unlock_user(argptr, arg, target_size);
4283 
4284         if (target_ifc_buf != 0) {
4285             /* copy ifreq[] to target user */
4286             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4287             for (i = 0; i < nb_ifreq ; i++) {
4288                 thunk_convert(argptr + i * target_ifreq_size,
4289                               host_ifc_buf + i * sizeof(struct ifreq),
4290                               ifreq_arg_type, THUNK_TARGET);
4291             }
4292             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4293         }
4294     }
4295 
4296     if (free_buf) {
4297         free(host_ifconf);
4298     }
4299 
4300     return ret;
4301 }
4302 
4303 #if defined(CONFIG_USBFS)
4304 #if HOST_LONG_BITS > 64
4305 #error USBDEVFS thunks do not support >64 bit hosts yet.
4306 #endif
4307 struct live_urb {
4308     uint64_t target_urb_adr;
4309     uint64_t target_buf_adr;
4310     char *target_buf_ptr;
4311     struct usbdevfs_urb host_urb;
4312 };
4313 
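/* Hash table of in-flight URBs.  Each live_urb is keyed by its leading
 * target_urb_adr field (a 64-bit value), so it can be looked up again by
 * the guest URB address when the URB is reaped or discarded. */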
4314 static GHashTable *usbdevfs_urb_hashtable(void)
4315 {
4316     static GHashTable *urb_hashtable;
4317 
4318     if (!urb_hashtable) {
4319         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4320     }
4321     return urb_hashtable;
4322 }
4323 
4324 static void urb_hashtable_insert(struct live_urb *urb)
4325 {
4326     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4327     g_hash_table_insert(urb_hashtable, urb, urb);
4328 }
4329 
4330 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4331 {
4332     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4333     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4334 }
4335 
4336 static void urb_hashtable_remove(struct live_urb *urb)
4337 {
4338     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4339     g_hash_table_remove(urb_hashtable, urb);
4340 }
4341 
4342 static abi_long
4343 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4344                           int fd, int cmd, abi_long arg)
4345 {
4346     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4347     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4348     struct live_urb *lurb;
4349     void *argptr;
4350     uint64_t hurb;
4351     int target_size;
4352     uintptr_t target_urb_adr;
4353     abi_long ret;
4354 
4355     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4356 
4357     memset(buf_temp, 0, sizeof(uint64_t));
4358     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4359     if (is_error(ret)) {
4360         return ret;
4361     }
4362 
4363     memcpy(&hurb, buf_temp, sizeof(uint64_t));
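    /* Recover our live_urb wrapper from the host usbdevfs_urb pointer
     * that the kernel handed back. */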
4364     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4365     if (!lurb->target_urb_adr) {
4366         return -TARGET_EFAULT;
4367     }
4368     urb_hashtable_remove(lurb);
4369     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4370         lurb->host_urb.buffer_length);
4371     lurb->target_buf_ptr = NULL;
4372 
4373     /* restore the guest buffer pointer */
4374     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4375 
4376     /* update the guest urb struct */
4377     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4378     if (!argptr) {
4379         g_free(lurb);
4380         return -TARGET_EFAULT;
4381     }
4382     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4383     unlock_user(argptr, lurb->target_urb_adr, target_size);
4384 
4385     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4386     /* write back the urb handle */
4387     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4388     if (!argptr) {
4389         g_free(lurb);
4390         return -TARGET_EFAULT;
4391     }
4392 
4393     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4394     target_urb_adr = lurb->target_urb_adr;
4395     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4396     unlock_user(argptr, arg, target_size);
4397 
4398     g_free(lurb);
4399     return ret;
4400 }
4401 
4402 static abi_long
4403 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4404                              uint8_t *buf_temp __attribute__((unused)),
4405                              int fd, int cmd, abi_long arg)
4406 {
4407     struct live_urb *lurb;
4408 
4409     /* map target address back to host URB with metadata. */
4410     lurb = urb_hashtable_lookup(arg);
4411     if (!lurb) {
4412         return -TARGET_EFAULT;
4413     }
4414     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4415 }
4416 
4417 static abi_long
4418 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4419                             int fd, int cmd, abi_long arg)
4420 {
4421     const argtype *arg_type = ie->arg_type;
4422     int target_size;
4423     abi_long ret;
4424     void *argptr;
4425     int rw_dir;
4426     struct live_urb *lurb;
4427 
4428     /*
4429      * Each submitted URB needs to map to a unique ID for the
4430      * kernel, and that unique ID needs to be a pointer to
4431      * host memory.  Hence, we need to malloc for each URB.
4432      * Isochronous transfers have a variable length struct.
4433      */
4434     arg_type++;
4435     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4436 
4437     /* construct host copy of urb and metadata */
4438     lurb = g_try_malloc0(sizeof(struct live_urb));
4439     if (!lurb) {
4440         return -TARGET_ENOMEM;
4441     }
4442 
4443     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4444     if (!argptr) {
4445         g_free(lurb);
4446         return -TARGET_EFAULT;
4447     }
4448     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4449     unlock_user(argptr, arg, 0);
4450 
4451     lurb->target_urb_adr = arg;
4452     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4453 
4454     /* buffer space used depends on endpoint type so lock the entire buffer */
4455     /* control type urbs should check the buffer contents for true direction */
4456     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4457     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4458         lurb->host_urb.buffer_length, 1);
4459     if (lurb->target_buf_ptr == NULL) {
4460         g_free(lurb);
4461         return -TARGET_EFAULT;
4462     }
4463 
4464     /* update buffer pointer in host copy */
4465     lurb->host_urb.buffer = lurb->target_buf_ptr;
4466 
4467     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4468     if (is_error(ret)) {
4469         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4470         g_free(lurb);
4471     } else {
4472         urb_hashtable_insert(lurb);
4473     }
4474 
4475     return ret;
4476 }
4477 #endif /* CONFIG_USBFS */
4478 
4479 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4480                             int cmd, abi_long arg)
4481 {
4482     void *argptr;
4483     struct dm_ioctl *host_dm;
4484     abi_long guest_data;
4485     uint32_t guest_data_size;
4486     int target_size;
4487     const argtype *arg_type = ie->arg_type;
4488     abi_long ret;
4489     void *big_buf = NULL;
4490     char *host_data;
4491 
4492     arg_type++;
4493     target_size = thunk_type_size(arg_type, 0);
4494     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4495     if (!argptr) {
4496         ret = -TARGET_EFAULT;
4497         goto out;
4498     }
4499     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4500     unlock_user(argptr, arg, 0);
4501 
4502     /* buf_temp is too small, so fetch things into a bigger buffer */
4503     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4504     memcpy(big_buf, buf_temp, target_size);
4505     buf_temp = big_buf;
4506     host_dm = big_buf;
4507 
4508     guest_data = arg + host_dm->data_start;
4509     if ((guest_data - arg) < 0) {
4510         ret = -TARGET_EINVAL;
4511         goto out;
4512     }
4513     guest_data_size = host_dm->data_size - host_dm->data_start;
4514     host_data = (char*)host_dm + host_dm->data_start;
4515 
4516     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4517     if (!argptr) {
4518         ret = -TARGET_EFAULT;
4519         goto out;
4520     }
4521 
4522     switch (ie->host_cmd) {
4523     case DM_REMOVE_ALL:
4524     case DM_LIST_DEVICES:
4525     case DM_DEV_CREATE:
4526     case DM_DEV_REMOVE:
4527     case DM_DEV_SUSPEND:
4528     case DM_DEV_STATUS:
4529     case DM_DEV_WAIT:
4530     case DM_TABLE_STATUS:
4531     case DM_TABLE_CLEAR:
4532     case DM_TABLE_DEPS:
4533     case DM_LIST_VERSIONS:
4534         /* no input data */
4535         break;
4536     case DM_DEV_RENAME:
4537     case DM_DEV_SET_GEOMETRY:
4538         /* data contains only strings */
4539         memcpy(host_data, argptr, guest_data_size);
4540         break;
4541     case DM_TARGET_MSG:
4542         memcpy(host_data, argptr, guest_data_size);
4543         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4544         break;
4545     case DM_TABLE_LOAD:
4546     {
4547         void *gspec = argptr;
4548         void *cur_data = host_data;
4549         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4550         int spec_size = thunk_type_size(arg_type, 0);
4551         int i;
4552 
4553         for (i = 0; i < host_dm->target_count; i++) {
4554             struct dm_target_spec *spec = cur_data;
4555             uint32_t next;
4556             int slen;
4557 
4558             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4559             slen = strlen((char*)gspec + spec_size) + 1;
4560             next = spec->next;
4561             spec->next = sizeof(*spec) + slen;
4562             strcpy((char*)&spec[1], gspec + spec_size);
4563             gspec += next;
4564             cur_data += spec->next;
4565         }
4566         break;
4567     }
4568     default:
4569         ret = -TARGET_EINVAL;
4570         unlock_user(argptr, guest_data, 0);
4571         goto out;
4572     }
4573     unlock_user(argptr, guest_data, 0);
4574 
4575     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4576     if (!is_error(ret)) {
4577         guest_data = arg + host_dm->data_start;
4578         guest_data_size = host_dm->data_size - host_dm->data_start;
4579         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4580         switch (ie->host_cmd) {
4581         case DM_REMOVE_ALL:
4582         case DM_DEV_CREATE:
4583         case DM_DEV_REMOVE:
4584         case DM_DEV_RENAME:
4585         case DM_DEV_SUSPEND:
4586         case DM_DEV_STATUS:
4587         case DM_TABLE_LOAD:
4588         case DM_TABLE_CLEAR:
4589         case DM_TARGET_MSG:
4590         case DM_DEV_SET_GEOMETRY:
4591             /* no return data */
4592             break;
4593         case DM_LIST_DEVICES:
4594         {
4595             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4596             uint32_t remaining_data = guest_data_size;
4597             void *cur_data = argptr;
4598             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4599             int nl_size = 12; /* can't use thunk_size due to alignment */
4600 
4601             while (1) {
4602                 uint32_t next = nl->next;
4603                 if (next) {
4604                     nl->next = nl_size + (strlen(nl->name) + 1);
4605                 }
4606                 if (remaining_data < nl->next) {
4607                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4608                     break;
4609                 }
4610                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4611                 strcpy(cur_data + nl_size, nl->name);
4612                 cur_data += nl->next;
4613                 remaining_data -= nl->next;
4614                 if (!next) {
4615                     break;
4616                 }
4617                 nl = (void*)nl + next;
4618             }
4619             break;
4620         }
4621         case DM_DEV_WAIT:
4622         case DM_TABLE_STATUS:
4623         {
4624             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4625             void *cur_data = argptr;
4626             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4627             int spec_size = thunk_type_size(arg_type, 0);
4628             int i;
4629 
4630             for (i = 0; i < host_dm->target_count; i++) {
4631                 uint32_t next = spec->next;
4632                 int slen = strlen((char*)&spec[1]) + 1;
4633                 spec->next = (cur_data - argptr) + spec_size + slen;
4634                 if (guest_data_size < spec->next) {
4635                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4636                     break;
4637                 }
4638                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4639                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4640                 cur_data = argptr + spec->next;
4641                 spec = (void*)host_dm + host_dm->data_start + next;
4642             }
4643             break;
4644         }
4645         case DM_TABLE_DEPS:
4646         {
4647             void *hdata = (void*)host_dm + host_dm->data_start;
4648             int count = *(uint32_t*)hdata;
4649             uint64_t *hdev = hdata + 8;
4650             uint64_t *gdev = argptr + 8;
4651             int i;
4652 
4653             *(uint32_t*)argptr = tswap32(count);
4654             for (i = 0; i < count; i++) {
4655                 *gdev = tswap64(*hdev);
4656                 gdev++;
4657                 hdev++;
4658             }
4659             break;
4660         }
4661         case DM_LIST_VERSIONS:
4662         {
4663             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4664             uint32_t remaining_data = guest_data_size;
4665             void *cur_data = argptr;
4666             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4667             int vers_size = thunk_type_size(arg_type, 0);
4668 
4669             while (1) {
4670                 uint32_t next = vers->next;
4671                 if (next) {
4672                     vers->next = vers_size + (strlen(vers->name) + 1);
4673                 }
4674                 if (remaining_data < vers->next) {
4675                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4676                     break;
4677                 }
4678                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4679                 strcpy(cur_data + vers_size, vers->name);
4680                 cur_data += vers->next;
4681                 remaining_data -= vers->next;
4682                 if (!next) {
4683                     break;
4684                 }
4685                 vers = (void*)vers + next;
4686             }
4687             break;
4688         }
4689         default:
4690             unlock_user(argptr, guest_data, 0);
4691             ret = -TARGET_EINVAL;
4692             goto out;
4693         }
4694         unlock_user(argptr, guest_data, guest_data_size);
4695 
4696         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4697         if (!argptr) {
4698             ret = -TARGET_EFAULT;
4699             goto out;
4700         }
4701         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4702         unlock_user(argptr, arg, target_size);
4703     }
4704 out:
4705     g_free(big_buf);
4706     return ret;
4707 }
4708 
4709 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4710                                int cmd, abi_long arg)
4711 {
4712     void *argptr;
4713     int target_size;
4714     const argtype *arg_type = ie->arg_type;
4715     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4716     abi_long ret;
4717 
4718     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4719     struct blkpg_partition host_part;
4720 
4721     /* Read and convert blkpg */
4722     arg_type++;
4723     target_size = thunk_type_size(arg_type, 0);
4724     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4725     if (!argptr) {
4726         ret = -TARGET_EFAULT;
4727         goto out;
4728     }
4729     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4730     unlock_user(argptr, arg, 0);
4731 
4732     switch (host_blkpg->op) {
4733     case BLKPG_ADD_PARTITION:
4734     case BLKPG_DEL_PARTITION:
4735         /* payload is struct blkpg_partition */
4736         break;
4737     default:
4738         /* Unknown opcode */
4739         ret = -TARGET_EINVAL;
4740         goto out;
4741     }
4742 
4743     /* Read and convert blkpg->data */
4744     arg = (abi_long)(uintptr_t)host_blkpg->data;
4745     target_size = thunk_type_size(part_arg_type, 0);
4746     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4747     if (!argptr) {
4748         ret = -TARGET_EFAULT;
4749         goto out;
4750     }
4751     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4752     unlock_user(argptr, arg, 0);
4753 
4754     /* Swizzle the data pointer to our local copy and call! */
4755     host_blkpg->data = &host_part;
4756     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4757 
4758 out:
4759     return ret;
4760 }
4761 
4762 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4763                                 int fd, int cmd, abi_long arg)
4764 {
4765     const argtype *arg_type = ie->arg_type;
4766     const StructEntry *se;
4767     const argtype *field_types;
4768     const int *dst_offsets, *src_offsets;
4769     int target_size;
4770     void *argptr;
4771     abi_ulong *target_rt_dev_ptr = NULL;
4772     unsigned long *host_rt_dev_ptr = NULL;
4773     abi_long ret;
4774     int i;
4775 
4776     assert(ie->access == IOC_W);
4777     assert(*arg_type == TYPE_PTR);
4778     arg_type++;
4779     assert(*arg_type == TYPE_STRUCT);
4780     target_size = thunk_type_size(arg_type, 0);
4781     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4782     if (!argptr) {
4783         return -TARGET_EFAULT;
4784     }
4785     arg_type++;
4786     assert(*arg_type == (int)STRUCT_rtentry);
4787     se = struct_entries + *arg_type++;
4788     assert(se->convert[0] == NULL);
4789     /* convert the struct here so we can catch the rt_dev string */
4790     field_types = se->field_types;
4791     dst_offsets = se->field_offsets[THUNK_HOST];
4792     src_offsets = se->field_offsets[THUNK_TARGET];
4793     for (i = 0; i < se->nb_fields; i++) {
4794         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4795             assert(*field_types == TYPE_PTRVOID);
4796             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4797             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4798             if (*target_rt_dev_ptr != 0) {
4799                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4800                                                   tswapal(*target_rt_dev_ptr));
4801                 if (!*host_rt_dev_ptr) {
4802                     unlock_user(argptr, arg, 0);
4803                     return -TARGET_EFAULT;
4804                 }
4805             } else {
4806                 *host_rt_dev_ptr = 0;
4807             }
4808             field_types++;
4809             continue;
4810         }
4811         field_types = thunk_convert(buf_temp + dst_offsets[i],
4812                                     argptr + src_offsets[i],
4813                                     field_types, THUNK_HOST);
4814     }
4815     unlock_user(argptr, arg, 0);
4816 
4817     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4818 
4819     assert(host_rt_dev_ptr != NULL);
4820     assert(target_rt_dev_ptr != NULL);
4821     if (*host_rt_dev_ptr != 0) {
4822         unlock_user((void *)*host_rt_dev_ptr,
4823                     *target_rt_dev_ptr, 0);
4824     }
4825     return ret;
4826 }
4827 
4828 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4829                                      int fd, int cmd, abi_long arg)
4830 {
4831     int sig = target_to_host_signal(arg);
4832     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4833 }
4834 
4835 #ifdef TIOCGPTPEER
4836 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
4837                                      int fd, int cmd, abi_long arg)
4838 {
4839     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
4840     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
4841 }
4842 #endif
4843 
4844 static IOCTLEntry ioctl_entries[] = {
4845 #define IOCTL(cmd, access, ...) \
4846     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
4847 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
4848     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
4849 #define IOCTL_IGNORE(cmd) \
4850     { TARGET_ ## cmd, 0, #cmd },
4851 #include "ioctls.h"
4852     { 0, 0, },
4853 };
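
/*
 * A quick illustration of how this table is populated (a sketch; the
 * concrete entries live in ioctls.h): a read-only struct ioctl is
 * typically declared as something like
 *
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 *
 * which makes do_ioctl() below take the generic TYPE_PTR/IOC_R path:
 * call the host ioctl into buf_temp, convert the result to target
 * layout with thunk_convert(..., THUNK_TARGET), and copy it back to
 * guest memory via lock_user()/unlock_user().
 */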
4854 
4855 /* ??? Implement proper locking for ioctls.  */
4856 /* do_ioctl() must return target values and target errnos. */
4857 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4858 {
4859     const IOCTLEntry *ie;
4860     const argtype *arg_type;
4861     abi_long ret;
4862     uint8_t buf_temp[MAX_STRUCT_SIZE];
4863     int target_size;
4864     void *argptr;
4865 
4866     ie = ioctl_entries;
4867     for(;;) {
4868         if (ie->target_cmd == 0) {
4869             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4870             return -TARGET_ENOSYS;
4871         }
4872         if (ie->target_cmd == cmd)
4873             break;
4874         ie++;
4875     }
4876     arg_type = ie->arg_type;
4877     if (ie->do_ioctl) {
4878         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4879     } else if (!ie->host_cmd) {
4880         /* Some architectures define BSD ioctls in their headers
4881            that are not implemented in Linux.  */
4882         return -TARGET_ENOSYS;
4883     }
4884 
4885     switch(arg_type[0]) {
4886     case TYPE_NULL:
4887         /* no argument */
4888         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
4889         break;
4890     case TYPE_PTRVOID:
4891     case TYPE_INT:
4892         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
4893         break;
4894     case TYPE_PTR:
4895         arg_type++;
4896         target_size = thunk_type_size(arg_type, 0);
4897         switch(ie->access) {
4898         case IOC_R:
4899             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4900             if (!is_error(ret)) {
4901                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4902                 if (!argptr)
4903                     return -TARGET_EFAULT;
4904                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4905                 unlock_user(argptr, arg, target_size);
4906             }
4907             break;
4908         case IOC_W:
4909             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4910             if (!argptr)
4911                 return -TARGET_EFAULT;
4912             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4913             unlock_user(argptr, arg, 0);
4914             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4915             break;
4916         default:
4917         case IOC_RW:
4918             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4919             if (!argptr)
4920                 return -TARGET_EFAULT;
4921             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4922             unlock_user(argptr, arg, 0);
4923             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4924             if (!is_error(ret)) {
4925                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4926                 if (!argptr)
4927                     return -TARGET_EFAULT;
4928                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4929                 unlock_user(argptr, arg, target_size);
4930             }
4931             break;
4932         }
4933         break;
4934     default:
4935         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4936                  (long)cmd, arg_type[0]);
4937         ret = -TARGET_ENOSYS;
4938         break;
4939     }
4940     return ret;
4941 }
4942 
4943 static const bitmask_transtbl iflag_tbl[] = {
4944         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4945         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4946         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4947         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4948         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4949         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4950         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4951         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4952         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4953         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4954         { TARGET_IXON, TARGET_IXON, IXON, IXON },
4955         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4956         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4957         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4958         { 0, 0, 0, 0 }
4959 };
4960 
4961 static const bitmask_transtbl oflag_tbl[] = {
4962 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4963 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4964 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4965 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4966 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4967 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4968 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4969 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4970 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4971 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4972 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4973 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4974 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4975 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4976 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4977 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4978 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4979 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4980 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4981 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4982 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4983 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4984 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4985 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4986 	{ 0, 0, 0, 0 }
4987 };
4988 
4989 static const bitmask_transtbl cflag_tbl[] = {
4990 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4991 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4992 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4993 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4994 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4995 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4996 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4997 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4998 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4999 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5000 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5001 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5002 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5003 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5004 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5005 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5006 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5007 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5008 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5009 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5010 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5011 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5012 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5013 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5014 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5015 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5016 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5017 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5018 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5019 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5020 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5021 	{ 0, 0, 0, 0 }
5022 };
5023 
5024 static const bitmask_transtbl lflag_tbl[] = {
5025 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5026 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5027 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5028 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5029 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5030 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5031 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5032 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5033 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5034 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5035 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5036 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5037 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5038 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5039 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5040 	{ 0, 0, 0, 0 }
5041 };
5042 
5043 static void target_to_host_termios (void *dst, const void *src)
5044 {
5045     struct host_termios *host = dst;
5046     const struct target_termios *target = src;
5047 
5048     host->c_iflag =
5049         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5050     host->c_oflag =
5051         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5052     host->c_cflag =
5053         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5054     host->c_lflag =
5055         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5056     host->c_line = target->c_line;
5057 
5058     memset(host->c_cc, 0, sizeof(host->c_cc));
5059     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5060     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5061     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5062     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5063     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5064     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5065     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5066     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5067     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5068     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5069     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5070     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5071     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5072     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5073     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5074     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5075     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5076 }
5077 
5078 static void host_to_target_termios (void *dst, const void *src)
5079 {
5080     struct target_termios *target = dst;
5081     const struct host_termios *host = src;
5082 
5083     target->c_iflag =
5084         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5085     target->c_oflag =
5086         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5087     target->c_cflag =
5088         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5089     target->c_lflag =
5090         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5091     target->c_line = host->c_line;
5092 
5093     memset(target->c_cc, 0, sizeof(target->c_cc));
5094     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5095     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5096     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5097     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5098     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5099     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5100     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5101     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5102     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5103     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5104     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5105     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5106     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5107     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5108     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5109     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5110     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5111 }
5112 
5113 static const StructEntry struct_termios_def = {
5114     .convert = { host_to_target_termios, target_to_host_termios },
5115     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5116     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5117 };
5118 
5119 static bitmask_transtbl mmap_flags_tbl[] = {
5120     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5121     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5122     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5123     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5124       MAP_ANONYMOUS, MAP_ANONYMOUS },
5125     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5126       MAP_GROWSDOWN, MAP_GROWSDOWN },
5127     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5128       MAP_DENYWRITE, MAP_DENYWRITE },
5129     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5130       MAP_EXECUTABLE, MAP_EXECUTABLE },
5131     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5132     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5133       MAP_NORESERVE, MAP_NORESERVE },
5134     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5135     /* MAP_STACK has been ignored by the kernel for quite some time.
5136        Recognize it for the target insofar as we do not want to pass
5137        it through to the host.  */
5138     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5139     { 0, 0, 0, 0 }
5140 };
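
/*
 * Sketch of how this table is consumed (using the target_to_host_bitmask()
 * helper seen elsewhere in this file): the guest's mmap flags word is
 * translated field by field, roughly
 *
 *     host_flags = target_to_host_bitmask(target_flags, mmap_flags_tbl);
 *
 * so e.g. TARGET_MAP_ANONYMOUS becomes the host MAP_ANONYMOUS, while
 * TARGET_MAP_STACK maps to 0 and is deliberately dropped.
 */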
5141 
5142 #if defined(TARGET_I386)
5143 
5144 /* NOTE: there is really one LDT for all the threads */
5145 static uint8_t *ldt_table;
5146 
5147 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5148 {
5149     int size;
5150     void *p;
5151 
5152     if (!ldt_table)
5153         return 0;
5154     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5155     if (size > bytecount)
5156         size = bytecount;
5157     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5158     if (!p)
5159         return -TARGET_EFAULT;
5160     /* ??? Should this be byteswapped?  */
5161     memcpy(p, ldt_table, size);
5162     unlock_user(p, ptr, size);
5163     return size;
5164 }
5165 
5166 /* XXX: add locking support */
5167 static abi_long write_ldt(CPUX86State *env,
5168                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5169 {
5170     struct target_modify_ldt_ldt_s ldt_info;
5171     struct target_modify_ldt_ldt_s *target_ldt_info;
5172     int seg_32bit, contents, read_exec_only, limit_in_pages;
5173     int seg_not_present, useable, lm;
5174     uint32_t *lp, entry_1, entry_2;
5175 
5176     if (bytecount != sizeof(ldt_info))
5177         return -TARGET_EINVAL;
5178     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5179         return -TARGET_EFAULT;
5180     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5181     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5182     ldt_info.limit = tswap32(target_ldt_info->limit);
5183     ldt_info.flags = tswap32(target_ldt_info->flags);
5184     unlock_user_struct(target_ldt_info, ptr, 0);
5185 
5186     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5187         return -TARGET_EINVAL;
5188     seg_32bit = ldt_info.flags & 1;
5189     contents = (ldt_info.flags >> 1) & 3;
5190     read_exec_only = (ldt_info.flags >> 3) & 1;
5191     limit_in_pages = (ldt_info.flags >> 4) & 1;
5192     seg_not_present = (ldt_info.flags >> 5) & 1;
5193     useable = (ldt_info.flags >> 6) & 1;
5194 #ifdef TARGET_ABI32
5195     lm = 0;
5196 #else
5197     lm = (ldt_info.flags >> 7) & 1;
5198 #endif
5199     if (contents == 3) {
5200         if (oldmode)
5201             return -TARGET_EINVAL;
5202         if (seg_not_present == 0)
5203             return -TARGET_EINVAL;
5204     }
5205     /* allocate the LDT */
5206     if (!ldt_table) {
5207         env->ldt.base = target_mmap(0,
5208                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5209                                     PROT_READ|PROT_WRITE,
5210                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5211         if (env->ldt.base == -1)
5212             return -TARGET_ENOMEM;
5213         memset(g2h(env->ldt.base), 0,
5214                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5215         env->ldt.limit = 0xffff;
5216         ldt_table = g2h(env->ldt.base);
5217     }
5218 
5219     /* NOTE: same code as Linux kernel */
5220     /* Allow LDTs to be cleared by the user. */
5221     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5222         if (oldmode ||
5223             (contents == 0		&&
5224              read_exec_only == 1	&&
5225              seg_32bit == 0		&&
5226              limit_in_pages == 0	&&
5227              seg_not_present == 1	&&
5228              useable == 0 )) {
5229             entry_1 = 0;
5230             entry_2 = 0;
5231             goto install;
5232         }
5233     }
5234 
5235     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5236         (ldt_info.limit & 0x0ffff);
5237     entry_2 = (ldt_info.base_addr & 0xff000000) |
5238         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5239         (ldt_info.limit & 0xf0000) |
5240         ((read_exec_only ^ 1) << 9) |
5241         (contents << 10) |
5242         ((seg_not_present ^ 1) << 15) |
5243         (seg_32bit << 22) |
5244         (limit_in_pages << 23) |
5245         (lm << 21) |
5246         0x7000;
5247     if (!oldmode)
5248         entry_2 |= (useable << 20);
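    /*
     * At this point entry_1/entry_2 hold a standard x86 segment descriptor
     * (layout sketch, for reference): entry_1 is base[15:0] in its high
     * half and limit[15:0] in its low half; entry_2 packs base[31:24],
     * base[23:16], limit[19:16], the readable/writable bit (bit 9), the
     * "contents" bits (10-11), S=1 and DPL=3 (the 0x7000 above), the
     * present bit (15), AVL (20), L (21), D/B (22) and granularity (23).
     */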
5249 
5250     /* Install the new entry ...  */
5251 install:
5252     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5253     lp[0] = tswap32(entry_1);
5254     lp[1] = tswap32(entry_2);
5255     return 0;
5256 }
5257 
5258 /* specific and weird i386 syscalls */
5259 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5260                               unsigned long bytecount)
5261 {
5262     abi_long ret;
5263 
5264     switch (func) {
5265     case 0:
5266         ret = read_ldt(ptr, bytecount);
5267         break;
5268     case 1:
5269         ret = write_ldt(env, ptr, bytecount, 1);
5270         break;
5271     case 0x11:
5272         ret = write_ldt(env, ptr, bytecount, 0);
5273         break;
5274     default:
5275         ret = -TARGET_ENOSYS;
5276         break;
5277     }
5278     return ret;
5279 }
5280 
5281 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5282 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5283 {
5284     uint64_t *gdt_table = g2h(env->gdt.base);
5285     struct target_modify_ldt_ldt_s ldt_info;
5286     struct target_modify_ldt_ldt_s *target_ldt_info;
5287     int seg_32bit, contents, read_exec_only, limit_in_pages;
5288     int seg_not_present, useable, lm;
5289     uint32_t *lp, entry_1, entry_2;
5290     int i;
5291 
5292     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5293     if (!target_ldt_info)
5294         return -TARGET_EFAULT;
5295     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5296     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5297     ldt_info.limit = tswap32(target_ldt_info->limit);
5298     ldt_info.flags = tswap32(target_ldt_info->flags);
5299     if (ldt_info.entry_number == -1) {
5300         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5301             if (gdt_table[i] == 0) {
5302                 ldt_info.entry_number = i;
5303                 target_ldt_info->entry_number = tswap32(i);
5304                 break;
5305             }
5306         }
5307     }
5308     unlock_user_struct(target_ldt_info, ptr, 1);
5309 
5310     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5311         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5312            return -TARGET_EINVAL;
5313     seg_32bit = ldt_info.flags & 1;
5314     contents = (ldt_info.flags >> 1) & 3;
5315     read_exec_only = (ldt_info.flags >> 3) & 1;
5316     limit_in_pages = (ldt_info.flags >> 4) & 1;
5317     seg_not_present = (ldt_info.flags >> 5) & 1;
5318     useable = (ldt_info.flags >> 6) & 1;
5319 #ifdef TARGET_ABI32
5320     lm = 0;
5321 #else
5322     lm = (ldt_info.flags >> 7) & 1;
5323 #endif
5324 
5325     if (contents == 3) {
5326         if (seg_not_present == 0)
5327             return -TARGET_EINVAL;
5328     }
5329 
5330     /* NOTE: same code as Linux kernel */
5331     /* Allow LDTs to be cleared by the user. */
5332     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5333         if ((contents == 0             &&
5334              read_exec_only == 1       &&
5335              seg_32bit == 0            &&
5336              limit_in_pages == 0       &&
5337              seg_not_present == 1      &&
5338              useable == 0 )) {
5339             entry_1 = 0;
5340             entry_2 = 0;
5341             goto install;
5342         }
5343     }
5344 
5345     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5346         (ldt_info.limit & 0x0ffff);
5347     entry_2 = (ldt_info.base_addr & 0xff000000) |
5348         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5349         (ldt_info.limit & 0xf0000) |
5350         ((read_exec_only ^ 1) << 9) |
5351         (contents << 10) |
5352         ((seg_not_present ^ 1) << 15) |
5353         (seg_32bit << 22) |
5354         (limit_in_pages << 23) |
5355         (useable << 20) |
5356         (lm << 21) |
5357         0x7000;
5358 
5359     /* Install the new entry ...  */
5360 install:
5361     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5362     lp[0] = tswap32(entry_1);
5363     lp[1] = tswap32(entry_2);
5364     return 0;
5365 }
5366 
5367 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5368 {
5369     struct target_modify_ldt_ldt_s *target_ldt_info;
5370     uint64_t *gdt_table = g2h(env->gdt.base);
5371     uint32_t base_addr, limit, flags;
5372     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5373     int seg_not_present, useable, lm;
5374     uint32_t *lp, entry_1, entry_2;
5375 
5376     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5377     if (!target_ldt_info)
5378         return -TARGET_EFAULT;
5379     idx = tswap32(target_ldt_info->entry_number);
5380     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5381         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5382         unlock_user_struct(target_ldt_info, ptr, 1);
5383         return -TARGET_EINVAL;
5384     }
5385     lp = (uint32_t *)(gdt_table + idx);
5386     entry_1 = tswap32(lp[0]);
5387     entry_2 = tswap32(lp[1]);
5388 
5389     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5390     contents = (entry_2 >> 10) & 3;
5391     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5392     seg_32bit = (entry_2 >> 22) & 1;
5393     limit_in_pages = (entry_2 >> 23) & 1;
5394     useable = (entry_2 >> 20) & 1;
5395 #ifdef TARGET_ABI32
5396     lm = 0;
5397 #else
5398     lm = (entry_2 >> 21) & 1;
5399 #endif
5400     flags = (seg_32bit << 0) | (contents << 1) |
5401         (read_exec_only << 3) | (limit_in_pages << 4) |
5402         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5403     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5404     base_addr = (entry_1 >> 16) |
5405         (entry_2 & 0xff000000) |
5406         ((entry_2 & 0xff) << 16);
5407     target_ldt_info->base_addr = tswapal(base_addr);
5408     target_ldt_info->limit = tswap32(limit);
5409     target_ldt_info->flags = tswap32(flags);
5410     unlock_user_struct(target_ldt_info, ptr, 1);
5411     return 0;
5412 }
5413 #endif /* TARGET_I386 && TARGET_ABI32 */
5414 
5415 #ifndef TARGET_ABI32
5416 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5417 {
5418     abi_long ret = 0;
5419     abi_ulong val;
5420     int idx;
5421 
5422     switch(code) {
5423     case TARGET_ARCH_SET_GS:
5424     case TARGET_ARCH_SET_FS:
5425         if (code == TARGET_ARCH_SET_GS)
5426             idx = R_GS;
5427         else
5428             idx = R_FS;
5429         cpu_x86_load_seg(env, idx, 0);
5430         env->segs[idx].base = addr;
5431         break;
5432     case TARGET_ARCH_GET_GS:
5433     case TARGET_ARCH_GET_FS:
5434         if (code == TARGET_ARCH_GET_GS)
5435             idx = R_GS;
5436         else
5437             idx = R_FS;
5438         val = env->segs[idx].base;
5439         if (put_user(val, addr, abi_ulong))
5440             ret = -TARGET_EFAULT;
5441         break;
5442     default:
5443         ret = -TARGET_EINVAL;
5444         break;
5445     }
5446     return ret;
5447 }
5448 #endif
5449 
5450 #endif /* defined(TARGET_I386) */
5451 
5452 #define NEW_STACK_SIZE 0x40000
5453 
5454 
5455 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5456 typedef struct {
5457     CPUArchState *env;
5458     pthread_mutex_t mutex;
5459     pthread_cond_t cond;
5460     pthread_t thread;
5461     uint32_t tid;
5462     abi_ulong child_tidptr;
5463     abi_ulong parent_tidptr;
5464     sigset_t sigmask;
5465 } new_thread_info;
5466 
5467 static void *clone_func(void *arg)
5468 {
5469     new_thread_info *info = arg;
5470     CPUArchState *env;
5471     CPUState *cpu;
5472     TaskState *ts;
5473 
5474     rcu_register_thread();
5475     tcg_register_thread();
5476     env = info->env;
5477     cpu = ENV_GET_CPU(env);
5478     thread_cpu = cpu;
5479     ts = (TaskState *)cpu->opaque;
5480     info->tid = sys_gettid();
5481     task_settid(ts);
5482     if (info->child_tidptr)
5483         put_user_u32(info->tid, info->child_tidptr);
5484     if (info->parent_tidptr)
5485         put_user_u32(info->tid, info->parent_tidptr);
5486     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5487     /* Enable signals.  */
5488     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5489     /* Signal to the parent that we're ready.  */
5490     pthread_mutex_lock(&info->mutex);
5491     pthread_cond_broadcast(&info->cond);
5492     pthread_mutex_unlock(&info->mutex);
5493     /* Wait until the parent has finished initializing the tls state.  */
5494     pthread_mutex_lock(&clone_lock);
5495     pthread_mutex_unlock(&clone_lock);
5496     cpu_loop(env);
5497     /* never exits */
5498     return NULL;
5499 }
5500 
5501 /* do_fork() must return host values and target errnos (unlike most
5502    do_*() functions). */
5503 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5504                    abi_ulong parent_tidptr, target_ulong newtls,
5505                    abi_ulong child_tidptr)
5506 {
5507     CPUState *cpu = ENV_GET_CPU(env);
5508     int ret;
5509     TaskState *ts;
5510     CPUState *new_cpu;
5511     CPUArchState *new_env;
5512     sigset_t sigmask;
5513 
5514     flags &= ~CLONE_IGNORED_FLAGS;
5515 
5516     /* Emulate vfork() with fork() */
5517     if (flags & CLONE_VFORK)
5518         flags &= ~(CLONE_VFORK | CLONE_VM);
5519 
5520     if (flags & CLONE_VM) {
5521         TaskState *parent_ts = (TaskState *)cpu->opaque;
5522         new_thread_info info;
5523         pthread_attr_t attr;
5524 
5525         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5526             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5527             return -TARGET_EINVAL;
5528         }
5529 
5530         ts = g_new0(TaskState, 1);
5531         init_task_state(ts);
5532 
5533         /* Grab a mutex so that thread setup appears atomic.  */
5534         pthread_mutex_lock(&clone_lock);
5535 
5536         /* we create a new CPU instance. */
5537         new_env = cpu_copy(env);
5538         /* Init regs that differ from the parent.  */
5539         cpu_clone_regs(new_env, newsp);
5540         new_cpu = ENV_GET_CPU(new_env);
5541         new_cpu->opaque = ts;
5542         ts->bprm = parent_ts->bprm;
5543         ts->info = parent_ts->info;
5544         ts->signal_mask = parent_ts->signal_mask;
5545 
5546         if (flags & CLONE_CHILD_CLEARTID) {
5547             ts->child_tidptr = child_tidptr;
5548         }
5549 
5550         if (flags & CLONE_SETTLS) {
5551             cpu_set_tls (new_env, newtls);
5552         }
5553 
5554         memset(&info, 0, sizeof(info));
5555         pthread_mutex_init(&info.mutex, NULL);
5556         pthread_mutex_lock(&info.mutex);
5557         pthread_cond_init(&info.cond, NULL);
5558         info.env = new_env;
5559         if (flags & CLONE_CHILD_SETTID) {
5560             info.child_tidptr = child_tidptr;
5561         }
5562         if (flags & CLONE_PARENT_SETTID) {
5563             info.parent_tidptr = parent_tidptr;
5564         }
5565 
5566         ret = pthread_attr_init(&attr);
5567         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5568         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5569         /* It is not safe to deliver signals until the child has finished
5570            initializing, so temporarily block all signals.  */
5571         sigfillset(&sigmask);
5572         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5573         cpu->random_seed = qemu_guest_random_seed_thread_part1();
5574 
5575         /* If this is our first additional thread, we need to ensure we
5576          * generate code for parallel execution and flush old translations.
5577          */
5578         if (!parallel_cpus) {
5579             parallel_cpus = true;
5580             tb_flush(cpu);
5581         }
5582 
5583         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5584         /* TODO: Free new CPU state if thread creation failed.  */
5585 
5586         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5587         pthread_attr_destroy(&attr);
5588         if (ret == 0) {
5589             /* Wait for the child to initialize.  */
5590             pthread_cond_wait(&info.cond, &info.mutex);
5591             ret = info.tid;
5592         } else {
5593             ret = -1;
5594         }
5595         pthread_mutex_unlock(&info.mutex);
5596         pthread_cond_destroy(&info.cond);
5597         pthread_mutex_destroy(&info.mutex);
5598         pthread_mutex_unlock(&clone_lock);
5599     } else {
5600         /* if no CLONE_VM, we consider it a fork */
5601         if (flags & CLONE_INVALID_FORK_FLAGS) {
5602             return -TARGET_EINVAL;
5603         }
5604 
5605         /* We can't support custom termination signals */
5606         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5607             return -TARGET_EINVAL;
5608         }
5609 
5610         if (block_signals()) {
5611             return -TARGET_ERESTARTSYS;
5612         }
5613 
5614         fork_start();
5615         ret = fork();
5616         if (ret == 0) {
5617             /* Child Process.  */
5618             cpu_clone_regs(env, newsp);
5619             fork_end(1);
5620             /* There is a race condition here.  The parent process could
5621                theoretically read the TID in the child process before the child
5622                tid is set.  This would require using either ptrace
5623                (not implemented) or having *_tidptr point at a shared memory
5624                mapping.  We can't repeat the spinlock hack used above because
5625                the child process gets its own copy of the lock.  */
5626             if (flags & CLONE_CHILD_SETTID)
5627                 put_user_u32(sys_gettid(), child_tidptr);
5628             if (flags & CLONE_PARENT_SETTID)
5629                 put_user_u32(sys_gettid(), parent_tidptr);
5630             ts = (TaskState *)cpu->opaque;
5631             if (flags & CLONE_SETTLS)
5632                 cpu_set_tls (env, newtls);
5633             if (flags & CLONE_CHILD_CLEARTID)
5634                 ts->child_tidptr = child_tidptr;
5635         } else {
5636             fork_end(0);
5637         }
5638     }
5639     return ret;
5640 }
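
/*
 * For orientation (the exact flag set is up to the guest libc): a guest
 * pthread_create() typically issues clone() with something like
 *
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *     CLONE_CHILD_CLEARTID
 *
 * which takes the CLONE_VM branch above and becomes a host pthread,
 * while a plain fork()/vfork() passes little more than SIGCHLD and
 * takes the fork() branch.
 */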
5641 
5642 /* warning: doesn't handle Linux-specific flags... */
5643 static int target_to_host_fcntl_cmd(int cmd)
5644 {
5645     int ret;
5646 
5647     switch(cmd) {
5648     case TARGET_F_DUPFD:
5649     case TARGET_F_GETFD:
5650     case TARGET_F_SETFD:
5651     case TARGET_F_GETFL:
5652     case TARGET_F_SETFL:
5653         ret = cmd;
5654         break;
5655     case TARGET_F_GETLK:
5656         ret = F_GETLK64;
5657         break;
5658     case TARGET_F_SETLK:
5659         ret = F_SETLK64;
5660         break;
5661     case TARGET_F_SETLKW:
5662         ret = F_SETLKW64;
5663         break;
5664     case TARGET_F_GETOWN:
5665         ret = F_GETOWN;
5666         break;
5667     case TARGET_F_SETOWN:
5668         ret = F_SETOWN;
5669         break;
5670     case TARGET_F_GETSIG:
5671         ret = F_GETSIG;
5672         break;
5673     case TARGET_F_SETSIG:
5674         ret = F_SETSIG;
5675         break;
5676 #if TARGET_ABI_BITS == 32
5677     case TARGET_F_GETLK64:
5678         ret = F_GETLK64;
5679         break;
5680     case TARGET_F_SETLK64:
5681         ret = F_SETLK64;
5682         break;
5683     case TARGET_F_SETLKW64:
5684         ret = F_SETLKW64;
5685         break;
5686 #endif
5687     case TARGET_F_SETLEASE:
5688         ret = F_SETLEASE;
5689         break;
5690     case TARGET_F_GETLEASE:
5691         ret = F_GETLEASE;
5692         break;
5693 #ifdef F_DUPFD_CLOEXEC
5694     case TARGET_F_DUPFD_CLOEXEC:
5695         ret = F_DUPFD_CLOEXEC;
5696         break;
5697 #endif
5698     case TARGET_F_NOTIFY:
5699         ret = F_NOTIFY;
5700         break;
5701 #ifdef F_GETOWN_EX
5702     case TARGET_F_GETOWN_EX:
5703         ret = F_GETOWN_EX;
5704         break;
5705 #endif
5706 #ifdef F_SETOWN_EX
5707     case TARGET_F_SETOWN_EX:
5708         ret = F_SETOWN_EX;
5709         break;
5710 #endif
5711 #ifdef F_SETPIPE_SZ
5712     case TARGET_F_SETPIPE_SZ:
5713         ret = F_SETPIPE_SZ;
5714         break;
5715     case TARGET_F_GETPIPE_SZ:
5716         ret = F_GETPIPE_SZ;
5717         break;
5718 #endif
5719     default:
5720         ret = -TARGET_EINVAL;
5721         break;
5722     }
5723 
5724 #if defined(__powerpc64__)
5725     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and 14,
5726      * which are not supported by the kernel. The glibc fcntl wrapper actually
5727      * adjusts them to 5, 6 and 7 before making the syscall(). Since we make the
5728      * syscall directly, adjust to what the kernel supports.
5729      */
5730     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5731         ret -= F_GETLK64 - 5;
5732     }
5733 #endif
5734 
5735     return ret;
5736 }
5737 
5738 #define FLOCK_TRANSTBL \
5739     switch (type) { \
5740     TRANSTBL_CONVERT(F_RDLCK); \
5741     TRANSTBL_CONVERT(F_WRLCK); \
5742     TRANSTBL_CONVERT(F_UNLCK); \
5743     TRANSTBL_CONVERT(F_EXLCK); \
5744     TRANSTBL_CONVERT(F_SHLCK); \
5745     }
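
/*
 * FLOCK_TRANSTBL is expanded twice below with different definitions of
 * TRANSTBL_CONVERT, so that e.g. TRANSTBL_CONVERT(F_RDLCK) becomes
 * "case TARGET_F_RDLCK: return F_RDLCK;" in one direction and
 * "case F_RDLCK: return TARGET_F_RDLCK;" in the other, giving both
 * conversions from a single list of lock types.
 */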
5746 
5747 static int target_to_host_flock(int type)
5748 {
5749 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5750     FLOCK_TRANSTBL
5751 #undef  TRANSTBL_CONVERT
5752     return -TARGET_EINVAL;
5753 }
5754 
5755 static int host_to_target_flock(int type)
5756 {
5757 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5758     FLOCK_TRANSTBL
5759 #undef  TRANSTBL_CONVERT
5760     /* if we don't know how to convert the value coming
5761      * from the host, we copy it to the target field as-is
5762      */
5763     return type;
5764 }
5765 
5766 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5767                                             abi_ulong target_flock_addr)
5768 {
5769     struct target_flock *target_fl;
5770     int l_type;
5771 
5772     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5773         return -TARGET_EFAULT;
5774     }
5775 
5776     __get_user(l_type, &target_fl->l_type);
5777     l_type = target_to_host_flock(l_type);
5778     if (l_type < 0) {
5779         return l_type;
5780     }
5781     fl->l_type = l_type;
5782     __get_user(fl->l_whence, &target_fl->l_whence);
5783     __get_user(fl->l_start, &target_fl->l_start);
5784     __get_user(fl->l_len, &target_fl->l_len);
5785     __get_user(fl->l_pid, &target_fl->l_pid);
5786     unlock_user_struct(target_fl, target_flock_addr, 0);
5787     return 0;
5788 }
5789 
5790 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5791                                           const struct flock64 *fl)
5792 {
5793     struct target_flock *target_fl;
5794     short l_type;
5795 
5796     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5797         return -TARGET_EFAULT;
5798     }
5799 
5800     l_type = host_to_target_flock(fl->l_type);
5801     __put_user(l_type, &target_fl->l_type);
5802     __put_user(fl->l_whence, &target_fl->l_whence);
5803     __put_user(fl->l_start, &target_fl->l_start);
5804     __put_user(fl->l_len, &target_fl->l_len);
5805     __put_user(fl->l_pid, &target_fl->l_pid);
5806     unlock_user_struct(target_fl, target_flock_addr, 1);
5807     return 0;
5808 }
5809 
5810 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5811 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5812 
5813 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5814 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5815                                                    abi_ulong target_flock_addr)
5816 {
5817     struct target_oabi_flock64 *target_fl;
5818     int l_type;
5819 
5820     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5821         return -TARGET_EFAULT;
5822     }
5823 
5824     __get_user(l_type, &target_fl->l_type);
5825     l_type = target_to_host_flock(l_type);
5826     if (l_type < 0) {
5827         return l_type;
5828     }
5829     fl->l_type = l_type;
5830     __get_user(fl->l_whence, &target_fl->l_whence);
5831     __get_user(fl->l_start, &target_fl->l_start);
5832     __get_user(fl->l_len, &target_fl->l_len);
5833     __get_user(fl->l_pid, &target_fl->l_pid);
5834     unlock_user_struct(target_fl, target_flock_addr, 0);
5835     return 0;
5836 }
5837 
5838 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
5839                                                  const struct flock64 *fl)
5840 {
5841     struct target_oabi_flock64 *target_fl;
5842     short l_type;
5843 
5844     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5845         return -TARGET_EFAULT;
5846     }
5847 
5848     l_type = host_to_target_flock(fl->l_type);
5849     __put_user(l_type, &target_fl->l_type);
5850     __put_user(fl->l_whence, &target_fl->l_whence);
5851     __put_user(fl->l_start, &target_fl->l_start);
5852     __put_user(fl->l_len, &target_fl->l_len);
5853     __put_user(fl->l_pid, &target_fl->l_pid);
5854     unlock_user_struct(target_fl, target_flock_addr, 1);
5855     return 0;
5856 }
5857 #endif
5858 
5859 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
5860                                               abi_ulong target_flock_addr)
5861 {
5862     struct target_flock64 *target_fl;
5863     int l_type;
5864 
5865     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5866         return -TARGET_EFAULT;
5867     }
5868 
5869     __get_user(l_type, &target_fl->l_type);
5870     l_type = target_to_host_flock(l_type);
5871     if (l_type < 0) {
5872         return l_type;
5873     }
5874     fl->l_type = l_type;
5875     __get_user(fl->l_whence, &target_fl->l_whence);
5876     __get_user(fl->l_start, &target_fl->l_start);
5877     __get_user(fl->l_len, &target_fl->l_len);
5878     __get_user(fl->l_pid, &target_fl->l_pid);
5879     unlock_user_struct(target_fl, target_flock_addr, 0);
5880     return 0;
5881 }
5882 
5883 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
5884                                             const struct flock64 *fl)
5885 {
5886     struct target_flock64 *target_fl;
5887     short l_type;
5888 
5889     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5890         return -TARGET_EFAULT;
5891     }
5892 
5893     l_type = host_to_target_flock(fl->l_type);
5894     __put_user(l_type, &target_fl->l_type);
5895     __put_user(fl->l_whence, &target_fl->l_whence);
5896     __put_user(fl->l_start, &target_fl->l_start);
5897     __put_user(fl->l_len, &target_fl->l_len);
5898     __put_user(fl->l_pid, &target_fl->l_pid);
5899     unlock_user_struct(target_fl, target_flock_addr, 1);
5900     return 0;
5901 }
5902 
5903 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
5904 {
5905     struct flock64 fl64;
5906 #ifdef F_GETOWN_EX
5907     struct f_owner_ex fox;
5908     struct target_f_owner_ex *target_fox;
5909 #endif
5910     abi_long ret;
5911     int host_cmd = target_to_host_fcntl_cmd(cmd);
5912 
5913     if (host_cmd == -TARGET_EINVAL)
5914 	    return host_cmd;
5915 
5916     switch(cmd) {
5917     case TARGET_F_GETLK:
5918         ret = copy_from_user_flock(&fl64, arg);
5919         if (ret) {
5920             return ret;
5921         }
5922         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5923         if (ret == 0) {
5924             ret = copy_to_user_flock(arg, &fl64);
5925         }
5926         break;
5927 
5928     case TARGET_F_SETLK:
5929     case TARGET_F_SETLKW:
5930         ret = copy_from_user_flock(&fl64, arg);
5931         if (ret) {
5932             return ret;
5933         }
5934         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5935         break;
5936 
5937     case TARGET_F_GETLK64:
5938         ret = copy_from_user_flock64(&fl64, arg);
5939         if (ret) {
5940             return ret;
5941         }
5942         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5943         if (ret == 0) {
5944             ret = copy_to_user_flock64(arg, &fl64);
5945         }
5946         break;
5947     case TARGET_F_SETLK64:
5948     case TARGET_F_SETLKW64:
5949         ret = copy_from_user_flock64(&fl64, arg);
5950         if (ret) {
5951             return ret;
5952         }
5953         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5954         break;
5955 
5956     case TARGET_F_GETFL:
5957         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5958         if (ret >= 0) {
5959             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
5960         }
5961         break;
5962 
5963     case TARGET_F_SETFL:
5964         ret = get_errno(safe_fcntl(fd, host_cmd,
5965                                    target_to_host_bitmask(arg,
5966                                                           fcntl_flags_tbl)));
5967         break;
5968 
5969 #ifdef F_GETOWN_EX
5970     case TARGET_F_GETOWN_EX:
5971         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5972         if (ret >= 0) {
5973             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
5974                 return -TARGET_EFAULT;
5975             target_fox->type = tswap32(fox.type);
5976             target_fox->pid = tswap32(fox.pid);
5977             unlock_user_struct(target_fox, arg, 1);
5978         }
5979         break;
5980 #endif
5981 
5982 #ifdef F_SETOWN_EX
5983     case TARGET_F_SETOWN_EX:
5984         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
5985             return -TARGET_EFAULT;
5986         fox.type = tswap32(target_fox->type);
5987         fox.pid = tswap32(target_fox->pid);
5988         unlock_user_struct(target_fox, arg, 0);
5989         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5990         break;
5991 #endif
5992 
5993     case TARGET_F_SETOWN:
5994     case TARGET_F_GETOWN:
5995     case TARGET_F_SETSIG:
5996     case TARGET_F_GETSIG:
5997     case TARGET_F_SETLEASE:
5998     case TARGET_F_GETLEASE:
5999     case TARGET_F_SETPIPE_SZ:
6000     case TARGET_F_GETPIPE_SZ:
6001         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6002         break;
6003 
6004     default:
6005         ret = get_errno(safe_fcntl(fd, cmd, arg));
6006         break;
6007     }
6008     return ret;
6009 }
6010 
6011 #ifdef USE_UID16
6012 
6013 static inline int high2lowuid(int uid)
6014 {
6015     if (uid > 65535)
6016         return 65534;
6017     else
6018         return uid;
6019 }
6020 
6021 static inline int high2lowgid(int gid)
6022 {
6023     if (gid > 65535)
6024         return 65534;
6025     else
6026         return gid;
6027 }
6028 
6029 static inline int low2highuid(int uid)
6030 {
6031     if ((int16_t)uid == -1)
6032         return -1;
6033     else
6034         return uid;
6035 }
6036 
6037 static inline int low2highgid(int gid)
6038 {
6039     if ((int16_t)gid == -1)
6040         return -1;
6041     else
6042         return gid;
6043 }
6044 static inline int tswapid(int id)
6045 {
6046     return tswap16(id);
6047 }
6048 
6049 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6050 
6051 #else /* !USE_UID16 */
6052 static inline int high2lowuid(int uid)
6053 {
6054     return uid;
6055 }
6056 static inline int high2lowgid(int gid)
6057 {
6058     return gid;
6059 }
6060 static inline int low2highuid(int uid)
6061 {
6062     return uid;
6063 }
6064 static inline int low2highgid(int gid)
6065 {
6066     return gid;
6067 }
6068 static inline int tswapid(int id)
6069 {
6070     return tswap32(id);
6071 }
6072 
6073 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6074 
6075 #endif /* USE_UID16 */
6076 
6077 /* We must do direct syscalls for setting UID/GID, because we want to
6078  * implement the Linux system call semantics of "change only for this thread",
6079  * not the libc/POSIX semantics of "change for all threads in process".
6080  * (See http://ewontfix.com/17/ for more details.)
6081  * We use the 32-bit version of the syscalls if present; if it is not
6082  * then either the host architecture supports 32-bit UIDs natively with
6083  * the standard syscall, or the 16-bit UID is the best we can do.
6084  */
6085 #ifdef __NR_setuid32
6086 #define __NR_sys_setuid __NR_setuid32
6087 #else
6088 #define __NR_sys_setuid __NR_setuid
6089 #endif
6090 #ifdef __NR_setgid32
6091 #define __NR_sys_setgid __NR_setgid32
6092 #else
6093 #define __NR_sys_setgid __NR_setgid
6094 #endif
6095 #ifdef __NR_setresuid32
6096 #define __NR_sys_setresuid __NR_setresuid32
6097 #else
6098 #define __NR_sys_setresuid __NR_setresuid
6099 #endif
6100 #ifdef __NR_setresgid32
6101 #define __NR_sys_setresgid __NR_setresgid32
6102 #else
6103 #define __NR_sys_setresgid __NR_setresgid
6104 #endif
6105 
6106 _syscall1(int, sys_setuid, uid_t, uid)
6107 _syscall1(int, sys_setgid, gid_t, gid)
6108 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6109 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
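
/*
 * A concrete example of the distinction above (illustrative uid value):
 * if one guest thread calls setuid(1000), Linux syscall semantics say
 * only that thread's uid changes.  Calling the host libc setuid() here
 * would instead broadcast the change to every thread of the QEMU process
 * (glibc's setxid machinery), so the emulation uses the raw syscall:
 *
 *     sys_setuid((uid_t)1000);
 *
 * which affects only the calling thread.
 */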
6110 
6111 void syscall_init(void)
6112 {
6113     IOCTLEntry *ie;
6114     const argtype *arg_type;
6115     int size;
6116     int i;
6117 
6118     thunk_init(STRUCT_MAX);
6119 
6120 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6121 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6122 #include "syscall_types.h"
6123 #undef STRUCT
6124 #undef STRUCT_SPECIAL
6125 
6126     /* Build the target_to_host_errno_table[] from
6127      * host_to_target_errno_table[]. */
6128     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6129         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6130     }
6131 
6132     /* We patch the ioctl size if necessary. We rely on the fact that
6133        no ioctl has all bits set to '1' in the size field */
6134     ie = ioctl_entries;
6135     while (ie->target_cmd != 0) {
6136         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6137             TARGET_IOC_SIZEMASK) {
6138             arg_type = ie->arg_type;
6139             if (arg_type[0] != TYPE_PTR) {
6140                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6141                         ie->target_cmd);
6142                 exit(1);
6143             }
6144             arg_type++;
6145             size = thunk_type_size(arg_type, 0);
6146             ie->target_cmd = (ie->target_cmd &
6147                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6148                 (size << TARGET_IOC_SIZESHIFT);
6149         }
6150 
6151         /* automatic consistency check if same arch */
6152 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6153     (defined(__x86_64__) && defined(TARGET_X86_64))
6154         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6155             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6156                     ie->name, ie->target_cmd, ie->host_cmd);
6157         }
6158 #endif
6159         ie++;
6160     }
6161 }
6162 
6163 #if TARGET_ABI_BITS == 32
6164 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6165 {
6166 #ifdef TARGET_WORDS_BIGENDIAN
6167     return ((uint64_t)word0 << 32) | word1;
6168 #else
6169     return ((uint64_t)word1 << 32) | word0;
6170 #endif
6171 }
6172 #else /* TARGET_ABI_BITS == 32 */
6173 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6174 {
6175     return word0;
6176 }
6177 #endif /* TARGET_ABI_BITS != 32 */
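
/*
 * Example (illustrative values): a 32-bit guest passing the 64-bit
 * offset 0x123456789 splits it across two registers.  On a big-endian
 * target word0 carries the high half (0x1) and word1 the low half
 * (0x23456789); on a little-endian target the roles are swapped.
 * Either way target_offset64() returns 0x123456789.
 */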
6178 
6179 #ifdef TARGET_NR_truncate64
6180 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6181                                          abi_long arg2,
6182                                          abi_long arg3,
6183                                          abi_long arg4)
6184 {
6185     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6186         arg2 = arg3;
6187         arg3 = arg4;
6188     }
6189     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6190 }
6191 #endif
6192 
6193 #ifdef TARGET_NR_ftruncate64
6194 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6195                                           abi_long arg2,
6196                                           abi_long arg3,
6197                                           abi_long arg4)
6198 {
6199     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6200         arg2 = arg3;
6201         arg3 = arg4;
6202     }
6203     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6204 }
6205 #endif
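/*
 * In both helpers above, regpairs_aligned() handles ABIs that require
 * 64-bit syscall arguments to start in an even register pair: the first
 * slot is then padding, so the offset halves are shifted down from
 * arg3/arg4 before being recombined.
 */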
6206 
6207 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6208                                                abi_ulong target_addr)
6209 {
6210     struct target_timespec *target_ts;
6211 
6212     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6213         return -TARGET_EFAULT;
6214     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6215     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6216     unlock_user_struct(target_ts, target_addr, 0);
6217     return 0;
6218 }
6219 
6220 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6221                                                struct timespec *host_ts)
6222 {
6223     struct target_timespec *target_ts;
6224 
6225     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6226         return -TARGET_EFAULT;
6227     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6228     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6229     unlock_user_struct(target_ts, target_addr, 1);
6230     return 0;
6231 }
6232 
6233 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6234                                                  abi_ulong target_addr)
6235 {
6236     struct target_itimerspec *target_itspec;
6237 
6238     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6239         return -TARGET_EFAULT;
6240     }
6241 
6242     host_itspec->it_interval.tv_sec =
6243                             tswapal(target_itspec->it_interval.tv_sec);
6244     host_itspec->it_interval.tv_nsec =
6245                             tswapal(target_itspec->it_interval.tv_nsec);
6246     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6247     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6248 
6249     unlock_user_struct(target_itspec, target_addr, 1);
6250     return 0;
6251 }
6252 
6253 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6254                                                struct itimerspec *host_its)
6255 {
6256     struct target_itimerspec *target_itspec;
6257 
6258     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6259         return -TARGET_EFAULT;
6260     }
6261 
6262     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6263     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6264 
6265     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6266     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6267 
6268     unlock_user_struct(target_itspec, target_addr, 0);
6269     return 0;
6270 }
6271 
6272 static inline abi_long target_to_host_timex(struct timex *host_tx,
6273                                             abi_long target_addr)
6274 {
6275     struct target_timex *target_tx;
6276 
6277     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6278         return -TARGET_EFAULT;
6279     }
6280 
6281     __get_user(host_tx->modes, &target_tx->modes);
6282     __get_user(host_tx->offset, &target_tx->offset);
6283     __get_user(host_tx->freq, &target_tx->freq);
6284     __get_user(host_tx->maxerror, &target_tx->maxerror);
6285     __get_user(host_tx->esterror, &target_tx->esterror);
6286     __get_user(host_tx->status, &target_tx->status);
6287     __get_user(host_tx->constant, &target_tx->constant);
6288     __get_user(host_tx->precision, &target_tx->precision);
6289     __get_user(host_tx->tolerance, &target_tx->tolerance);
6290     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6291     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6292     __get_user(host_tx->tick, &target_tx->tick);
6293     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6294     __get_user(host_tx->jitter, &target_tx->jitter);
6295     __get_user(host_tx->shift, &target_tx->shift);
6296     __get_user(host_tx->stabil, &target_tx->stabil);
6297     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6298     __get_user(host_tx->calcnt, &target_tx->calcnt);
6299     __get_user(host_tx->errcnt, &target_tx->errcnt);
6300     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6301     __get_user(host_tx->tai, &target_tx->tai);
6302 
6303     unlock_user_struct(target_tx, target_addr, 0);
6304     return 0;
6305 }
6306 
6307 static inline abi_long host_to_target_timex(abi_long target_addr,
6308                                             struct timex *host_tx)
6309 {
6310     struct target_timex *target_tx;
6311 
6312     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6313         return -TARGET_EFAULT;
6314     }
6315 
6316     __put_user(host_tx->modes, &target_tx->modes);
6317     __put_user(host_tx->offset, &target_tx->offset);
6318     __put_user(host_tx->freq, &target_tx->freq);
6319     __put_user(host_tx->maxerror, &target_tx->maxerror);
6320     __put_user(host_tx->esterror, &target_tx->esterror);
6321     __put_user(host_tx->status, &target_tx->status);
6322     __put_user(host_tx->constant, &target_tx->constant);
6323     __put_user(host_tx->precision, &target_tx->precision);
6324     __put_user(host_tx->tolerance, &target_tx->tolerance);
6325     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6326     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6327     __put_user(host_tx->tick, &target_tx->tick);
6328     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6329     __put_user(host_tx->jitter, &target_tx->jitter);
6330     __put_user(host_tx->shift, &target_tx->shift);
6331     __put_user(host_tx->stabil, &target_tx->stabil);
6332     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6333     __put_user(host_tx->calcnt, &target_tx->calcnt);
6334     __put_user(host_tx->errcnt, &target_tx->errcnt);
6335     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6336     __put_user(host_tx->tai, &target_tx->tai);
6337 
6338     unlock_user_struct(target_tx, target_addr, 1);
6339     return 0;
6340 }
6341 
6342 
6343 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6344                                                abi_ulong target_addr)
6345 {
6346     struct target_sigevent *target_sevp;
6347 
6348     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6349         return -TARGET_EFAULT;
6350     }
6351 
6352     /* This union is awkward on 64-bit systems because it has a 32-bit
6353      * integer and a pointer in it; we follow the conversion approach
6354      * used for handling sigval types in signal.c so the guest should get
6355      * the correct value back even if we did a 64 bit byteswap and it's
6356      * using the 32 bit integer.
6357      */
6358     host_sevp->sigev_value.sival_ptr =
6359         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6360     host_sevp->sigev_signo =
6361         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6362     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6363     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6364 
6365     unlock_user_struct(target_sevp, target_addr, 1);
6366     return 0;
6367 }
6368 
6369 #if defined(TARGET_NR_mlockall)
6370 static inline int target_to_host_mlockall_arg(int arg)
6371 {
6372     int result = 0;
6373 
6374     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6375         result |= MCL_CURRENT;
6376     }
6377     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6378         result |= MCL_FUTURE;
6379     }
6380     return result;
6381 }
6382 #endif
6383 
6384 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6385      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6386      defined(TARGET_NR_newfstatat))
6387 static inline abi_long host_to_target_stat64(void *cpu_env,
6388                                              abi_ulong target_addr,
6389                                              struct stat *host_st)
6390 {
6391 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6392     if (((CPUARMState *)cpu_env)->eabi) {
6393         struct target_eabi_stat64 *target_st;
6394 
6395         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6396             return -TARGET_EFAULT;
6397         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6398         __put_user(host_st->st_dev, &target_st->st_dev);
6399         __put_user(host_st->st_ino, &target_st->st_ino);
6400 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6401         __put_user(host_st->st_ino, &target_st->__st_ino);
6402 #endif
6403         __put_user(host_st->st_mode, &target_st->st_mode);
6404         __put_user(host_st->st_nlink, &target_st->st_nlink);
6405         __put_user(host_st->st_uid, &target_st->st_uid);
6406         __put_user(host_st->st_gid, &target_st->st_gid);
6407         __put_user(host_st->st_rdev, &target_st->st_rdev);
6408         __put_user(host_st->st_size, &target_st->st_size);
6409         __put_user(host_st->st_blksize, &target_st->st_blksize);
6410         __put_user(host_st->st_blocks, &target_st->st_blocks);
6411         __put_user(host_st->st_atime, &target_st->target_st_atime);
6412         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6413         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6414         unlock_user_struct(target_st, target_addr, 1);
6415     } else
6416 #endif
6417     {
6418 #if defined(TARGET_HAS_STRUCT_STAT64)
6419         struct target_stat64 *target_st;
6420 #else
6421         struct target_stat *target_st;
6422 #endif
6423 
6424         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6425             return -TARGET_EFAULT;
6426         memset(target_st, 0, sizeof(*target_st));
6427         __put_user(host_st->st_dev, &target_st->st_dev);
6428         __put_user(host_st->st_ino, &target_st->st_ino);
6429 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6430         __put_user(host_st->st_ino, &target_st->__st_ino);
6431 #endif
6432         __put_user(host_st->st_mode, &target_st->st_mode);
6433         __put_user(host_st->st_nlink, &target_st->st_nlink);
6434         __put_user(host_st->st_uid, &target_st->st_uid);
6435         __put_user(host_st->st_gid, &target_st->st_gid);
6436         __put_user(host_st->st_rdev, &target_st->st_rdev);
6437         /* XXX: better use of kernel struct */
6438         __put_user(host_st->st_size, &target_st->st_size);
6439         __put_user(host_st->st_blksize, &target_st->st_blksize);
6440         __put_user(host_st->st_blocks, &target_st->st_blocks);
6441         __put_user(host_st->st_atime, &target_st->target_st_atime);
6442         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6443         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6444         unlock_user_struct(target_st, target_addr, 1);
6445     }
6446 
6447     return 0;
6448 }
6449 #endif
6450 
6451 /* ??? Using host futex calls even when target atomic operations
6452    are not really atomic probably breaks things.  However, implementing
6453    futexes locally would make futexes shared between multiple processes
6454    tricky.  Then again, they're probably useless anyway because guest
6455    atomic operations won't work either.  */
6456 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6457                     target_ulong uaddr2, int val3)
6458 {
6459     struct timespec ts, *pts;
6460     int base_op;
6461 
6462     /* ??? We assume FUTEX_* constants are the same on both host
6463        and target.  */
6464 #ifdef FUTEX_CMD_MASK
6465     base_op = op & FUTEX_CMD_MASK;
6466 #else
6467     base_op = op;
6468 #endif
6469     switch (base_op) {
6470     case FUTEX_WAIT:
6471     case FUTEX_WAIT_BITSET:
6472         if (timeout) {
6473             pts = &ts;
6474             target_to_host_timespec(pts, timeout);
6475         } else {
6476             pts = NULL;
6477         }
6478         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6479                          pts, NULL, val3));
6480     case FUTEX_WAKE:
6481         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6482     case FUTEX_FD:
6483         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6484     case FUTEX_REQUEUE:
6485     case FUTEX_CMP_REQUEUE:
6486     case FUTEX_WAKE_OP:
6487         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6488            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6489            But the prototype takes a `struct timespec *'; insert casts
6490            to satisfy the compiler.  We do not need to tswap TIMEOUT
6491            since it's not compared to guest memory.  */
6492         pts = (struct timespec *)(uintptr_t) timeout;
6493         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6494                                     g2h(uaddr2),
6495                                     (base_op == FUTEX_CMP_REQUEUE
6496                                      ? tswap32(val3)
6497                                      : val3)));
6498     default:
6499         return -TARGET_ENOSYS;
6500     }
6501 }
6502 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6503 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6504                                      abi_long handle, abi_long mount_id,
6505                                      abi_long flags)
6506 {
6507     struct file_handle *target_fh;
6508     struct file_handle *fh;
6509     int mid = 0;
6510     abi_long ret;
6511     char *name;
6512     unsigned int size, total_size;
6513 
6514     if (get_user_s32(size, handle)) {
6515         return -TARGET_EFAULT;
6516     }
6517 
6518     name = lock_user_string(pathname);
6519     if (!name) {
6520         return -TARGET_EFAULT;
6521     }
6522 
6523     total_size = sizeof(struct file_handle) + size;
6524     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6525     if (!target_fh) {
6526         unlock_user(name, pathname, 0);
6527         return -TARGET_EFAULT;
6528     }
6529 
6530     fh = g_malloc0(total_size);
6531     fh->handle_bytes = size;
6532 
6533     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6534     unlock_user(name, pathname, 0);
6535 
6536     /* man name_to_handle_at(2):
6537      * Other than the use of the handle_bytes field, the caller should treat
6538      * the file_handle structure as an opaque data type
6539      */
6540 
6541     memcpy(target_fh, fh, total_size);
6542     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6543     target_fh->handle_type = tswap32(fh->handle_type);
6544     g_free(fh);
6545     unlock_user(target_fh, handle, total_size);
6546 
6547     if (put_user_s32(mid, mount_id)) {
6548         return -TARGET_EFAULT;
6549     }
6550 
6551     return ret;
6552 
6553 }
6554 #endif
6555 
6556 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6557 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6558                                      abi_long flags)
6559 {
6560     struct file_handle *target_fh;
6561     struct file_handle *fh;
6562     unsigned int size, total_size;
6563     abi_long ret;
6564 
6565     if (get_user_s32(size, handle)) {
6566         return -TARGET_EFAULT;
6567     }
6568 
6569     total_size = sizeof(struct file_handle) + size;
6570     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6571     if (!target_fh) {
6572         return -TARGET_EFAULT;
6573     }
6574 
6575     fh = g_memdup(target_fh, total_size);
6576     fh->handle_bytes = size;
6577     fh->handle_type = tswap32(target_fh->handle_type);
6578 
6579     ret = get_errno(open_by_handle_at(mount_fd, fh,
6580                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6581 
6582     g_free(fh);
6583 
6584     unlock_user(target_fh, handle, total_size);
6585 
6586     return ret;
6587 }
6588 #endif
6589 
6590 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6591 
6592 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6593 {
6594     int host_flags;
6595     target_sigset_t *target_mask;
6596     sigset_t host_mask;
6597     abi_long ret;
6598 
6599     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6600         return -TARGET_EINVAL;
6601     }
6602     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6603         return -TARGET_EFAULT;
6604     }
6605 
6606     target_to_host_sigset(&host_mask, target_mask);
6607 
6608     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6609 
6610     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6611     if (ret >= 0) {
6612         fd_trans_register(ret, &target_signalfd_trans);
6613     }
6614 
6615     unlock_user_struct(target_mask, mask, 0);
6616 
6617     return ret;
6618 }
6619 #endif
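/*
 * The guest sigset is converted to the host layout before calling
 * signalfd(), and the new descriptor is registered with a translator so
 * that signalfd_siginfo records read from it are converted back to the
 * target format.
 */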
6620 
6621 /* Map host to target signal numbers for the wait family of syscalls.
6622    Assume all other status bits are the same.  */
6623 int host_to_target_waitstatus(int status)
6624 {
6625     if (WIFSIGNALED(status)) {
6626         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6627     }
6628     if (WIFSTOPPED(status)) {
6629         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6630                | (status & 0xff);
6631     }
6632     return status;
6633 }
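/*
 * For example, for a child terminated by a signal the exit and core-dump
 * bits of the host status are preserved while the low 7 bits are rewritten
 * to the target's number for that signal; for a stopped child the same
 * translation is applied to the signal held in bits 8-15.
 */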
6634 
6635 static int open_self_cmdline(void *cpu_env, int fd)
6636 {
6637     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6638     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6639     int i;
6640 
6641     for (i = 0; i < bprm->argc; i++) {
6642         size_t len = strlen(bprm->argv[i]) + 1;
6643 
6644         if (write(fd, bprm->argv[i], len) != len) {
6645             return -1;
6646         }
6647     }
6648 
6649     return 0;
6650 }
6651 
6652 static int open_self_maps(void *cpu_env, int fd)
6653 {
6654     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6655     TaskState *ts = cpu->opaque;
6656     FILE *fp;
6657     char *line = NULL;
6658     size_t len = 0;
6659     ssize_t read;
6660 
6661     fp = fopen("/proc/self/maps", "r");
6662     if (fp == NULL) {
6663         return -1;
6664     }
6665 
6666     while ((read = getline(&line, &len, fp)) != -1) {
6667         int fields, dev_maj, dev_min, inode;
6668         uint64_t min, max, offset;
6669         char flag_r, flag_w, flag_x, flag_p;
6670         char path[512] = "";
6671         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6672                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6673                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6674 
6675         if ((fields < 10) || (fields > 11)) {
6676             continue;
6677         }
6678         if (h2g_valid(min)) {
6679             int flags = page_get_flags(h2g(min));
6680             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6681             if (page_check_range(h2g(min), max - min, flags) == -1) {
6682                 continue;
6683             }
6684             if (h2g(min) == ts->info->stack_limit) {
6685                 pstrcpy(path, sizeof(path), "      [stack]");
6686             }
6687             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6688                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6689                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6690                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6691                     path[0] ? "         " : "", path);
6692         }
6693     }
6694 
6695     free(line);
6696     fclose(fp);
6697 
6698     return 0;
6699 }
6700 
6701 static int open_self_stat(void *cpu_env, int fd)
6702 {
6703     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6704     TaskState *ts = cpu->opaque;
6705     abi_ulong start_stack = ts->info->start_stack;
6706     int i;
6707 
6708     for (i = 0; i < 44; i++) {
6709       char buf[128];
6710       int len;
6711       uint64_t val = 0;
6712 
6713       if (i == 0) {
6714         /* pid */
6715         val = getpid();
6716         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6717       } else if (i == 1) {
6718         /* app name */
6719         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6720       } else if (i == 27) {
6721         /* stack bottom */
6722         val = start_stack;
6723         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6724       } else {
6725         /* for the rest, there is MasterCard */
6726         /* everything else is reported as zero */
6727       }
6728 
6729       len = strlen(buf);
6730       if (write(fd, buf, len) != len) {
6731           return -1;
6732       }
6733     }
6734 
6735     return 0;
6736 }
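/*
 * Of the 44 fields emitted above, only field 1 (pid), field 2 (comm) and
 * field 28 (start of stack) carry real values; everything else is
 * reported as 0.
 */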
6737 
6738 static int open_self_auxv(void *cpu_env, int fd)
6739 {
6740     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6741     TaskState *ts = cpu->opaque;
6742     abi_ulong auxv = ts->info->saved_auxv;
6743     abi_ulong len = ts->info->auxv_len;
6744     char *ptr;
6745 
6746     /*
6747      * The auxiliary vector is stored on the target process stack.
6748      * Read in the whole auxv vector and copy it to the file.
6749      */
6750     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6751     if (ptr != NULL) {
6752         while (len > 0) {
6753             ssize_t r;
6754             r = write(fd, ptr, len);
6755             if (r <= 0) {
6756                 break;
6757             }
6758             len -= r;
6759             ptr += r;
6760         }
6761         lseek(fd, 0, SEEK_SET);
6762         unlock_user(ptr, auxv, len);
6763     }
6764 
6765     return 0;
6766 }
6767 
6768 static int is_proc_myself(const char *filename, const char *entry)
6769 {
6770     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6771         filename += strlen("/proc/");
6772         if (!strncmp(filename, "self/", strlen("self/"))) {
6773             filename += strlen("self/");
6774         } else if (*filename >= '1' && *filename <= '9') {
6775             char myself[80];
6776             snprintf(myself, sizeof(myself), "%d/", getpid());
6777             if (!strncmp(filename, myself, strlen(myself))) {
6778                 filename += strlen(myself);
6779             } else {
6780                 return 0;
6781             }
6782         } else {
6783             return 0;
6784         }
6785         if (!strcmp(filename, entry)) {
6786             return 1;
6787         }
6788     }
6789     return 0;
6790 }
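/*
 * Both the "self" form and our own numeric pid are recognized, so for
 * example "/proc/self/maps" and "/proc/<getpid()>/maps" match entry
 * "maps"; any other path returns 0.
 */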
6791 
6792 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6793 static int is_proc(const char *filename, const char *entry)
6794 {
6795     return strcmp(filename, entry) == 0;
6796 }
6797 
6798 static int open_net_route(void *cpu_env, int fd)
6799 {
6800     FILE *fp;
6801     char *line = NULL;
6802     size_t len = 0;
6803     ssize_t read;
6804 
6805     fp = fopen("/proc/net/route", "r");
6806     if (fp == NULL) {
6807         return -1;
6808     }
6809 
6810     /* read header */
6811 
6812     read = getline(&line, &len, fp);
6813     dprintf(fd, "%s", line);
6814 
6815     /* read routes */
6816 
6817     while ((read = getline(&line, &len, fp)) != -1) {
6818         char iface[16];
6819         uint32_t dest, gw, mask;
6820         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6821         int fields;
6822 
6823         fields = sscanf(line,
6824                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6825                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6826                         &mask, &mtu, &window, &irtt);
6827         if (fields != 11) {
6828             continue;
6829         }
6830         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6831                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6832                 metric, tswap32(mask), mtu, window, irtt);
6833     }
6834 
6835     free(line);
6836     fclose(fp);
6837 
6838     return 0;
6839 }
6840 #endif
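/*
 * /proc/net/route presents addresses in host byte order, so when guest
 * and host endianness differ the destination, gateway and mask words are
 * byte-swapped before being handed to the guest.
 */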
6841 
6842 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
6843 {
6844     struct fake_open {
6845         const char *filename;
6846         int (*fill)(void *cpu_env, int fd);
6847         int (*cmp)(const char *s1, const char *s2);
6848     };
6849     const struct fake_open *fake_open;
6850     static const struct fake_open fakes[] = {
6851         { "maps", open_self_maps, is_proc_myself },
6852         { "stat", open_self_stat, is_proc_myself },
6853         { "auxv", open_self_auxv, is_proc_myself },
6854         { "cmdline", open_self_cmdline, is_proc_myself },
6855 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6856         { "/proc/net/route", open_net_route, is_proc },
6857 #endif
6858         { NULL, NULL, NULL }
6859     };
6860 
6861     if (is_proc_myself(pathname, "exe")) {
6862         int execfd = qemu_getauxval(AT_EXECFD);
6863         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
6864     }
6865 
6866     for (fake_open = fakes; fake_open->filename; fake_open++) {
6867         if (fake_open->cmp(pathname, fake_open->filename)) {
6868             break;
6869         }
6870     }
6871 
6872     if (fake_open->filename) {
6873         const char *tmpdir;
6874         char filename[PATH_MAX];
6875         int fd, r;
6876 
6877         /* create a temporary file to hold the synthesized /proc contents */
6878         tmpdir = getenv("TMPDIR");
6879         if (!tmpdir)
6880             tmpdir = "/tmp";
6881         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
6882         fd = mkstemp(filename);
6883         if (fd < 0) {
6884             return fd;
6885         }
6886         unlink(filename);
6887 
6888         if ((r = fake_open->fill(cpu_env, fd))) {
6889             int e = errno;
6890             close(fd);
6891             errno = e;
6892             return r;
6893         }
6894         lseek(fd, 0, SEEK_SET);
6895 
6896         return fd;
6897     }
6898 
6899     return safe_openat(dirfd, path(pathname), flags, mode);
6900 }
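/*
 * Opens of the emulated /proc entries above are serviced by writing the
 * synthesized contents into an unlinked mkstemp() file and returning that
 * descriptor; "/proc/self/exe" is redirected to the binary being emulated,
 * and everything else is passed through to the host via safe_openat().
 */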
6901 
6902 #define TIMER_MAGIC 0x0caf0000
6903 #define TIMER_MAGIC_MASK 0xffff0000
6904 
6905 /* Convert the QEMU-provided timer ID back to the internal 16-bit index format */
6906 static target_timer_t get_timer_id(abi_long arg)
6907 {
6908     target_timer_t timerid = arg;
6909 
6910     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6911         return -TARGET_EINVAL;
6912     }
6913 
6914     timerid &= 0xffff;
6915 
6916     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6917         return -TARGET_EINVAL;
6918     }
6919 
6920     return timerid;
6921 }
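/*
 * For example, a guest-visible timer id of 0x0caf0003 maps back to slot 3
 * of g_posix_timers; an id without the 0x0caf tag in its upper 16 bits, or
 * with an out-of-range index, is rejected with EINVAL.
 */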
6922 
6923 static int target_to_host_cpu_mask(unsigned long *host_mask,
6924                                    size_t host_size,
6925                                    abi_ulong target_addr,
6926                                    size_t target_size)
6927 {
6928     unsigned target_bits = sizeof(abi_ulong) * 8;
6929     unsigned host_bits = sizeof(*host_mask) * 8;
6930     abi_ulong *target_mask;
6931     unsigned i, j;
6932 
6933     assert(host_size >= target_size);
6934 
6935     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
6936     if (!target_mask) {
6937         return -TARGET_EFAULT;
6938     }
6939     memset(host_mask, 0, host_size);
6940 
6941     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6942         unsigned bit = i * target_bits;
6943         abi_ulong val;
6944 
6945         __get_user(val, &target_mask[i]);
6946         for (j = 0; j < target_bits; j++, bit++) {
6947             if (val & (1UL << j)) {
6948                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
6949             }
6950         }
6951     }
6952 
6953     unlock_user(target_mask, target_addr, 0);
6954     return 0;
6955 }
6956 
6957 static int host_to_target_cpu_mask(const unsigned long *host_mask,
6958                                    size_t host_size,
6959                                    abi_ulong target_addr,
6960                                    size_t target_size)
6961 {
6962     unsigned target_bits = sizeof(abi_ulong) * 8;
6963     unsigned host_bits = sizeof(*host_mask) * 8;
6964     abi_ulong *target_mask;
6965     unsigned i, j;
6966 
6967     assert(host_size >= target_size);
6968 
6969     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
6970     if (!target_mask) {
6971         return -TARGET_EFAULT;
6972     }
6973 
6974     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6975         unsigned bit = i * target_bits;
6976         abi_ulong val = 0;
6977 
6978         for (j = 0; j < target_bits; j++, bit++) {
6979             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
6980                 val |= 1UL << j;
6981             }
6982         }
6983         __put_user(val, &target_mask[i]);
6984     }
6985 
6986     unlock_user(target_mask, target_addr, target_size);
6987     return 0;
6988 }
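/*
 * The two helpers above repack CPU affinity masks bit by bit between the
 * guest's abi_ulong-sized words and the host's unsigned long words, which
 * copes with, for example, a 32-bit guest running on a 64-bit host where
 * the word sizes differ.
 */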
6989 
6990 /* This is an internal helper for do_syscall so that it is easier
6991  * to have a single return point, so that actions, such as logging
6992  * of syscall results, can be performed.
6993  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
6994  */
6995 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
6996                             abi_long arg2, abi_long arg3, abi_long arg4,
6997                             abi_long arg5, abi_long arg6, abi_long arg7,
6998                             abi_long arg8)
6999 {
7000     CPUState *cpu = ENV_GET_CPU(cpu_env);
7001     abi_long ret;
7002 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7003     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7004     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
7005     struct stat st;
7006 #endif
7007 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7008     || defined(TARGET_NR_fstatfs)
7009     struct statfs stfs;
7010 #endif
7011     void *p;
7012 
7013     switch(num) {
7014     case TARGET_NR_exit:
7015         /* In old applications this may be used to implement _exit(2).
7016            However, in threaded applications it is used for thread termination,
7017            and _exit_group is used for application termination.
7018            Do thread termination if we have more than one thread.  */
7019 
7020         if (block_signals()) {
7021             return -TARGET_ERESTARTSYS;
7022         }
7023 
7024         cpu_list_lock();
7025 
7026         if (CPU_NEXT(first_cpu)) {
7027             TaskState *ts;
7028 
7029             /* Remove the CPU from the list.  */
7030             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7031 
7032             cpu_list_unlock();
7033 
7034             ts = cpu->opaque;
7035             if (ts->child_tidptr) {
7036                 put_user_u32(0, ts->child_tidptr);
7037                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7038                           NULL, NULL, 0);
7039             }
7040             thread_cpu = NULL;
7041             object_unref(OBJECT(cpu));
7042             g_free(ts);
7043             rcu_unregister_thread();
7044             pthread_exit(NULL);
7045         }
7046 
7047         cpu_list_unlock();
7048         preexit_cleanup(cpu_env, arg1);
7049         _exit(arg1);
7050         return 0; /* avoid warning */
7051     case TARGET_NR_read:
7052         if (arg2 == 0 && arg3 == 0) {
7053             return get_errno(safe_read(arg1, 0, 0));
7054         } else {
7055             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7056                 return -TARGET_EFAULT;
7057             ret = get_errno(safe_read(arg1, p, arg3));
7058             if (ret >= 0 &&
7059                 fd_trans_host_to_target_data(arg1)) {
7060                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7061             }
7062             unlock_user(p, arg2, ret);
7063         }
7064         return ret;
7065     case TARGET_NR_write:
7066         if (arg2 == 0 && arg3 == 0) {
7067             return get_errno(safe_write(arg1, 0, 0));
7068         }
7069         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7070             return -TARGET_EFAULT;
7071         if (fd_trans_target_to_host_data(arg1)) {
7072             void *copy = g_malloc(arg3);
7073             memcpy(copy, p, arg3);
7074             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7075             if (ret >= 0) {
7076                 ret = get_errno(safe_write(arg1, copy, ret));
7077             }
7078             g_free(copy);
7079         } else {
7080             ret = get_errno(safe_write(arg1, p, arg3));
7081         }
7082         unlock_user(p, arg2, 0);
7083         return ret;
7084 
7085 #ifdef TARGET_NR_open
7086     case TARGET_NR_open:
7087         if (!(p = lock_user_string(arg1)))
7088             return -TARGET_EFAULT;
7089         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7090                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7091                                   arg3));
7092         fd_trans_unregister(ret);
7093         unlock_user(p, arg1, 0);
7094         return ret;
7095 #endif
7096     case TARGET_NR_openat:
7097         if (!(p = lock_user_string(arg2)))
7098             return -TARGET_EFAULT;
7099         ret = get_errno(do_openat(cpu_env, arg1, p,
7100                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7101                                   arg4));
7102         fd_trans_unregister(ret);
7103         unlock_user(p, arg2, 0);
7104         return ret;
7105 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7106     case TARGET_NR_name_to_handle_at:
7107         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7108         return ret;
7109 #endif
7110 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7111     case TARGET_NR_open_by_handle_at:
7112         ret = do_open_by_handle_at(arg1, arg2, arg3);
7113         fd_trans_unregister(ret);
7114         return ret;
7115 #endif
7116     case TARGET_NR_close:
7117         fd_trans_unregister(arg1);
7118         return get_errno(close(arg1));
7119 
7120     case TARGET_NR_brk:
7121         return do_brk(arg1);
7122 #ifdef TARGET_NR_fork
7123     case TARGET_NR_fork:
7124         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7125 #endif
7126 #ifdef TARGET_NR_waitpid
7127     case TARGET_NR_waitpid:
7128         {
7129             int status;
7130             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7131             if (!is_error(ret) && arg2 && ret
7132                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7133                 return -TARGET_EFAULT;
7134         }
7135         return ret;
7136 #endif
7137 #ifdef TARGET_NR_waitid
7138     case TARGET_NR_waitid:
7139         {
7140             siginfo_t info;
7141             info.si_pid = 0;
7142             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7143             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7144                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7145                     return -TARGET_EFAULT;
7146                 host_to_target_siginfo(p, &info);
7147                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7148             }
7149         }
7150         return ret;
7151 #endif
7152 #ifdef TARGET_NR_creat /* not on alpha */
7153     case TARGET_NR_creat:
7154         if (!(p = lock_user_string(arg1)))
7155             return -TARGET_EFAULT;
7156         ret = get_errno(creat(p, arg2));
7157         fd_trans_unregister(ret);
7158         unlock_user(p, arg1, 0);
7159         return ret;
7160 #endif
7161 #ifdef TARGET_NR_link
7162     case TARGET_NR_link:
7163         {
7164             void * p2;
7165             p = lock_user_string(arg1);
7166             p2 = lock_user_string(arg2);
7167             if (!p || !p2)
7168                 ret = -TARGET_EFAULT;
7169             else
7170                 ret = get_errno(link(p, p2));
7171             unlock_user(p2, arg2, 0);
7172             unlock_user(p, arg1, 0);
7173         }
7174         return ret;
7175 #endif
7176 #if defined(TARGET_NR_linkat)
7177     case TARGET_NR_linkat:
7178         {
7179             void * p2 = NULL;
7180             if (!arg2 || !arg4)
7181                 return -TARGET_EFAULT;
7182             p  = lock_user_string(arg2);
7183             p2 = lock_user_string(arg4);
7184             if (!p || !p2)
7185                 ret = -TARGET_EFAULT;
7186             else
7187                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7188             unlock_user(p, arg2, 0);
7189             unlock_user(p2, arg4, 0);
7190         }
7191         return ret;
7192 #endif
7193 #ifdef TARGET_NR_unlink
7194     case TARGET_NR_unlink:
7195         if (!(p = lock_user_string(arg1)))
7196             return -TARGET_EFAULT;
7197         ret = get_errno(unlink(p));
7198         unlock_user(p, arg1, 0);
7199         return ret;
7200 #endif
7201 #if defined(TARGET_NR_unlinkat)
7202     case TARGET_NR_unlinkat:
7203         if (!(p = lock_user_string(arg2)))
7204             return -TARGET_EFAULT;
7205         ret = get_errno(unlinkat(arg1, p, arg3));
7206         unlock_user(p, arg2, 0);
7207         return ret;
7208 #endif
7209     case TARGET_NR_execve:
7210         {
7211             char **argp, **envp;
7212             int argc, envc;
7213             abi_ulong gp;
7214             abi_ulong guest_argp;
7215             abi_ulong guest_envp;
7216             abi_ulong addr;
7217             char **q;
7218             int total_size = 0;
7219 
7220             argc = 0;
7221             guest_argp = arg2;
7222             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7223                 if (get_user_ual(addr, gp))
7224                     return -TARGET_EFAULT;
7225                 if (!addr)
7226                     break;
7227                 argc++;
7228             }
7229             envc = 0;
7230             guest_envp = arg3;
7231             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7232                 if (get_user_ual(addr, gp))
7233                     return -TARGET_EFAULT;
7234                 if (!addr)
7235                     break;
7236                 envc++;
7237             }
7238 
7239             argp = g_new0(char *, argc + 1);
7240             envp = g_new0(char *, envc + 1);
7241 
7242             for (gp = guest_argp, q = argp; gp;
7243                   gp += sizeof(abi_ulong), q++) {
7244                 if (get_user_ual(addr, gp))
7245                     goto execve_efault;
7246                 if (!addr)
7247                     break;
7248                 if (!(*q = lock_user_string(addr)))
7249                     goto execve_efault;
7250                 total_size += strlen(*q) + 1;
7251             }
7252             *q = NULL;
7253 
7254             for (gp = guest_envp, q = envp; gp;
7255                   gp += sizeof(abi_ulong), q++) {
7256                 if (get_user_ual(addr, gp))
7257                     goto execve_efault;
7258                 if (!addr)
7259                     break;
7260                 if (!(*q = lock_user_string(addr)))
7261                     goto execve_efault;
7262                 total_size += strlen(*q) + 1;
7263             }
7264             *q = NULL;
7265 
7266             if (!(p = lock_user_string(arg1)))
7267                 goto execve_efault;
7268             /* Although execve() is not an interruptible syscall it is
7269              * a special case where we must use the safe_syscall wrapper:
7270              * if we allow a signal to happen before we make the host
7271              * syscall then we will 'lose' it, because at the point of
7272              * execve the process leaves QEMU's control. So we use the
7273              * safe syscall wrapper to ensure that we either take the
7274              * signal as a guest signal, or else it does not happen
7275              * before the execve completes and makes it the other
7276              * program's problem.
7277              */
7278             ret = get_errno(safe_execve(p, argp, envp));
7279             unlock_user(p, arg1, 0);
7280 
7281             goto execve_end;
7282 
7283         execve_efault:
7284             ret = -TARGET_EFAULT;
7285 
7286         execve_end:
7287             for (gp = guest_argp, q = argp; *q;
7288                   gp += sizeof(abi_ulong), q++) {
7289                 if (get_user_ual(addr, gp)
7290                     || !addr)
7291                     break;
7292                 unlock_user(*q, addr, 0);
7293             }
7294             for (gp = guest_envp, q = envp; *q;
7295                   gp += sizeof(abi_ulong), q++) {
7296                 if (get_user_ual(addr, gp)
7297                     || !addr)
7298                     break;
7299                 unlock_user(*q, addr, 0);
7300             }
7301 
7302             g_free(argp);
7303             g_free(envp);
7304         }
7305         return ret;
7306     case TARGET_NR_chdir:
7307         if (!(p = lock_user_string(arg1)))
7308             return -TARGET_EFAULT;
7309         ret = get_errno(chdir(p));
7310         unlock_user(p, arg1, 0);
7311         return ret;
7312 #ifdef TARGET_NR_time
7313     case TARGET_NR_time:
7314         {
7315             time_t host_time;
7316             ret = get_errno(time(&host_time));
7317             if (!is_error(ret)
7318                 && arg1
7319                 && put_user_sal(host_time, arg1))
7320                 return -TARGET_EFAULT;
7321         }
7322         return ret;
7323 #endif
7324 #ifdef TARGET_NR_mknod
7325     case TARGET_NR_mknod:
7326         if (!(p = lock_user_string(arg1)))
7327             return -TARGET_EFAULT;
7328         ret = get_errno(mknod(p, arg2, arg3));
7329         unlock_user(p, arg1, 0);
7330         return ret;
7331 #endif
7332 #if defined(TARGET_NR_mknodat)
7333     case TARGET_NR_mknodat:
7334         if (!(p = lock_user_string(arg2)))
7335             return -TARGET_EFAULT;
7336         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7337         unlock_user(p, arg2, 0);
7338         return ret;
7339 #endif
7340 #ifdef TARGET_NR_chmod
7341     case TARGET_NR_chmod:
7342         if (!(p = lock_user_string(arg1)))
7343             return -TARGET_EFAULT;
7344         ret = get_errno(chmod(p, arg2));
7345         unlock_user(p, arg1, 0);
7346         return ret;
7347 #endif
7348 #ifdef TARGET_NR_lseek
7349     case TARGET_NR_lseek:
7350         return get_errno(lseek(arg1, arg2, arg3));
7351 #endif
7352 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7353     /* Alpha specific */
7354     case TARGET_NR_getxpid:
7355         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7356         return get_errno(getpid());
7357 #endif
7358 #ifdef TARGET_NR_getpid
7359     case TARGET_NR_getpid:
7360         return get_errno(getpid());
7361 #endif
7362     case TARGET_NR_mount:
7363         {
7364             /* need to look at the data field */
7365             void *p2, *p3;
7366 
7367             if (arg1) {
7368                 p = lock_user_string(arg1);
7369                 if (!p) {
7370                     return -TARGET_EFAULT;
7371                 }
7372             } else {
7373                 p = NULL;
7374             }
7375 
7376             p2 = lock_user_string(arg2);
7377             if (!p2) {
7378                 if (arg1) {
7379                     unlock_user(p, arg1, 0);
7380                 }
7381                 return -TARGET_EFAULT;
7382             }
7383 
7384             if (arg3) {
7385                 p3 = lock_user_string(arg3);
7386                 if (!p3) {
7387                     if (arg1) {
7388                         unlock_user(p, arg1, 0);
7389                     }
7390                     unlock_user(p2, arg2, 0);
7391                     return -TARGET_EFAULT;
7392                 }
7393             } else {
7394                 p3 = NULL;
7395             }
7396 
7397             /* FIXME - arg5 should be locked, but it isn't clear how to
7398              * do that since it's not guaranteed to be a NULL-terminated
7399              * string.
7400              */
7401             if (!arg5) {
7402                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7403             } else {
7404                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7405             }
7406             ret = get_errno(ret);
7407 
7408             if (arg1) {
7409                 unlock_user(p, arg1, 0);
7410             }
7411             unlock_user(p2, arg2, 0);
7412             if (arg3) {
7413                 unlock_user(p3, arg3, 0);
7414             }
7415         }
7416         return ret;
7417 #ifdef TARGET_NR_umount
7418     case TARGET_NR_umount:
7419         if (!(p = lock_user_string(arg1)))
7420             return -TARGET_EFAULT;
7421         ret = get_errno(umount(p));
7422         unlock_user(p, arg1, 0);
7423         return ret;
7424 #endif
7425 #ifdef TARGET_NR_stime /* not on alpha */
7426     case TARGET_NR_stime:
7427         {
7428             time_t host_time;
7429             if (get_user_sal(host_time, arg1))
7430                 return -TARGET_EFAULT;
7431             return get_errno(stime(&host_time));
7432         }
7433 #endif
7434 #ifdef TARGET_NR_alarm /* not on alpha */
7435     case TARGET_NR_alarm:
7436         return alarm(arg1);
7437 #endif
7438 #ifdef TARGET_NR_pause /* not on alpha */
7439     case TARGET_NR_pause:
7440         if (!block_signals()) {
7441             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7442         }
7443         return -TARGET_EINTR;
7444 #endif
7445 #ifdef TARGET_NR_utime
7446     case TARGET_NR_utime:
7447         {
7448             struct utimbuf tbuf, *host_tbuf;
7449             struct target_utimbuf *target_tbuf;
7450             if (arg2) {
7451                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7452                     return -TARGET_EFAULT;
7453                 tbuf.actime = tswapal(target_tbuf->actime);
7454                 tbuf.modtime = tswapal(target_tbuf->modtime);
7455                 unlock_user_struct(target_tbuf, arg2, 0);
7456                 host_tbuf = &tbuf;
7457             } else {
7458                 host_tbuf = NULL;
7459             }
7460             if (!(p = lock_user_string(arg1)))
7461                 return -TARGET_EFAULT;
7462             ret = get_errno(utime(p, host_tbuf));
7463             unlock_user(p, arg1, 0);
7464         }
7465         return ret;
7466 #endif
7467 #ifdef TARGET_NR_utimes
7468     case TARGET_NR_utimes:
7469         {
7470             struct timeval *tvp, tv[2];
7471             if (arg2) {
7472                 if (copy_from_user_timeval(&tv[0], arg2)
7473                     || copy_from_user_timeval(&tv[1],
7474                                               arg2 + sizeof(struct target_timeval)))
7475                     return -TARGET_EFAULT;
7476                 tvp = tv;
7477             } else {
7478                 tvp = NULL;
7479             }
7480             if (!(p = lock_user_string(arg1)))
7481                 return -TARGET_EFAULT;
7482             ret = get_errno(utimes(p, tvp));
7483             unlock_user(p, arg1, 0);
7484         }
7485         return ret;
7486 #endif
7487 #if defined(TARGET_NR_futimesat)
7488     case TARGET_NR_futimesat:
7489         {
7490             struct timeval *tvp, tv[2];
7491             if (arg3) {
7492                 if (copy_from_user_timeval(&tv[0], arg3)
7493                     || copy_from_user_timeval(&tv[1],
7494                                               arg3 + sizeof(struct target_timeval)))
7495                     return -TARGET_EFAULT;
7496                 tvp = tv;
7497             } else {
7498                 tvp = NULL;
7499             }
7500             if (!(p = lock_user_string(arg2))) {
7501                 return -TARGET_EFAULT;
7502             }
7503             ret = get_errno(futimesat(arg1, path(p), tvp));
7504             unlock_user(p, arg2, 0);
7505         }
7506         return ret;
7507 #endif
7508 #ifdef TARGET_NR_access
7509     case TARGET_NR_access:
7510         if (!(p = lock_user_string(arg1))) {
7511             return -TARGET_EFAULT;
7512         }
7513         ret = get_errno(access(path(p), arg2));
7514         unlock_user(p, arg1, 0);
7515         return ret;
7516 #endif
7517 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7518     case TARGET_NR_faccessat:
7519         if (!(p = lock_user_string(arg2))) {
7520             return -TARGET_EFAULT;
7521         }
7522         ret = get_errno(faccessat(arg1, p, arg3, 0));
7523         unlock_user(p, arg2, 0);
7524         return ret;
7525 #endif
7526 #ifdef TARGET_NR_nice /* not on alpha */
7527     case TARGET_NR_nice:
7528         return get_errno(nice(arg1));
7529 #endif
7530     case TARGET_NR_sync:
7531         sync();
7532         return 0;
7533 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7534     case TARGET_NR_syncfs:
7535         return get_errno(syncfs(arg1));
7536 #endif
7537     case TARGET_NR_kill:
7538         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7539 #ifdef TARGET_NR_rename
7540     case TARGET_NR_rename:
7541         {
7542             void *p2;
7543             p = lock_user_string(arg1);
7544             p2 = lock_user_string(arg2);
7545             if (!p || !p2)
7546                 ret = -TARGET_EFAULT;
7547             else
7548                 ret = get_errno(rename(p, p2));
7549             unlock_user(p2, arg2, 0);
7550             unlock_user(p, arg1, 0);
7551         }
7552         return ret;
7553 #endif
7554 #if defined(TARGET_NR_renameat)
7555     case TARGET_NR_renameat:
7556         {
7557             void *p2;
7558             p  = lock_user_string(arg2);
7559             p2 = lock_user_string(arg4);
7560             if (!p || !p2)
7561                 ret = -TARGET_EFAULT;
7562             else
7563                 ret = get_errno(renameat(arg1, p, arg3, p2));
7564             unlock_user(p2, arg4, 0);
7565             unlock_user(p, arg2, 0);
7566         }
7567         return ret;
7568 #endif
7569 #if defined(TARGET_NR_renameat2)
7570     case TARGET_NR_renameat2:
7571         {
7572             void *p2;
7573             p  = lock_user_string(arg2);
7574             p2 = lock_user_string(arg4);
7575             if (!p || !p2) {
7576                 ret = -TARGET_EFAULT;
7577             } else {
7578                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7579             }
7580             unlock_user(p2, arg4, 0);
7581             unlock_user(p, arg2, 0);
7582         }
7583         return ret;
7584 #endif
7585 #ifdef TARGET_NR_mkdir
7586     case TARGET_NR_mkdir:
7587         if (!(p = lock_user_string(arg1)))
7588             return -TARGET_EFAULT;
7589         ret = get_errno(mkdir(p, arg2));
7590         unlock_user(p, arg1, 0);
7591         return ret;
7592 #endif
7593 #if defined(TARGET_NR_mkdirat)
7594     case TARGET_NR_mkdirat:
7595         if (!(p = lock_user_string(arg2)))
7596             return -TARGET_EFAULT;
7597         ret = get_errno(mkdirat(arg1, p, arg3));
7598         unlock_user(p, arg2, 0);
7599         return ret;
7600 #endif
7601 #ifdef TARGET_NR_rmdir
7602     case TARGET_NR_rmdir:
7603         if (!(p = lock_user_string(arg1)))
7604             return -TARGET_EFAULT;
7605         ret = get_errno(rmdir(p));
7606         unlock_user(p, arg1, 0);
7607         return ret;
7608 #endif
7609     case TARGET_NR_dup:
7610         ret = get_errno(dup(arg1));
7611         if (ret >= 0) {
7612             fd_trans_dup(arg1, ret);
7613         }
7614         return ret;
7615 #ifdef TARGET_NR_pipe
7616     case TARGET_NR_pipe:
7617         return do_pipe(cpu_env, arg1, 0, 0);
7618 #endif
7619 #ifdef TARGET_NR_pipe2
7620     case TARGET_NR_pipe2:
7621         return do_pipe(cpu_env, arg1,
7622                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7623 #endif
7624     case TARGET_NR_times:
7625         {
7626             struct target_tms *tmsp;
7627             struct tms tms;
7628             ret = get_errno(times(&tms));
7629             if (arg1) {
7630                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7631                 if (!tmsp)
7632                     return -TARGET_EFAULT;
7633                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7634                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7635                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7636                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7637             }
7638             if (!is_error(ret))
7639                 ret = host_to_target_clock_t(ret);
7640         }
7641         return ret;
7642     case TARGET_NR_acct:
7643         if (arg1 == 0) {
7644             ret = get_errno(acct(NULL));
7645         } else {
7646             if (!(p = lock_user_string(arg1))) {
7647                 return -TARGET_EFAULT;
7648             }
7649             ret = get_errno(acct(path(p)));
7650             unlock_user(p, arg1, 0);
7651         }
7652         return ret;
7653 #ifdef TARGET_NR_umount2
7654     case TARGET_NR_umount2:
7655         if (!(p = lock_user_string(arg1)))
7656             return -TARGET_EFAULT;
7657         ret = get_errno(umount2(p, arg2));
7658         unlock_user(p, arg1, 0);
7659         return ret;
7660 #endif
7661     case TARGET_NR_ioctl:
7662         return do_ioctl(arg1, arg2, arg3);
7663 #ifdef TARGET_NR_fcntl
7664     case TARGET_NR_fcntl:
7665         return do_fcntl(arg1, arg2, arg3);
7666 #endif
7667     case TARGET_NR_setpgid:
7668         return get_errno(setpgid(arg1, arg2));
7669     case TARGET_NR_umask:
7670         return get_errno(umask(arg1));
7671     case TARGET_NR_chroot:
7672         if (!(p = lock_user_string(arg1)))
7673             return -TARGET_EFAULT;
7674         ret = get_errno(chroot(p));
7675         unlock_user(p, arg1, 0);
7676         return ret;
7677 #ifdef TARGET_NR_dup2
7678     case TARGET_NR_dup2:
7679         ret = get_errno(dup2(arg1, arg2));
7680         if (ret >= 0) {
7681             fd_trans_dup(arg1, arg2);
7682         }
7683         return ret;
7684 #endif
7685 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7686     case TARGET_NR_dup3:
7687     {
7688         int host_flags;
7689 
7690         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7691             return -EINVAL;
7692         }
7693         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7694         ret = get_errno(dup3(arg1, arg2, host_flags));
7695         if (ret >= 0) {
7696             fd_trans_dup(arg1, arg2);
7697         }
7698         return ret;
7699     }
7700 #endif
7701 #ifdef TARGET_NR_getppid /* not on alpha */
7702     case TARGET_NR_getppid:
7703         return get_errno(getppid());
7704 #endif
7705 #ifdef TARGET_NR_getpgrp
7706     case TARGET_NR_getpgrp:
7707         return get_errno(getpgrp());
7708 #endif
7709     case TARGET_NR_setsid:
7710         return get_errno(setsid());
7711 #ifdef TARGET_NR_sigaction
7712     case TARGET_NR_sigaction:
7713         {
7714 #if defined(TARGET_ALPHA)
7715             struct target_sigaction act, oact, *pact = 0;
7716             struct target_old_sigaction *old_act;
7717             if (arg2) {
7718                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7719                     return -TARGET_EFAULT;
7720                 act._sa_handler = old_act->_sa_handler;
7721                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7722                 act.sa_flags = old_act->sa_flags;
7723                 act.sa_restorer = 0;
7724                 unlock_user_struct(old_act, arg2, 0);
7725                 pact = &act;
7726             }
7727             ret = get_errno(do_sigaction(arg1, pact, &oact));
7728             if (!is_error(ret) && arg3) {
7729                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7730                     return -TARGET_EFAULT;
7731                 old_act->_sa_handler = oact._sa_handler;
7732                 old_act->sa_mask = oact.sa_mask.sig[0];
7733                 old_act->sa_flags = oact.sa_flags;
7734                 unlock_user_struct(old_act, arg3, 1);
7735             }
7736 #elif defined(TARGET_MIPS)
7737             struct target_sigaction act, oact, *pact, *old_act;
7738 
7739             if (arg2) {
7740                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7741                     return -TARGET_EFAULT;
7742                 act._sa_handler = old_act->_sa_handler;
7743                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7744                 act.sa_flags = old_act->sa_flags;
7745                 unlock_user_struct(old_act, arg2, 0);
7746                 pact = &act;
7747             } else {
7748                 pact = NULL;
7749             }
7750 
7751             ret = get_errno(do_sigaction(arg1, pact, &oact));
7752 
7753             if (!is_error(ret) && arg3) {
7754                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7755                     return -TARGET_EFAULT;
7756                 old_act->_sa_handler = oact._sa_handler;
7757                 old_act->sa_flags = oact.sa_flags;
7758                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7759                 old_act->sa_mask.sig[1] = 0;
7760                 old_act->sa_mask.sig[2] = 0;
7761                 old_act->sa_mask.sig[3] = 0;
7762                 unlock_user_struct(old_act, arg3, 1);
7763             }
7764 #else
7765             struct target_old_sigaction *old_act;
7766             struct target_sigaction act, oact, *pact;
7767             if (arg2) {
7768                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7769                     return -TARGET_EFAULT;
7770                 act._sa_handler = old_act->_sa_handler;
7771                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7772                 act.sa_flags = old_act->sa_flags;
7773                 act.sa_restorer = old_act->sa_restorer;
7774 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7775                 act.ka_restorer = 0;
7776 #endif
7777                 unlock_user_struct(old_act, arg2, 0);
7778                 pact = &act;
7779             } else {
7780                 pact = NULL;
7781             }
7782             ret = get_errno(do_sigaction(arg1, pact, &oact));
7783             if (!is_error(ret) && arg3) {
7784                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7785                     return -TARGET_EFAULT;
7786                 old_act->_sa_handler = oact._sa_handler;
7787                 old_act->sa_mask = oact.sa_mask.sig[0];
7788                 old_act->sa_flags = oact.sa_flags;
7789                 old_act->sa_restorer = oact.sa_restorer;
7790                 unlock_user_struct(old_act, arg3, 1);
7791             }
7792 #endif
7793         }
7794         return ret;
7795 #endif
7796     case TARGET_NR_rt_sigaction:
7797         {
7798 #if defined(TARGET_ALPHA)
7799             /* For Alpha and SPARC this is a 5 argument syscall, with
7800              * a 'restorer' parameter which must be copied into the
7801              * sa_restorer field of the sigaction struct.
7802              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7803              * and arg5 is the sigsetsize.
7804              * Alpha also has a separate rt_sigaction struct that it uses
7805              * here; SPARC uses the usual sigaction struct.
7806              */
7807             struct target_rt_sigaction *rt_act;
7808             struct target_sigaction act, oact, *pact = 0;
7809 
7810             if (arg4 != sizeof(target_sigset_t)) {
7811                 return -TARGET_EINVAL;
7812             }
7813             if (arg2) {
7814                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7815                     return -TARGET_EFAULT;
7816                 act._sa_handler = rt_act->_sa_handler;
7817                 act.sa_mask = rt_act->sa_mask;
7818                 act.sa_flags = rt_act->sa_flags;
7819                 act.sa_restorer = arg5;
7820                 unlock_user_struct(rt_act, arg2, 0);
7821                 pact = &act;
7822             }
7823             ret = get_errno(do_sigaction(arg1, pact, &oact));
7824             if (!is_error(ret) && arg3) {
7825                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7826                     return -TARGET_EFAULT;
7827                 rt_act->_sa_handler = oact._sa_handler;
7828                 rt_act->sa_mask = oact.sa_mask;
7829                 rt_act->sa_flags = oact.sa_flags;
7830                 unlock_user_struct(rt_act, arg3, 1);
7831             }
7832 #else
7833 #ifdef TARGET_SPARC
7834             target_ulong restorer = arg4;
7835             target_ulong sigsetsize = arg5;
7836 #else
7837             target_ulong sigsetsize = arg4;
7838 #endif
7839             struct target_sigaction *act;
7840             struct target_sigaction *oact;
7841 
7842             if (sigsetsize != sizeof(target_sigset_t)) {
7843                 return -TARGET_EINVAL;
7844             }
7845             if (arg2) {
7846                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
7847                     return -TARGET_EFAULT;
7848                 }
7849 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7850                 act->ka_restorer = restorer;
7851 #endif
7852             } else {
7853                 act = NULL;
7854             }
7855             if (arg3) {
7856                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7857                     ret = -TARGET_EFAULT;
7858                     goto rt_sigaction_fail;
7859                 }
7860             } else
7861                 oact = NULL;
7862             ret = get_errno(do_sigaction(arg1, act, oact));
7863         rt_sigaction_fail:
7864             if (act)
7865                 unlock_user_struct(act, arg2, 0);
7866             if (oact)
7867                 unlock_user_struct(oact, arg3, 1);
7868 #endif
7869         }
7870         return ret;
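         /*
          * sgetmask()/ssetmask() are legacy calls that operate only on the
          * old single-word signal mask, hence the *_old_sigset conversion
          * helpers.
          */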
7871 #ifdef TARGET_NR_sgetmask /* not on alpha */
7872     case TARGET_NR_sgetmask:
7873         {
7874             sigset_t cur_set;
7875             abi_ulong target_set;
7876             ret = do_sigprocmask(0, NULL, &cur_set);
7877             if (!ret) {
7878                 host_to_target_old_sigset(&target_set, &cur_set);
7879                 ret = target_set;
7880             }
7881         }
7882         return ret;
7883 #endif
7884 #ifdef TARGET_NR_ssetmask /* not on alpha */
7885     case TARGET_NR_ssetmask:
7886         {
7887             sigset_t set, oset;
7888             abi_ulong target_set = arg1;
7889             target_to_host_old_sigset(&set, &target_set);
7890             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7891             if (!ret) {
7892                 host_to_target_old_sigset(&target_set, &oset);
7893                 ret = target_set;
7894             }
7895         }
7896         return ret;
7897 #endif
7898 #ifdef TARGET_NR_sigprocmask
7899     case TARGET_NR_sigprocmask:
7900         {
7901 #if defined(TARGET_ALPHA)
7902             sigset_t set, oldset;
7903             abi_ulong mask;
7904             int how;
7905 
7906             switch (arg1) {
7907             case TARGET_SIG_BLOCK:
7908                 how = SIG_BLOCK;
7909                 break;
7910             case TARGET_SIG_UNBLOCK:
7911                 how = SIG_UNBLOCK;
7912                 break;
7913             case TARGET_SIG_SETMASK:
7914                 how = SIG_SETMASK;
7915                 break;
7916             default:
7917                 return -TARGET_EINVAL;
7918             }
7919             mask = arg2;
7920             target_to_host_old_sigset(&set, &mask);
7921 
7922             ret = do_sigprocmask(how, &set, &oldset);
7923             if (!is_error(ret)) {
7924                 host_to_target_old_sigset(&mask, &oldset);
7925                 ret = mask;
7926                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7927             }
7928 #else
7929             sigset_t set, oldset, *set_ptr;
7930             int how;
7931 
7932             if (arg2) {
7933                 switch (arg1) {
7934                 case TARGET_SIG_BLOCK:
7935                     how = SIG_BLOCK;
7936                     break;
7937                 case TARGET_SIG_UNBLOCK:
7938                     how = SIG_UNBLOCK;
7939                     break;
7940                 case TARGET_SIG_SETMASK:
7941                     how = SIG_SETMASK;
7942                     break;
7943                 default:
7944                     return -TARGET_EINVAL;
7945                 }
7946                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7947                     return -TARGET_EFAULT;
7948                 target_to_host_old_sigset(&set, p);
7949                 unlock_user(p, arg2, 0);
7950                 set_ptr = &set;
7951             } else {
7952                 how = 0;
7953                 set_ptr = NULL;
7954             }
7955             ret = do_sigprocmask(how, set_ptr, &oldset);
7956             if (!is_error(ret) && arg3) {
7957                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7958                     return -TARGET_EFAULT;
7959                 host_to_target_old_sigset(p, &oldset);
7960                 unlock_user(p, arg3, sizeof(target_sigset_t));
7961             }
7962 #endif
7963         }
7964         return ret;
7965 #endif
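         /*
          * rt_sigprocmask: the sigsetsize argument (arg4) must match the
          * target sigset size exactly.  If no new set is supplied
          * (arg2 == 0) only the current mask is queried, so 'how' is
          * irrelevant and is passed as 0.
          */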
7966     case TARGET_NR_rt_sigprocmask:
7967         {
7968             int how = arg1;
7969             sigset_t set, oldset, *set_ptr;
7970 
7971             if (arg4 != sizeof(target_sigset_t)) {
7972                 return -TARGET_EINVAL;
7973             }
7974 
7975             if (arg2) {
7976                 switch(how) {
7977                 case TARGET_SIG_BLOCK:
7978                     how = SIG_BLOCK;
7979                     break;
7980                 case TARGET_SIG_UNBLOCK:
7981                     how = SIG_UNBLOCK;
7982                     break;
7983                 case TARGET_SIG_SETMASK:
7984                     how = SIG_SETMASK;
7985                     break;
7986                 default:
7987                     return -TARGET_EINVAL;
7988                 }
7989                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7990                     return -TARGET_EFAULT;
7991                 target_to_host_sigset(&set, p);
7992                 unlock_user(p, arg2, 0);
7993                 set_ptr = &set;
7994             } else {
7995                 how = 0;
7996                 set_ptr = NULL;
7997             }
7998             ret = do_sigprocmask(how, set_ptr, &oldset);
7999             if (!is_error(ret) && arg3) {
8000                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8001                     return -TARGET_EFAULT;
8002                 host_to_target_sigset(p, &oldset);
8003                 unlock_user(p, arg3, sizeof(target_sigset_t));
8004             }
8005         }
8006         return ret;
8007 #ifdef TARGET_NR_sigpending
8008     case TARGET_NR_sigpending:
8009         {
8010             sigset_t set;
8011             ret = get_errno(sigpending(&set));
8012             if (!is_error(ret)) {
8013                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8014                     return -TARGET_EFAULT;
8015                 host_to_target_old_sigset(p, &set);
8016                 unlock_user(p, arg1, sizeof(target_sigset_t));
8017             }
8018         }
8019         return ret;
8020 #endif
8021     case TARGET_NR_rt_sigpending:
8022         {
8023             sigset_t set;
8024 
8025             /* Yes, this check is >, not != like most. We follow the kernel's
8026              * logic and it does it like this because it implements
8027              * NR_sigpending through the same code path, and in that case
8028              * the old_sigset_t is smaller in size.
8029              */
8030             if (arg2 > sizeof(target_sigset_t)) {
8031                 return -TARGET_EINVAL;
8032             }
8033 
8034             ret = get_errno(sigpending(&set));
8035             if (!is_error(ret)) {
8036                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8037                     return -TARGET_EFAULT;
8038                 host_to_target_sigset(p, &set);
8039                 unlock_user(p, arg1, sizeof(target_sigset_t));
8040             }
8041         }
8042         return ret;
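         /*
          * For sigsuspend/rt_sigsuspend the replacement mask is stashed in
          * the TaskState (sigsuspend_mask) and in_sigsuspend is set, so
          * that the signal delivery code in signal.c knows a temporary
          * mask is in effect and can restore the caller's mask when the
          * suspension ends.  The safe_ wrapper ensures signals arriving
          * during the call are handled correctly.
          */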
8043 #ifdef TARGET_NR_sigsuspend
8044     case TARGET_NR_sigsuspend:
8045         {
8046             TaskState *ts = cpu->opaque;
8047 #if defined(TARGET_ALPHA)
8048             abi_ulong mask = arg1;
8049             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8050 #else
8051             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8052                 return -TARGET_EFAULT;
8053             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8054             unlock_user(p, arg1, 0);
8055 #endif
8056             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8057                                                SIGSET_T_SIZE));
8058             if (ret != -TARGET_ERESTARTSYS) {
8059                 ts->in_sigsuspend = 1;
8060             }
8061         }
8062         return ret;
8063 #endif
8064     case TARGET_NR_rt_sigsuspend:
8065         {
8066             TaskState *ts = cpu->opaque;
8067 
8068             if (arg2 != sizeof(target_sigset_t)) {
8069                 return -TARGET_EINVAL;
8070             }
8071             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8072                 return -TARGET_EFAULT;
8073             target_to_host_sigset(&ts->sigsuspend_mask, p);
8074             unlock_user(p, arg1, 0);
8075             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8076                                                SIGSET_T_SIZE));
8077             if (ret != -TARGET_ERESTARTSYS) {
8078                 ts->in_sigsuspend = 1;
8079             }
8080         }
8081         return ret;
8082     case TARGET_NR_rt_sigtimedwait:
8083         {
8084             sigset_t set;
8085             struct timespec uts, *puts;
8086             siginfo_t uinfo;
8087 
8088             if (arg4 != sizeof(target_sigset_t)) {
8089                 return -TARGET_EINVAL;
8090             }
8091 
8092             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8093                 return -TARGET_EFAULT;
8094             target_to_host_sigset(&set, p);
8095             unlock_user(p, arg1, 0);
8096             if (arg3) {
8097                 puts = &uts;
8098                 if (target_to_host_timespec(puts, arg3)) {
                         return -TARGET_EFAULT;
                     }
8099             } else {
8100                 puts = NULL;
8101             }
8102             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8103                                                  SIGSET_T_SIZE));
8104             if (!is_error(ret)) {
8105                 if (arg2) {
8106                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8107                                   0);
8108                     if (!p) {
8109                         return -TARGET_EFAULT;
8110                     }
8111                     host_to_target_siginfo(p, &uinfo);
8112                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8113                 }
8114                 ret = host_to_target_signal(ret);
8115             }
8116         }
8117         return ret;
8118     case TARGET_NR_rt_sigqueueinfo:
8119         {
8120             siginfo_t uinfo;
8121 
8122             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8123             if (!p) {
8124                 return -TARGET_EFAULT;
8125             }
8126             target_to_host_siginfo(&uinfo, p);
8127             unlock_user(p, arg3, 0);
8128             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8129         }
8130         return ret;
8131     case TARGET_NR_rt_tgsigqueueinfo:
8132         {
8133             siginfo_t uinfo;
8134 
8135             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8136             if (!p) {
8137                 return -TARGET_EFAULT;
8138             }
8139             target_to_host_siginfo(&uinfo, p);
8140             unlock_user(p, arg4, 0);
8141             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8142         }
8143         return ret;
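         /*
          * block_signals() blocks host signal delivery and reports whether
          * a guest signal is already pending; if so we return
          * -TARGET_ERESTARTSYS so that signal is handled first and the
          * sigreturn is retried afterwards.
          */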
8144 #ifdef TARGET_NR_sigreturn
8145     case TARGET_NR_sigreturn:
8146         if (block_signals()) {
8147             return -TARGET_ERESTARTSYS;
8148         }
8149         return do_sigreturn(cpu_env);
8150 #endif
8151     case TARGET_NR_rt_sigreturn:
8152         if (block_signals()) {
8153             return -TARGET_ERESTARTSYS;
8154         }
8155         return do_rt_sigreturn(cpu_env);
8156     case TARGET_NR_sethostname:
8157         if (!(p = lock_user_string(arg1)))
8158             return -TARGET_EFAULT;
8159         ret = get_errno(sethostname(p, arg2));
8160         unlock_user(p, arg1, 0);
8161         return ret;
8162 #ifdef TARGET_NR_setrlimit
8163     case TARGET_NR_setrlimit:
8164         {
8165             int resource = target_to_host_resource(arg1);
8166             struct target_rlimit *target_rlim;
8167             struct rlimit rlim;
8168             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8169                 return -TARGET_EFAULT;
8170             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8171             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8172             unlock_user_struct(target_rlim, arg2, 0);
8173             /*
8174              * If we just passed through resource limit settings for memory then
8175              * they would also apply to QEMU's own allocations, and QEMU will
8176              * crash or hang or die if its allocations fail. Ideally we would
8177              * track the guest allocations in QEMU and apply the limits ourselves.
8178              * For now, just tell the guest the call succeeded but don't actually
8179              * limit anything.
8180              */
8181             if (resource != RLIMIT_AS &&
8182                 resource != RLIMIT_DATA &&
8183                 resource != RLIMIT_STACK) {
8184                 return get_errno(setrlimit(resource, &rlim));
8185             } else {
8186                 return 0;
8187             }
8188         }
8189 #endif
8190 #ifdef TARGET_NR_getrlimit
8191     case TARGET_NR_getrlimit:
8192         {
8193             int resource = target_to_host_resource(arg1);
8194             struct target_rlimit *target_rlim;
8195             struct rlimit rlim;
8196 
8197             ret = get_errno(getrlimit(resource, &rlim));
8198             if (!is_error(ret)) {
8199                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8200                     return -TARGET_EFAULT;
8201                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8202                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8203                 unlock_user_struct(target_rlim, arg2, 1);
8204             }
8205         }
8206         return ret;
8207 #endif
8208     case TARGET_NR_getrusage:
8209         {
8210             struct rusage rusage;
8211             ret = get_errno(getrusage(arg1, &rusage));
8212             if (!is_error(ret)) {
8213                 ret = host_to_target_rusage(arg2, &rusage);
8214             }
8215         }
8216         return ret;
8217     case TARGET_NR_gettimeofday:
8218         {
8219             struct timeval tv;
8220             ret = get_errno(gettimeofday(&tv, NULL));
8221             if (!is_error(ret)) {
8222                 if (copy_to_user_timeval(arg1, &tv))
8223                     return -TARGET_EFAULT;
8224             }
8225         }
8226         return ret;
8227     case TARGET_NR_settimeofday:
8228         {
8229             struct timeval tv, *ptv = NULL;
8230             struct timezone tz, *ptz = NULL;
8231 
8232             if (arg1) {
8233                 if (copy_from_user_timeval(&tv, arg1)) {
8234                     return -TARGET_EFAULT;
8235                 }
8236                 ptv = &tv;
8237             }
8238 
8239             if (arg2) {
8240                 if (copy_from_user_timezone(&tz, arg2)) {
8241                     return -TARGET_EFAULT;
8242                 }
8243                 ptz = &tz;
8244             }
8245 
8246             return get_errno(settimeofday(ptv, ptz));
8247         }
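         /*
          * select comes in several ABI flavours: some targets never had it
          * (and now return ENOSYS), some use the old calling convention
          * where a single pointer refers to a block holding all five
          * arguments (do_old_select), and the rest pass the five arguments
          * directly.
          */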
8248 #if defined(TARGET_NR_select)
8249     case TARGET_NR_select:
8250 #if defined(TARGET_WANT_NI_OLD_SELECT)
8251         /* some architectures used to implement old_select here,
8252          * but now simply return ENOSYS for it.
8253          */
8254         ret = -TARGET_ENOSYS;
8255 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8256         ret = do_old_select(arg1);
8257 #else
8258         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8259 #endif
8260         return ret;
8261 #endif
8262 #ifdef TARGET_NR_pselect6
8263     case TARGET_NR_pselect6:
8264         {
8265             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8266             fd_set rfds, wfds, efds;
8267             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8268             struct timespec ts, *ts_ptr;
8269 
8270             /*
8271              * The 6th arg is actually two args smashed together,
8272              * so we cannot use the C library.
8273              */
8274             sigset_t set;
8275             struct {
8276                 sigset_t *set;
8277                 size_t size;
8278             } sig, *sig_ptr;
8279 
8280             abi_ulong arg_sigset, arg_sigsize, *arg7;
8281             target_sigset_t *target_sigset;
8282 
8283             n = arg1;
8284             rfd_addr = arg2;
8285             wfd_addr = arg3;
8286             efd_addr = arg4;
8287             ts_addr = arg5;
8288 
8289             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8290             if (ret) {
8291                 return ret;
8292             }
8293             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8294             if (ret) {
8295                 return ret;
8296             }
8297             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8298             if (ret) {
8299                 return ret;
8300             }
8301 
8302             /*
8303              * This takes a timespec, and not a timeval, so we cannot
8304              * use the do_select() helper ...
8305              */
8306             if (ts_addr) {
8307                 if (target_to_host_timespec(&ts, ts_addr)) {
8308                     return -TARGET_EFAULT;
8309                 }
8310                 ts_ptr = &ts;
8311             } else {
8312                 ts_ptr = NULL;
8313             }
8314 
8315             /* Extract the two packed args for the sigset */
8316             if (arg6) {
8317                 sig_ptr = &sig;
8318                 sig.size = SIGSET_T_SIZE;
8319 
8320                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8321                 if (!arg7) {
8322                     return -TARGET_EFAULT;
8323                 }
8324                 arg_sigset = tswapal(arg7[0]);
8325                 arg_sigsize = tswapal(arg7[1]);
8326                 unlock_user(arg7, arg6, 0);
8327 
8328                 if (arg_sigset) {
8329                     sig.set = &set;
8330                     if (arg_sigsize != sizeof(*target_sigset)) {
8331                         /* Like the kernel, we enforce correct size sigsets */
8332                         return -TARGET_EINVAL;
8333                     }
8334                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8335                                               sizeof(*target_sigset), 1);
8336                     if (!target_sigset) {
8337                         return -TARGET_EFAULT;
8338                     }
8339                     target_to_host_sigset(&set, target_sigset);
8340                     unlock_user(target_sigset, arg_sigset, 0);
8341                 } else {
8342                     sig.set = NULL;
8343                 }
8344             } else {
8345                 sig_ptr = NULL;
8346             }
8347 
8348             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8349                                           ts_ptr, sig_ptr));
8350 
8351             if (!is_error(ret)) {
8352                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8353                     return -TARGET_EFAULT;
8354                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8355                     return -TARGET_EFAULT;
8356                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8357                     return -TARGET_EFAULT;
8358 
8359                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8360                     return -TARGET_EFAULT;
8361             }
8362         }
8363         return ret;
8364 #endif
8365 #ifdef TARGET_NR_symlink
8366     case TARGET_NR_symlink:
8367         {
8368             void *p2;
8369             p = lock_user_string(arg1);
8370             p2 = lock_user_string(arg2);
8371             if (!p || !p2)
8372                 ret = -TARGET_EFAULT;
8373             else
8374                 ret = get_errno(symlink(p, p2));
8375             unlock_user(p2, arg2, 0);
8376             unlock_user(p, arg1, 0);
8377         }
8378         return ret;
8379 #endif
8380 #if defined(TARGET_NR_symlinkat)
8381     case TARGET_NR_symlinkat:
8382         {
8383             void *p2;
8384             p  = lock_user_string(arg1);
8385             p2 = lock_user_string(arg3);
8386             if (!p || !p2)
8387                 ret = -TARGET_EFAULT;
8388             else
8389                 ret = get_errno(symlinkat(p, arg2, p2));
8390             unlock_user(p2, arg3, 0);
8391             unlock_user(p, arg1, 0);
8392         }
8393         return ret;
8394 #endif
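         /*
          * readlink/readlinkat special-case /proc/self/exe (and the
          * numeric-pid equivalent): instead of the path of the QEMU binary
          * the guest is given the resolved path of the emulated executable
          * (exec_path), which is what it expects to see.
          */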
8395 #ifdef TARGET_NR_readlink
8396     case TARGET_NR_readlink:
8397         {
8398             void *p2;
8399             p = lock_user_string(arg1);
8400             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8401             if (!p || !p2) {
8402                 ret = -TARGET_EFAULT;
8403             } else if (!arg3) {
8404                 /* Short circuit this for the magic exe check. */
8405                 ret = -TARGET_EINVAL;
8406             } else if (is_proc_myself((const char *)p, "exe")) {
8407                 char real[PATH_MAX], *temp;
8408                 temp = realpath(exec_path, real);
8409                 /* Return value is # of bytes that we wrote to the buffer. */
8410                 if (temp == NULL) {
8411                     ret = get_errno(-1);
8412                 } else {
8413                     /* Don't worry about sign mismatch as earlier mapping
8414                      * logic would have thrown a bad address error. */
8415                     ret = MIN(strlen(real), arg3);
8416                     /* We cannot NUL terminate the string. */
8417                     memcpy(p2, real, ret);
8418                 }
8419             } else {
8420                 ret = get_errno(readlink(path(p), p2, arg3));
8421             }
8422             unlock_user(p2, arg2, ret);
8423             unlock_user(p, arg1, 0);
8424         }
8425         return ret;
8426 #endif
8427 #if defined(TARGET_NR_readlinkat)
8428     case TARGET_NR_readlinkat:
8429         {
8430             void *p2;
8431             p  = lock_user_string(arg2);
8432             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8433             if (!p || !p2) {
8434                 ret = -TARGET_EFAULT;
8435             } else if (is_proc_myself((const char *)p, "exe")) {
8436                 char real[PATH_MAX], *temp;
8437                 temp = realpath(exec_path, real);
8438             ret = temp == NULL ? get_errno(-1) : strlen(real);
8439             if (temp != NULL) {
                     snprintf((char *)p2, arg4, "%s", real);
                 }
8440             } else {
8441                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8442             }
8443             unlock_user(p2, arg3, ret);
8444             unlock_user(p, arg2, 0);
8445         }
8446         return ret;
8447 #endif
8448 #ifdef TARGET_NR_swapon
8449     case TARGET_NR_swapon:
8450         if (!(p = lock_user_string(arg1)))
8451             return -TARGET_EFAULT;
8452         ret = get_errno(swapon(p, arg2));
8453         unlock_user(p, arg1, 0);
8454         return ret;
8455 #endif
8456     case TARGET_NR_reboot:
8457         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8458            /* arg4 is only used for LINUX_REBOOT_CMD_RESTART2;
                 * it must be ignored in all other cases */
8459            p = lock_user_string(arg4);
8460            if (!p) {
8461                return -TARGET_EFAULT;
8462            }
8463            ret = get_errno(reboot(arg1, arg2, arg3, p));
8464            unlock_user(p, arg4, 0);
8465         } else {
8466            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8467         }
8468         return ret;
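         /*
          * Some 32-bit targets use the old mmap calling convention, where
          * arg1 points to a block of six values in guest memory rather
          * than passing the six arguments in registers; they are unpacked
          * and byte-swapped below before calling target_mmap().
          */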
8469 #ifdef TARGET_NR_mmap
8470     case TARGET_NR_mmap:
8471 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8472     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8473     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8474     || defined(TARGET_S390X)
8475         {
8476             abi_ulong *v;
8477             abi_ulong v1, v2, v3, v4, v5, v6;
8478             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8479                 return -TARGET_EFAULT;
8480             v1 = tswapal(v[0]);
8481             v2 = tswapal(v[1]);
8482             v3 = tswapal(v[2]);
8483             v4 = tswapal(v[3]);
8484             v5 = tswapal(v[4]);
8485             v6 = tswapal(v[5]);
8486             unlock_user(v, arg1, 0);
8487             ret = get_errno(target_mmap(v1, v2, v3,
8488                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8489                                         v5, v6));
8490         }
8491 #else
8492         ret = get_errno(target_mmap(arg1, arg2, arg3,
8493                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8494                                     arg5,
8495                                     arg6));
8496 #endif
8497         return ret;
8498 #endif
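         /*
          * For mmap2 the file offset is supplied in units of
          * 1 << MMAP_SHIFT (4096 bytes unless the target overrides it),
          * which lets 32-bit guests map at offsets beyond 4GB.
          */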
8499 #ifdef TARGET_NR_mmap2
8500     case TARGET_NR_mmap2:
8501 #ifndef MMAP_SHIFT
8502 #define MMAP_SHIFT 12
8503 #endif
8504         ret = target_mmap(arg1, arg2, arg3,
8505                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8506                           arg5, arg6 << MMAP_SHIFT);
8507         return get_errno(ret);
8508 #endif
8509     case TARGET_NR_munmap:
8510         return get_errno(target_munmap(arg1, arg2));
8511     case TARGET_NR_mprotect:
8512         {
8513             TaskState *ts = cpu->opaque;
8514             /* Special hack to detect libc making the stack executable.  */
8515             if ((arg3 & PROT_GROWSDOWN)
8516                 && arg1 >= ts->info->stack_limit
8517                 && arg1 <= ts->info->start_stack) {
8518                 arg3 &= ~PROT_GROWSDOWN;
8519                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8520                 arg1 = ts->info->stack_limit;
8521             }
8522         }
8523         return get_errno(target_mprotect(arg1, arg2, arg3));
8524 #ifdef TARGET_NR_mremap
8525     case TARGET_NR_mremap:
8526         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8527 #endif
8528         /* ??? msync/mlock/munlock are broken for softmmu.  */
8529 #ifdef TARGET_NR_msync
8530     case TARGET_NR_msync:
8531         return get_errno(msync(g2h(arg1), arg2, arg3));
8532 #endif
8533 #ifdef TARGET_NR_mlock
8534     case TARGET_NR_mlock:
8535         return get_errno(mlock(g2h(arg1), arg2));
8536 #endif
8537 #ifdef TARGET_NR_munlock
8538     case TARGET_NR_munlock:
8539         return get_errno(munlock(g2h(arg1), arg2));
8540 #endif
8541 #ifdef TARGET_NR_mlockall
8542     case TARGET_NR_mlockall:
8543         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8544 #endif
8545 #ifdef TARGET_NR_munlockall
8546     case TARGET_NR_munlockall:
8547         return get_errno(munlockall());
8548 #endif
8549 #ifdef TARGET_NR_truncate
8550     case TARGET_NR_truncate:
8551         if (!(p = lock_user_string(arg1)))
8552             return -TARGET_EFAULT;
8553         ret = get_errno(truncate(p, arg2));
8554         unlock_user(p, arg1, 0);
8555         return ret;
8556 #endif
8557 #ifdef TARGET_NR_ftruncate
8558     case TARGET_NR_ftruncate:
8559         return get_errno(ftruncate(arg1, arg2));
8560 #endif
8561     case TARGET_NR_fchmod:
8562         return get_errno(fchmod(arg1, arg2));
8563 #if defined(TARGET_NR_fchmodat)
8564     case TARGET_NR_fchmodat:
8565         if (!(p = lock_user_string(arg2)))
8566             return -TARGET_EFAULT;
8567         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8568         unlock_user(p, arg2, 0);
8569         return ret;
8570 #endif
8571     case TARGET_NR_getpriority:
8572         /* Note that negative values are valid for getpriority, so we must
8573            differentiate based on errno settings.  */
8574         errno = 0;
8575         ret = getpriority(arg1, arg2);
8576         if (ret == -1 && errno != 0) {
8577             return -host_to_target_errno(errno);
8578         }
8579 #ifdef TARGET_ALPHA
8580         /* Return value is the unbiased priority.  Signal no error.  */
8581         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8582 #else
8583         /* Return value is a biased priority to avoid negative numbers.  */
8584         ret = 20 - ret;
8585 #endif
8586         return ret;
8587     case TARGET_NR_setpriority:
8588         return get_errno(setpriority(arg1, arg2, arg3));
8589 #ifdef TARGET_NR_statfs
8590     case TARGET_NR_statfs:
8591         if (!(p = lock_user_string(arg1))) {
8592             return -TARGET_EFAULT;
8593         }
8594         ret = get_errno(statfs(path(p), &stfs));
8595         unlock_user(p, arg1, 0);
8596     convert_statfs:
8597         if (!is_error(ret)) {
8598             struct target_statfs *target_stfs;
8599 
8600             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8601                 return -TARGET_EFAULT;
8602             __put_user(stfs.f_type, &target_stfs->f_type);
8603             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8604             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8605             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8606             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8607             __put_user(stfs.f_files, &target_stfs->f_files);
8608             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8609             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8610             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8611             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8612             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8613 #ifdef _STATFS_F_FLAGS
8614             __put_user(stfs.f_flags, &target_stfs->f_flags);
8615 #else
8616             __put_user(0, &target_stfs->f_flags);
8617 #endif
8618             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8619             unlock_user_struct(target_stfs, arg2, 1);
8620         }
8621         return ret;
8622 #endif
8623 #ifdef TARGET_NR_fstatfs
8624     case TARGET_NR_fstatfs:
8625         ret = get_errno(fstatfs(arg1, &stfs));
8626         goto convert_statfs;
8627 #endif
8628 #ifdef TARGET_NR_statfs64
8629     case TARGET_NR_statfs64:
8630         if (!(p = lock_user_string(arg1))) {
8631             return -TARGET_EFAULT;
8632         }
8633         ret = get_errno(statfs(path(p), &stfs));
8634         unlock_user(p, arg1, 0);
8635     convert_statfs64:
8636         if (!is_error(ret)) {
8637             struct target_statfs64 *target_stfs;
8638 
8639             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8640                 return -TARGET_EFAULT;
8641             __put_user(stfs.f_type, &target_stfs->f_type);
8642             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8643             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8644             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8645             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8646             __put_user(stfs.f_files, &target_stfs->f_files);
8647             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8648             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8649             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8650             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8651             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8652             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8653             unlock_user_struct(target_stfs, arg3, 1);
8654         }
8655         return ret;
8656     case TARGET_NR_fstatfs64:
8657         ret = get_errno(fstatfs(arg1, &stfs));
8658         goto convert_statfs64;
8659 #endif
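         /*
          * Targets that multiplex the socket API through a single
          * socketcall syscall are handled by do_socketcall(), which
          * unpacks the argument block and dispatches to the same do_*
          * helpers used by the direct syscalls below.
          */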
8660 #ifdef TARGET_NR_socketcall
8661     case TARGET_NR_socketcall:
8662         return do_socketcall(arg1, arg2);
8663 #endif
8664 #ifdef TARGET_NR_accept
8665     case TARGET_NR_accept:
8666         return do_accept4(arg1, arg2, arg3, 0);
8667 #endif
8668 #ifdef TARGET_NR_accept4
8669     case TARGET_NR_accept4:
8670         return do_accept4(arg1, arg2, arg3, arg4);
8671 #endif
8672 #ifdef TARGET_NR_bind
8673     case TARGET_NR_bind:
8674         return do_bind(arg1, arg2, arg3);
8675 #endif
8676 #ifdef TARGET_NR_connect
8677     case TARGET_NR_connect:
8678         return do_connect(arg1, arg2, arg3);
8679 #endif
8680 #ifdef TARGET_NR_getpeername
8681     case TARGET_NR_getpeername:
8682         return do_getpeername(arg1, arg2, arg3);
8683 #endif
8684 #ifdef TARGET_NR_getsockname
8685     case TARGET_NR_getsockname:
8686         return do_getsockname(arg1, arg2, arg3);
8687 #endif
8688 #ifdef TARGET_NR_getsockopt
8689     case TARGET_NR_getsockopt:
8690         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8691 #endif
8692 #ifdef TARGET_NR_listen
8693     case TARGET_NR_listen:
8694         return get_errno(listen(arg1, arg2));
8695 #endif
8696 #ifdef TARGET_NR_recv
8697     case TARGET_NR_recv:
8698         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8699 #endif
8700 #ifdef TARGET_NR_recvfrom
8701     case TARGET_NR_recvfrom:
8702         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8703 #endif
8704 #ifdef TARGET_NR_recvmsg
8705     case TARGET_NR_recvmsg:
8706         return do_sendrecvmsg(arg1, arg2, arg3, 0);
8707 #endif
8708 #ifdef TARGET_NR_send
8709     case TARGET_NR_send:
8710         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8711 #endif
8712 #ifdef TARGET_NR_sendmsg
8713     case TARGET_NR_sendmsg:
8714         return do_sendrecvmsg(arg1, arg2, arg3, 1);
8715 #endif
8716 #ifdef TARGET_NR_sendmmsg
8717     case TARGET_NR_sendmmsg:
8718         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8719     case TARGET_NR_recvmmsg:
8720         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8721 #endif
8722 #ifdef TARGET_NR_sendto
8723     case TARGET_NR_sendto:
8724         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8725 #endif
8726 #ifdef TARGET_NR_shutdown
8727     case TARGET_NR_shutdown:
8728         return get_errno(shutdown(arg1, arg2));
8729 #endif
8730 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8731     case TARGET_NR_getrandom:
8732         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8733         if (!p) {
8734             return -TARGET_EFAULT;
8735         }
8736         ret = get_errno(getrandom(p, arg2, arg3));
8737         unlock_user(p, arg1, ret);
8738         return ret;
8739 #endif
8740 #ifdef TARGET_NR_socket
8741     case TARGET_NR_socket:
8742         return do_socket(arg1, arg2, arg3);
8743 #endif
8744 #ifdef TARGET_NR_socketpair
8745     case TARGET_NR_socketpair:
8746         return do_socketpair(arg1, arg2, arg3, arg4);
8747 #endif
8748 #ifdef TARGET_NR_setsockopt
8749     case TARGET_NR_setsockopt:
8750         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8751 #endif
8752 #if defined(TARGET_NR_syslog)
8753     case TARGET_NR_syslog:
8754         {
8755             int len = arg3;
8756 
8757             switch (arg1) {
8758             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
8759             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
8760             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
8761             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
8762             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
8763             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8764             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
8765             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
8766                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8767             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
8768             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
8769             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
8770                 {
8771                     if (len < 0) {
8772                         return -TARGET_EINVAL;
8773                     }
8774                     if (len == 0) {
8775                         return 0;
8776                     }
8777                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8778                     if (!p) {
8779                         return -TARGET_EFAULT;
8780                     }
8781                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8782                     unlock_user(p, arg2, arg3);
8783                 }
8784                 return ret;
8785             default:
8786                 return -TARGET_EINVAL;
8787             }
8788         }
8789         break;
8790 #endif
8791     case TARGET_NR_setitimer:
8792         {
8793             struct itimerval value, ovalue, *pvalue;
8794 
8795             if (arg2) {
8796                 pvalue = &value;
8797                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8798                     || copy_from_user_timeval(&pvalue->it_value,
8799                                               arg2 + sizeof(struct target_timeval)))
8800                     return -TARGET_EFAULT;
8801             } else {
8802                 pvalue = NULL;
8803             }
8804             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8805             if (!is_error(ret) && arg3) {
8806                 if (copy_to_user_timeval(arg3,
8807                                          &ovalue.it_interval)
8808                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8809                                             &ovalue.it_value))
8810                     return -TARGET_EFAULT;
8811             }
8812         }
8813         return ret;
8814     case TARGET_NR_getitimer:
8815         {
8816             struct itimerval value;
8817 
8818             ret = get_errno(getitimer(arg1, &value));
8819             if (!is_error(ret) && arg2) {
8820                 if (copy_to_user_timeval(arg2,
8821                                          &value.it_interval)
8822                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8823                                             &value.it_value))
8824                     return -TARGET_EFAULT;
8825             }
8826         }
8827         return ret;
8828 #ifdef TARGET_NR_stat
8829     case TARGET_NR_stat:
8830         if (!(p = lock_user_string(arg1))) {
8831             return -TARGET_EFAULT;
8832         }
8833         ret = get_errno(stat(path(p), &st));
8834         unlock_user(p, arg1, 0);
8835         goto do_stat;
8836 #endif
8837 #ifdef TARGET_NR_lstat
8838     case TARGET_NR_lstat:
8839         if (!(p = lock_user_string(arg1))) {
8840             return -TARGET_EFAULT;
8841         }
8842         ret = get_errno(lstat(path(p), &st));
8843         unlock_user(p, arg1, 0);
8844         goto do_stat;
8845 #endif
8846 #ifdef TARGET_NR_fstat
8847     case TARGET_NR_fstat:
8848         {
8849             ret = get_errno(fstat(arg1, &st));
8850 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8851         do_stat:
8852 #endif
8853             if (!is_error(ret)) {
8854                 struct target_stat *target_st;
8855 
8856                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8857                     return -TARGET_EFAULT;
8858                 memset(target_st, 0, sizeof(*target_st));
8859                 __put_user(st.st_dev, &target_st->st_dev);
8860                 __put_user(st.st_ino, &target_st->st_ino);
8861                 __put_user(st.st_mode, &target_st->st_mode);
8862                 __put_user(st.st_uid, &target_st->st_uid);
8863                 __put_user(st.st_gid, &target_st->st_gid);
8864                 __put_user(st.st_nlink, &target_st->st_nlink);
8865                 __put_user(st.st_rdev, &target_st->st_rdev);
8866                 __put_user(st.st_size, &target_st->st_size);
8867                 __put_user(st.st_blksize, &target_st->st_blksize);
8868                 __put_user(st.st_blocks, &target_st->st_blocks);
8869                 __put_user(st.st_atime, &target_st->target_st_atime);
8870                 __put_user(st.st_mtime, &target_st->target_st_mtime);
8871                 __put_user(st.st_ctime, &target_st->target_st_ctime);
8872                 unlock_user_struct(target_st, arg2, 1);
8873             }
8874         }
8875         return ret;
8876 #endif
8877     case TARGET_NR_vhangup:
8878         return get_errno(vhangup());
8879 #ifdef TARGET_NR_syscall
8880     case TARGET_NR_syscall:
8881         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8882                           arg6, arg7, arg8, 0);
8883 #endif
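         /*
          * wait4: the host status word must be converted with
          * host_to_target_waitstatus() (the signal numbers encoded in it
          * differ between host and target) and the rusage copied out in
          * target format.
          */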
8884     case TARGET_NR_wait4:
8885         {
8886             int status;
8887             abi_long status_ptr = arg2;
8888             struct rusage rusage, *rusage_ptr;
8889             abi_ulong target_rusage = arg4;
8890             abi_long rusage_err;
8891             if (target_rusage)
8892                 rusage_ptr = &rusage;
8893             else
8894                 rusage_ptr = NULL;
8895             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8896             if (!is_error(ret)) {
8897                 if (status_ptr && ret) {
8898                     status = host_to_target_waitstatus(status);
8899                     if (put_user_s32(status, status_ptr))
8900                         return -TARGET_EFAULT;
8901                 }
8902                 if (target_rusage) {
8903                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
8904                     if (rusage_err) {
8905                         ret = rusage_err;
8906                     }
8907                 }
8908             }
8909         }
8910         return ret;
8911 #ifdef TARGET_NR_swapoff
8912     case TARGET_NR_swapoff:
8913         if (!(p = lock_user_string(arg1)))
8914             return -TARGET_EFAULT;
8915         ret = get_errno(swapoff(p));
8916         unlock_user(p, arg1, 0);
8917         return ret;
8918 #endif
8919     case TARGET_NR_sysinfo:
8920         {
8921             struct target_sysinfo *target_value;
8922             struct sysinfo value;
8923             ret = get_errno(sysinfo(&value));
8924             if (!is_error(ret) && arg1)
8925             {
8926                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8927                     return -TARGET_EFAULT;
8928                 __put_user(value.uptime, &target_value->uptime);
8929                 __put_user(value.loads[0], &target_value->loads[0]);
8930                 __put_user(value.loads[1], &target_value->loads[1]);
8931                 __put_user(value.loads[2], &target_value->loads[2]);
8932                 __put_user(value.totalram, &target_value->totalram);
8933                 __put_user(value.freeram, &target_value->freeram);
8934                 __put_user(value.sharedram, &target_value->sharedram);
8935                 __put_user(value.bufferram, &target_value->bufferram);
8936                 __put_user(value.totalswap, &target_value->totalswap);
8937                 __put_user(value.freeswap, &target_value->freeswap);
8938                 __put_user(value.procs, &target_value->procs);
8939                 __put_user(value.totalhigh, &target_value->totalhigh);
8940                 __put_user(value.freehigh, &target_value->freehigh);
8941                 __put_user(value.mem_unit, &target_value->mem_unit);
8942                 unlock_user_struct(target_value, arg1, 1);
8943             }
8944         }
8945         return ret;
8946 #ifdef TARGET_NR_ipc
8947     case TARGET_NR_ipc:
8948         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
8949 #endif
8950 #ifdef TARGET_NR_semget
8951     case TARGET_NR_semget:
8952         return get_errno(semget(arg1, arg2, arg3));
8953 #endif
8954 #ifdef TARGET_NR_semop
8955     case TARGET_NR_semop:
8956         return do_semop(arg1, arg2, arg3);
8957 #endif
8958 #ifdef TARGET_NR_semctl
8959     case TARGET_NR_semctl:
8960         return do_semctl(arg1, arg2, arg3, arg4);
8961 #endif
8962 #ifdef TARGET_NR_msgctl
8963     case TARGET_NR_msgctl:
8964         return do_msgctl(arg1, arg2, arg3);
8965 #endif
8966 #ifdef TARGET_NR_msgget
8967     case TARGET_NR_msgget:
8968         return get_errno(msgget(arg1, arg2));
8969 #endif
8970 #ifdef TARGET_NR_msgrcv
8971     case TARGET_NR_msgrcv:
8972         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8973 #endif
8974 #ifdef TARGET_NR_msgsnd
8975     case TARGET_NR_msgsnd:
8976         return do_msgsnd(arg1, arg2, arg3, arg4);
8977 #endif
8978 #ifdef TARGET_NR_shmget
8979     case TARGET_NR_shmget:
8980         return get_errno(shmget(arg1, arg2, arg3));
8981 #endif
8982 #ifdef TARGET_NR_shmctl
8983     case TARGET_NR_shmctl:
8984         return do_shmctl(arg1, arg2, arg3);
8985 #endif
8986 #ifdef TARGET_NR_shmat
8987     case TARGET_NR_shmat:
8988         return do_shmat(cpu_env, arg1, arg2, arg3);
8989 #endif
8990 #ifdef TARGET_NR_shmdt
8991     case TARGET_NR_shmdt:
8992         return do_shmdt(arg1);
8993 #endif
8994     case TARGET_NR_fsync:
8995         return get_errno(fsync(arg1));
8996     case TARGET_NR_clone:
8997         /* Linux manages to have three different orderings for its
8998          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8999          * match the kernel's CONFIG_CLONE_* settings.
9000          * Microblaze is further special in that it uses a sixth
9001          * implicit argument to clone for the TLS pointer.
9002          */
9003 #if defined(TARGET_MICROBLAZE)
9004         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9005 #elif defined(TARGET_CLONE_BACKWARDS)
9006         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9007 #elif defined(TARGET_CLONE_BACKWARDS2)
9008         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9009 #else
9010         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9011 #endif
9012         return ret;
9013 #ifdef __NR_exit_group
9014         /* new thread calls */
9015     case TARGET_NR_exit_group:
9016         preexit_cleanup(cpu_env, arg1);
9017         return get_errno(exit_group(arg1));
9018 #endif
9019     case TARGET_NR_setdomainname:
9020         if (!(p = lock_user_string(arg1)))
9021             return -TARGET_EFAULT;
9022         ret = get_errno(setdomainname(p, arg2));
9023         unlock_user(p, arg1, 0);
9024         return ret;
9025     case TARGET_NR_uname:
9026         /* no need to transcode because we use the linux syscall */
9027         {
9028             struct new_utsname * buf;
9029 
9030             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9031                 return -TARGET_EFAULT;
9032             ret = get_errno(sys_uname(buf));
9033             if (!is_error(ret)) {
9034                 /* Overwrite the native machine name with whatever is being
9035                    emulated. */
9036                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9037                           sizeof(buf->machine));
9038                 /* Allow the user to override the reported release.  */
9039                 if (qemu_uname_release && *qemu_uname_release) {
9040                     g_strlcpy(buf->release, qemu_uname_release,
9041                               sizeof(buf->release));
9042                 }
9043             }
9044             unlock_user_struct(buf, arg1, 1);
9045         }
9046         return ret;
9047 #ifdef TARGET_I386
9048     case TARGET_NR_modify_ldt:
9049         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9050 #if !defined(TARGET_X86_64)
9051     case TARGET_NR_vm86:
9052         return do_vm86(cpu_env, arg1, arg2);
9053 #endif
9054 #endif
9055     case TARGET_NR_adjtimex:
9056         {
9057             struct timex host_buf;
9058 
9059             if (target_to_host_timex(&host_buf, arg1) != 0) {
9060                 return -TARGET_EFAULT;
9061             }
9062             ret = get_errno(adjtimex(&host_buf));
9063             if (!is_error(ret)) {
9064                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9065                     return -TARGET_EFAULT;
9066                 }
9067             }
9068         }
9069         return ret;
9070 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9071     case TARGET_NR_clock_adjtime:
9072         {
9073             struct timex htx, *phtx = &htx;
9074 
9075             if (target_to_host_timex(phtx, arg2) != 0) {
9076                 return -TARGET_EFAULT;
9077             }
9078             ret = get_errno(clock_adjtime(arg1, phtx));
9079             if (!is_error(ret) && phtx) {
9080                 if (host_to_target_timex(arg2, phtx) != 0) {
9081                     return -TARGET_EFAULT;
9082                 }
9083             }
9084         }
9085         return ret;
9086 #endif
9087     case TARGET_NR_getpgid:
9088         return get_errno(getpgid(arg1));
9089     case TARGET_NR_fchdir:
9090         return get_errno(fchdir(arg1));
9091     case TARGET_NR_personality:
9092         return get_errno(personality(arg1));
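         /*
          * _llseek combines the two 32-bit halves in arg2/arg3 into a
          * 64-bit offset and stores the resulting file position at the
          * guest address in arg4.  On hosts without the llseek syscall a
          * plain 64-bit lseek() is used instead.
          */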
9093 #ifdef TARGET_NR__llseek /* Not on alpha */
9094     case TARGET_NR__llseek:
9095         {
9096             int64_t res;
9097 #if !defined(__NR_llseek)
9098             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9099             if (res == -1) {
9100                 ret = get_errno(res);
9101             } else {
9102                 ret = 0;
9103             }
9104 #else
9105             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9106 #endif
9107             if ((ret == 0) && put_user_s64(res, arg4)) {
9108                 return -TARGET_EFAULT;
9109             }
9110         }
9111         return ret;
9112 #endif
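         /*
          * getdents is implemented three different ways depending on the
          * host/target combination: converting host dirents into the
          * target layout, byte-swapping the records in place when the
          * layouts already match, or emulating it on top of getdents64
          * when the host has no getdents syscall.
          */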
9113 #ifdef TARGET_NR_getdents
9114     case TARGET_NR_getdents:
9115 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9116 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9117         {
9118             struct target_dirent *target_dirp;
9119             struct linux_dirent *dirp;
9120             abi_long count = arg3;
9121 
9122             dirp = g_try_malloc(count);
9123             if (!dirp) {
9124                 return -TARGET_ENOMEM;
9125             }
9126 
9127             ret = get_errno(sys_getdents(arg1, dirp, count));
9128             if (!is_error(ret)) {
9129                 struct linux_dirent *de;
9130 		struct target_dirent *tde;
9131                 int len = ret;
9132                 int reclen, treclen;
9133 		int count1, tnamelen;
9134 
9135 		count1 = 0;
9136                 de = dirp;
9137                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9138                     return -TARGET_EFAULT;
9139 		tde = target_dirp;
9140                 while (len > 0) {
9141                     reclen = de->d_reclen;
9142                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9143                     assert(tnamelen >= 0);
9144                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9145                     assert(count1 + treclen <= count);
9146                     tde->d_reclen = tswap16(treclen);
9147                     tde->d_ino = tswapal(de->d_ino);
9148                     tde->d_off = tswapal(de->d_off);
9149                     memcpy(tde->d_name, de->d_name, tnamelen);
9150                     de = (struct linux_dirent *)((char *)de + reclen);
9151                     len -= reclen;
9152                     tde = (struct target_dirent *)((char *)tde + treclen);
9153                     count1 += treclen;
9154                 }
9155                 ret = count1;
9156                 unlock_user(target_dirp, arg2, ret);
9157             }
9158             g_free(dirp);
9159         }
9160 #else
9161         {
9162             struct linux_dirent *dirp;
9163             abi_long count = arg3;
9164 
9165             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9166                 return -TARGET_EFAULT;
9167             ret = get_errno(sys_getdents(arg1, dirp, count));
9168             if (!is_error(ret)) {
9169                 struct linux_dirent *de;
9170                 int len = ret;
9171                 int reclen;
9172                 de = dirp;
9173                 while (len > 0) {
9174                     reclen = de->d_reclen;
9175                     if (reclen > len)
9176                         break;
9177                     de->d_reclen = tswap16(reclen);
9178                     tswapls(&de->d_ino);
9179                     tswapls(&de->d_off);
9180                     de = (struct linux_dirent *)((char *)de + reclen);
9181                     len -= reclen;
9182                 }
9183             }
9184             unlock_user(dirp, arg2, ret);
9185         }
9186 #endif
9187 #else
9188         /* Implement getdents in terms of getdents64 */
9189         {
9190             struct linux_dirent64 *dirp;
9191             abi_long count = arg3;
9192 
9193             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9194             if (!dirp) {
9195                 return -TARGET_EFAULT;
9196             }
9197             ret = get_errno(sys_getdents64(arg1, dirp, count));
9198             if (!is_error(ret)) {
9199                 /* Convert the dirent64 structs to target dirent.  We do this
9200                  * in-place, since we can guarantee that a target_dirent is no
9201                  * larger than a dirent64; however this means we have to be
9202                  * careful to read everything before writing in the new format.
9203                  */
9204                 struct linux_dirent64 *de;
9205                 struct target_dirent *tde;
9206                 int len = ret;
9207                 int tlen = 0;
9208 
9209                 de = dirp;
9210                 tde = (struct target_dirent *)dirp;
9211                 while (len > 0) {
9212                     int namelen, treclen;
9213                     int reclen = de->d_reclen;
9214                     uint64_t ino = de->d_ino;
9215                     int64_t off = de->d_off;
9216                     uint8_t type = de->d_type;
9217 
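                    /*
                     * The target record needs room for the name, its NUL
                     * terminator and the d_type byte stored in the record's
                     * final byte (see below), rounded up to an abi_long.
                     */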
9218                     namelen = strlen(de->d_name);
9219                     treclen = offsetof(struct target_dirent, d_name)
9220                         + namelen + 2;
9221                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9222 
9223                     memmove(tde->d_name, de->d_name, namelen + 1);
9224                     tde->d_ino = tswapal(ino);
9225                     tde->d_off = tswapal(off);
9226                     tde->d_reclen = tswap16(treclen);
9227                     /* The target_dirent type is in what was formerly a padding
9228                      * byte at the end of the structure:
9229                      */
9230                     *(((char *)tde) + treclen - 1) = type;
9231 
9232                     de = (struct linux_dirent64 *)((char *)de + reclen);
9233                     tde = (struct target_dirent *)((char *)tde + treclen);
9234                     len -= reclen;
9235                     tlen += treclen;
9236                 }
9237                 ret = tlen;
9238             }
9239             unlock_user(dirp, arg2, ret);
9240         }
9241 #endif
9242         return ret;
9243 #endif /* TARGET_NR_getdents */
9244 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9245     case TARGET_NR_getdents64:
9246         {
9247             struct linux_dirent64 *dirp;
9248             abi_long count = arg3;
9249             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9250                 return -TARGET_EFAULT;
9251             ret = get_errno(sys_getdents64(arg1, dirp, count));
9252             if (!is_error(ret)) {
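                /*
                 * The host and target dirent64 layouts match, so only the
                 * d_reclen, d_ino and d_off fields need byteswapping in place.
                 */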
9253                 struct linux_dirent64 *de;
9254                 int len = ret;
9255                 int reclen;
9256                 de = dirp;
9257                 while (len > 0) {
9258                     reclen = de->d_reclen;
9259                     if (reclen > len)
9260                         break;
9261                     de->d_reclen = tswap16(reclen);
9262                     tswap64s((uint64_t *)&de->d_ino);
9263                     tswap64s((uint64_t *)&de->d_off);
9264                     de = (struct linux_dirent64 *)((char *)de + reclen);
9265                     len -= reclen;
9266                 }
9267             }
9268             unlock_user(dirp, arg2, ret);
9269         }
9270         return ret;
9271 #endif /* TARGET_NR_getdents64 */
9272 #if defined(TARGET_NR__newselect)
9273     case TARGET_NR__newselect:
9274         return do_select(arg1, arg2, arg3, arg4, arg5);
9275 #endif
9276 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9277 # ifdef TARGET_NR_poll
9278     case TARGET_NR_poll:
9279 # endif
9280 # ifdef TARGET_NR_ppoll
9281     case TARGET_NR_ppoll:
9282 # endif
9283         {
9284             struct target_pollfd *target_pfd;
9285             unsigned int nfds = arg2;
9286             struct pollfd *pfd;
9287             unsigned int i;
9288 
9289             pfd = NULL;
9290             target_pfd = NULL;
9291             if (nfds) {
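                /* Reject nfds values that would overflow the buffer size
                 * calculations below. */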
9292                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9293                     return -TARGET_EINVAL;
9294                 }
9295 
9296                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9297                                        sizeof(struct target_pollfd) * nfds, 1);
9298                 if (!target_pfd) {
9299                     return -TARGET_EFAULT;
9300                 }
9301 
9302                 pfd = alloca(sizeof(struct pollfd) * nfds);
9303                 for (i = 0; i < nfds; i++) {
9304                     pfd[i].fd = tswap32(target_pfd[i].fd);
9305                     pfd[i].events = tswap16(target_pfd[i].events);
9306                 }
9307             }
9308 
9309             switch (num) {
9310 # ifdef TARGET_NR_ppoll
9311             case TARGET_NR_ppoll:
9312             {
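                /*
                 * ppoll takes an optional timeout and an optional signal
                 * mask; both must be converted from the target format, and
                 * the (possibly updated) timeout is copied back afterwards.
                 */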
9313                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9314                 target_sigset_t *target_set;
9315                 sigset_t _set, *set = &_set;
9316 
9317                 if (arg3) {
9318                     if (target_to_host_timespec(timeout_ts, arg3)) {
9319                         unlock_user(target_pfd, arg1, 0);
9320                         return -TARGET_EFAULT;
9321                     }
9322                 } else {
9323                     timeout_ts = NULL;
9324                 }
9325 
9326                 if (arg4) {
9327                     if (arg5 != sizeof(target_sigset_t)) {
9328                         unlock_user(target_pfd, arg1, 0);
9329                         return -TARGET_EINVAL;
9330                     }
9331 
9332                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9333                     if (!target_set) {
9334                         unlock_user(target_pfd, arg1, 0);
9335                         return -TARGET_EFAULT;
9336                     }
9337                     target_to_host_sigset(set, target_set);
9338                 } else {
9339                     set = NULL;
9340                 }
9341 
9342                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9343                                            set, SIGSET_T_SIZE));
9344 
9345                 if (!is_error(ret) && arg3) {
9346                     host_to_target_timespec(arg3, timeout_ts);
9347                 }
9348                 if (arg4) {
9349                     unlock_user(target_set, arg4, 0);
9350                 }
9351                 break;
9352             }
9353 # endif
9354 # ifdef TARGET_NR_poll
9355             case TARGET_NR_poll:
9356             {
9357                 struct timespec ts, *pts;
9358 
9359                 if (arg3 >= 0) {
9360                     /* Convert ms to secs, ns */
9361                     ts.tv_sec = arg3 / 1000;
9362                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9363                     pts = &ts;
9364                 } else {
9365                     /* -ve poll() timeout means "infinite" */
9366                     /* A negative poll() timeout means "infinite" */
9367                 }
9368                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9369                 break;
9370             }
9371 # endif
9372             default:
9373                 g_assert_not_reached();
9374             }
9375 
9376             if (!is_error(ret)) {
9377                 for (i = 0; i < nfds; i++) {
9378                     target_pfd[i].revents = tswap16(pfd[i].revents);
9379                 }
9380             }
9381             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9382         }
9383         return ret;
9384 #endif
9385     case TARGET_NR_flock:
9386         /* NOTE: the flock constant seems to be the same for every
9387            Linux platform */
9388         return get_errno(safe_flock(arg1, arg2));
9389     case TARGET_NR_readv:
9390         {
9391             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9392             if (vec != NULL) {
9393                 ret = get_errno(safe_readv(arg1, vec, arg3));
9394                 unlock_iovec(vec, arg2, arg3, 1);
9395             } else {
9396                 ret = -host_to_target_errno(errno);
9397             }
9398         }
9399         return ret;
9400     case TARGET_NR_writev:
9401         {
9402             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9403             if (vec != NULL) {
9404                 ret = get_errno(safe_writev(arg1, vec, arg3));
9405                 unlock_iovec(vec, arg2, arg3, 0);
9406             } else {
9407                 ret = -host_to_target_errno(errno);
9408             }
9409         }
9410         return ret;
9411 #if defined(TARGET_NR_preadv)
9412     case TARGET_NR_preadv:
9413         {
9414             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9415             if (vec != NULL) {
9416                 unsigned long low, high;
9417 
9418                 target_to_host_low_high(arg4, arg5, &low, &high);
9419                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9420                 unlock_iovec(vec, arg2, arg3, 1);
9421             } else {
9422                 ret = -host_to_target_errno(errno);
9423             }
9424         }
9425         return ret;
9426 #endif
9427 #if defined(TARGET_NR_pwritev)
9428     case TARGET_NR_pwritev:
9429         {
9430             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9431             if (vec != NULL) {
9432                 unsigned long low, high;
9433 
9434                 target_to_host_low_high(arg4, arg5, &low, &high);
9435                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9436                 unlock_iovec(vec, arg2, arg3, 0);
9437             } else {
9438                 ret = -host_to_target_errno(errno);
9439             }
9440         }
9441         return ret;
9442 #endif
9443     case TARGET_NR_getsid:
9444         return get_errno(getsid(arg1));
9445 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9446     case TARGET_NR_fdatasync:
9447         return get_errno(fdatasync(arg1));
9448 #endif
9449 #ifdef TARGET_NR__sysctl
9450     case TARGET_NR__sysctl:
9451         /* We don't implement this, but ENOTDIR is always a safe
9452            return value. */
9453         return -TARGET_ENOTDIR;
9454 #endif
9455     case TARGET_NR_sched_getaffinity:
9456         {
9457             unsigned int mask_size;
9458             unsigned long *mask;
9459 
9460             /*
9461              * sched_getaffinity needs the mask size to be a multiple of ulong,
9462              * so we must handle any mismatch between target and host ulong sizes.
9463              */
9464             if (arg2 & (sizeof(abi_ulong) - 1)) {
9465                 return -TARGET_EINVAL;
9466             }
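            /* Round the buffer size up to a whole number of host longs. */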
9467             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9468 
9469             mask = alloca(mask_size);
9470             memset(mask, 0, mask_size);
9471             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9472 
9473             if (!is_error(ret)) {
9474                 if (ret > arg2) {
9475                     /* More data returned than the caller's buffer will fit.
9476                      * This only happens if sizeof(abi_long) < sizeof(long)
9477                      * and the caller passed us a buffer holding an odd number
9478                      * of abi_longs. If the host kernel is actually using the
9479                      * extra 4 bytes then fail EINVAL; otherwise we can just
9480                      * ignore them and only copy the interesting part.
9481                      */
9482                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9483                     if (numcpus > arg2 * 8) {
9484                         return -TARGET_EINVAL;
9485                     }
9486                     ret = arg2;
9487                 }
9488 
9489                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9490                     return -TARGET_EFAULT;
9491                 }
9492             }
9493         }
9494         return ret;
9495     case TARGET_NR_sched_setaffinity:
9496         {
9497             unsigned int mask_size;
9498             unsigned long *mask;
9499 
9500             /*
9501              * sched_setaffinity needs the mask size to be a multiple of ulong,
9502              * so we must handle any mismatch between target and host ulong sizes.
9503              */
9504             if (arg2 & (sizeof(abi_ulong) - 1)) {
9505                 return -TARGET_EINVAL;
9506             }
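            /* Round the buffer size up to a whole number of host longs. */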
9507             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9508             mask = alloca(mask_size);
9509 
9510             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9511             if (ret) {
9512                 return ret;
9513             }
9514 
9515             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9516         }
9517     case TARGET_NR_getcpu:
9518         {
9519             unsigned cpu, node;
9520             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9521                                        arg2 ? &node : NULL,
9522                                        NULL));
9523             if (is_error(ret)) {
9524                 return ret;
9525             }
9526             if (arg1 && put_user_u32(cpu, arg1)) {
9527                 return -TARGET_EFAULT;
9528             }
9529             if (arg2 && put_user_u32(node, arg2)) {
9530                 return -TARGET_EFAULT;
9531             }
9532         }
9533         return ret;
9534     case TARGET_NR_sched_setparam:
9535         {
9536             struct sched_param *target_schp;
9537             struct sched_param schp;
9538 
9539             if (arg2 == 0) {
9540                 return -TARGET_EINVAL;
9541             }
9542             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9543                 return -TARGET_EFAULT;
9544             schp.sched_priority = tswap32(target_schp->sched_priority);
9545             unlock_user_struct(target_schp, arg2, 0);
9546             return get_errno(sched_setparam(arg1, &schp));
9547         }
9548     case TARGET_NR_sched_getparam:
9549         {
9550             struct sched_param *target_schp;
9551             struct sched_param schp;
9552 
9553             if (arg2 == 0) {
9554                 return -TARGET_EINVAL;
9555             }
9556             ret = get_errno(sched_getparam(arg1, &schp));
9557             if (!is_error(ret)) {
9558                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9559                     return -TARGET_EFAULT;
9560                 target_schp->sched_priority = tswap32(schp.sched_priority);
9561                 unlock_user_struct(target_schp, arg2, 1);
9562             }
9563         }
9564         return ret;
9565     case TARGET_NR_sched_setscheduler:
9566         {
9567             struct sched_param *target_schp;
9568             struct sched_param schp;
9569             if (arg3 == 0) {
9570                 return -TARGET_EINVAL;
9571             }
9572             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9573                 return -TARGET_EFAULT;
9574             schp.sched_priority = tswap32(target_schp->sched_priority);
9575             unlock_user_struct(target_schp, arg3, 0);
9576             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9577         }
9578     case TARGET_NR_sched_getscheduler:
9579         return get_errno(sched_getscheduler(arg1));
9580     case TARGET_NR_sched_yield:
9581         return get_errno(sched_yield());
9582     case TARGET_NR_sched_get_priority_max:
9583         return get_errno(sched_get_priority_max(arg1));
9584     case TARGET_NR_sched_get_priority_min:
9585         return get_errno(sched_get_priority_min(arg1));
9586     case TARGET_NR_sched_rr_get_interval:
9587         {
9588             struct timespec ts;
9589             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9590             if (!is_error(ret)) {
9591                 ret = host_to_target_timespec(arg2, &ts);
9592             }
9593         }
9594         return ret;
9595     case TARGET_NR_nanosleep:
9596         {
9597             struct timespec req, rem;
9598             target_to_host_timespec(&req, arg1);
9599             ret = get_errno(safe_nanosleep(&req, &rem));
9600             if (is_error(ret) && arg2) {
9601                 host_to_target_timespec(arg2, &rem);
9602             }
9603         }
9604         return ret;
9605     case TARGET_NR_prctl:
9606         switch (arg1) {
9607         case PR_GET_PDEATHSIG:
9608         {
9609             int deathsig;
9610             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9611             if (!is_error(ret) && arg2
9612                 && put_user_ual(deathsig, arg2)) {
9613                 return -TARGET_EFAULT;
9614             }
9615             return ret;
9616         }
9617 #ifdef PR_GET_NAME
9618         case PR_GET_NAME:
9619         {
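            /* The task name buffer is TASK_COMM_LEN (16) bytes, including
             * the trailing NUL. */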
9620             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9621             if (!name) {
9622                 return -TARGET_EFAULT;
9623             }
9624             ret = get_errno(prctl(arg1, (unsigned long)name,
9625                                   arg3, arg4, arg5));
9626             unlock_user(name, arg2, 16);
9627             return ret;
9628         }
9629         case PR_SET_NAME:
9630         {
9631             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9632             if (!name) {
9633                 return -TARGET_EFAULT;
9634             }
9635             ret = get_errno(prctl(arg1, (unsigned long)name,
9636                                   arg3, arg4, arg5));
9637             unlock_user(name, arg2, 0);
9638             return ret;
9639         }
9640 #endif
9641 #ifdef TARGET_MIPS
9642         case TARGET_PR_GET_FP_MODE:
9643         {
9644             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9645             ret = 0;
9646             if (env->CP0_Status & (1 << CP0St_FR)) {
9647                 ret |= TARGET_PR_FP_MODE_FR;
9648             }
9649             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9650                 ret |= TARGET_PR_FP_MODE_FRE;
9651             }
9652             return ret;
9653         }
9654         case TARGET_PR_SET_FP_MODE:
9655         {
9656             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9657             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9658             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
9659             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9660             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9661 
9662             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
9663                                             TARGET_PR_FP_MODE_FRE;
9664 
9665             /* If nothing to change, return right away, successfully.  */
9666             if (old_fr == new_fr && old_fre == new_fre) {
9667                 return 0;
9668             }
9669             /* Check the value is valid */
9670             if (arg2 & ~known_bits) {
9671                 return -TARGET_EOPNOTSUPP;
9672             }
9673             /* Setting FRE without FR is not supported.  */
9674             if (new_fre && !new_fr) {
9675                 return -TARGET_EOPNOTSUPP;
9676             }
9677             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9678                 /* FR1 is not supported */
9679                 return -TARGET_EOPNOTSUPP;
9680             }
9681             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9682                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9683                 /* cannot set FR=0 */
9684                 return -TARGET_EOPNOTSUPP;
9685             }
9686             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9687                 /* Cannot set FRE=1 */
9688                 return -TARGET_EOPNOTSUPP;
9689             }
9690 
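            /*
             * Switching FR changes how 64-bit values map onto the even/odd
             * FPR pairs, so migrate the upper halves between the pair
             * members accordingly.
             */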
9691             int i;
9692             fpr_t *fpr = env->active_fpu.fpr;
9693             for (i = 0; i < 32 ; i += 2) {
9694                 if (!old_fr && new_fr) {
9695                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9696                 } else if (old_fr && !new_fr) {
9697                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
9698                 }
9699             }
9700 
9701             if (new_fr) {
9702                 env->CP0_Status |= (1 << CP0St_FR);
9703                 env->hflags |= MIPS_HFLAG_F64;
9704             } else {
9705                 env->CP0_Status &= ~(1 << CP0St_FR);
9706                 env->hflags &= ~MIPS_HFLAG_F64;
9707             }
9708             if (new_fre) {
9709                 env->CP0_Config5 |= (1 << CP0C5_FRE);
9710                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
9711                     env->hflags |= MIPS_HFLAG_FRE;
9712                 }
9713             } else {
9714                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
9715                 env->hflags &= ~MIPS_HFLAG_FRE;
9716             }
9717 
9718             return 0;
9719         }
9720 #endif /* MIPS */
9721 #ifdef TARGET_AARCH64
9722         case TARGET_PR_SVE_SET_VL:
9723             /*
9724              * We cannot support either PR_SVE_SET_VL_ONEXEC or
9725              * PR_SVE_VL_INHERIT.  Note the kernel definition
9726              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9727              * even though the current architectural maximum is VQ=16.
9728              */
9729             ret = -TARGET_EINVAL;
9730             if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env))
9731                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9732                 CPUARMState *env = cpu_env;
9733                 ARMCPU *cpu = arm_env_get_cpu(env);
9734                 uint32_t vq, old_vq;
9735 
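                /*
                 * arg2 is the requested vector length in bytes; convert it
                 * to quadwords (VQ) and clamp it to what this CPU supports.
                 */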
9736                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9737                 vq = MAX(arg2 / 16, 1);
9738                 vq = MIN(vq, cpu->sve_max_vq);
9739 
9740                 if (vq < old_vq) {
9741                     aarch64_sve_narrow_vq(env, vq);
9742                 }
9743                 env->vfp.zcr_el[1] = vq - 1;
9744                 ret = vq * 16;
9745             }
9746             return ret;
9747         case TARGET_PR_SVE_GET_VL:
9748             ret = -TARGET_EINVAL;
9749             {
9750                 ARMCPU *cpu = arm_env_get_cpu(cpu_env);
9751                 if (cpu_isar_feature(aa64_sve, cpu)) {
9752                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9753                 }
9754             }
9755             return ret;
9756         case TARGET_PR_PAC_RESET_KEYS:
9757             {
9758                 CPUARMState *env = cpu_env;
9759                 ARMCPU *cpu = arm_env_get_cpu(env);
9760 
9761                 if (arg3 || arg4 || arg5) {
9762                     return -TARGET_EINVAL;
9763                 }
9764                 if (cpu_isar_feature(aa64_pauth, cpu)) {
9765                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
9766                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
9767                                TARGET_PR_PAC_APGAKEY);
9768                     if (arg2 == 0) {
9769                         arg2 = all;
9770                     } else if (arg2 & ~all) {
9771                         return -TARGET_EINVAL;
9772                     }
9773                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
9774                         arm_init_pauth_key(&env->apia_key);
9775                     }
9776                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
9777                         arm_init_pauth_key(&env->apib_key);
9778                     }
9779                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
9780                         arm_init_pauth_key(&env->apda_key);
9781                     }
9782                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
9783                         arm_init_pauth_key(&env->apdb_key);
9784                     }
9785                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
9786                         arm_init_pauth_key(&env->apga_key);
9787                     }
9788                     return 0;
9789                 }
9790             }
9791             return -TARGET_EINVAL;
9792 #endif /* AARCH64 */
9793         case PR_GET_SECCOMP:
9794         case PR_SET_SECCOMP:
9795             /* Disable seccomp to prevent the target disabling syscalls we
9796              * need. */
9797             return -TARGET_EINVAL;
9798         default:
9799             /* Most prctl options have no pointer arguments */
9800             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9801         }
9802         break;
9803 #ifdef TARGET_NR_arch_prctl
9804     case TARGET_NR_arch_prctl:
9805 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9806         return do_arch_prctl(cpu_env, arg1, arg2);
9807 #else
9808 #error unreachable
9809 #endif
9810 #endif
9811 #ifdef TARGET_NR_pread64
9812     case TARGET_NR_pread64:
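        /*
         * On ABIs that pass 64-bit values in aligned register pairs, the
         * offset halves arrive one argument slot later, so shift them down.
         */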
9813         if (regpairs_aligned(cpu_env, num)) {
9814             arg4 = arg5;
9815             arg5 = arg6;
9816         }
9817         if (arg2 == 0 && arg3 == 0) {
9818             /* Special-case NULL buffer and zero length, which should succeed */
9819             p = 0;
9820         } else {
9821             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9822             if (!p) {
9823                 return -TARGET_EFAULT;
9824             }
9825         }
9826         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9827         unlock_user(p, arg2, ret);
9828         return ret;
9829     case TARGET_NR_pwrite64:
9830         if (regpairs_aligned(cpu_env, num)) {
9831             arg4 = arg5;
9832             arg5 = arg6;
9833         }
9834         if (arg2 == 0 && arg3 == 0) {
9835             /* Special-case NULL buffer and zero length, which should succeed */
9836             p = 0;
9837         } else {
9838             p = lock_user(VERIFY_READ, arg2, arg3, 1);
9839             if (!p) {
9840                 return -TARGET_EFAULT;
9841             }
9842         }
9843         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9844         unlock_user(p, arg2, 0);
9845         return ret;
9846 #endif
9847     case TARGET_NR_getcwd:
9848         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9849             return -TARGET_EFAULT;
9850         ret = get_errno(sys_getcwd1(p, arg2));
9851         unlock_user(p, arg1, ret);
9852         return ret;
9853     case TARGET_NR_capget:
9854     case TARGET_NR_capset:
9855     {
9856         struct target_user_cap_header *target_header;
9857         struct target_user_cap_data *target_data = NULL;
9858         struct __user_cap_header_struct header;
9859         struct __user_cap_data_struct data[2];
9860         struct __user_cap_data_struct *dataptr = NULL;
9861         int i, target_datalen;
9862         int data_items = 1;
9863 
9864         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9865             return -TARGET_EFAULT;
9866         }
9867         header.version = tswap32(target_header->version);
9868         header.pid = tswap32(target_header->pid);
9869 
9870         if (header.version != _LINUX_CAPABILITY_VERSION) {
9871             /* Versions 2 and up take a pointer to two user_data structs */
9872             data_items = 2;
9873         }
9874 
9875         target_datalen = sizeof(*target_data) * data_items;
9876 
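        /*
         * A NULL guest data pointer is forwarded as a NULL dataptr; the
         * kernel then just updates the header version, which is how callers
         * probe the preferred capability version.
         */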
9877         if (arg2) {
9878             if (num == TARGET_NR_capget) {
9879                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9880             } else {
9881                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9882             }
9883             if (!target_data) {
9884                 unlock_user_struct(target_header, arg1, 0);
9885                 return -TARGET_EFAULT;
9886             }
9887 
9888             if (num == TARGET_NR_capset) {
9889                 for (i = 0; i < data_items; i++) {
9890                     data[i].effective = tswap32(target_data[i].effective);
9891                     data[i].permitted = tswap32(target_data[i].permitted);
9892                     data[i].inheritable = tswap32(target_data[i].inheritable);
9893                 }
9894             }
9895 
9896             dataptr = data;
9897         }
9898 
9899         if (num == TARGET_NR_capget) {
9900             ret = get_errno(capget(&header, dataptr));
9901         } else {
9902             ret = get_errno(capset(&header, dataptr));
9903         }
9904 
9905         /* The kernel always updates version for both capget and capset */
9906         target_header->version = tswap32(header.version);
9907         unlock_user_struct(target_header, arg1, 1);
9908 
9909         if (arg2) {
9910             if (num == TARGET_NR_capget) {
9911                 for (i = 0; i < data_items; i++) {
9912                     target_data[i].effective = tswap32(data[i].effective);
9913                     target_data[i].permitted = tswap32(data[i].permitted);
9914                     target_data[i].inheritable = tswap32(data[i].inheritable);
9915                 }
9916                 unlock_user(target_data, arg2, target_datalen);
9917             } else {
9918                 unlock_user(target_data, arg2, 0);
9919             }
9920         }
9921         return ret;
9922     }
9923     case TARGET_NR_sigaltstack:
9924         return do_sigaltstack(arg1, arg2,
9925                               get_sp_from_cpustate((CPUArchState *)cpu_env));
9926 
9927 #ifdef CONFIG_SENDFILE
9928 #ifdef TARGET_NR_sendfile
9929     case TARGET_NR_sendfile:
9930     {
9931         off_t *offp = NULL;
9932         off_t off;
9933         if (arg3) {
9934             ret = get_user_sal(off, arg3);
9935             if (is_error(ret)) {
9936                 return ret;
9937             }
9938             offp = &off;
9939         }
9940         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9941         if (!is_error(ret) && arg3) {
9942             abi_long ret2 = put_user_sal(off, arg3);
9943             if (is_error(ret2)) {
9944                 ret = ret2;
9945             }
9946         }
9947         return ret;
9948     }
9949 #endif
9950 #ifdef TARGET_NR_sendfile64
9951     case TARGET_NR_sendfile64:
9952     {
9953         off_t *offp = NULL;
9954         off_t off;
9955         if (arg3) {
9956             ret = get_user_s64(off, arg3);
9957             if (is_error(ret)) {
9958                 return ret;
9959             }
9960             offp = &off;
9961         }
9962         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9963         if (!is_error(ret) && arg3) {
9964             abi_long ret2 = put_user_s64(off, arg3);
9965             if (is_error(ret2)) {
9966                 ret = ret2;
9967             }
9968         }
9969         return ret;
9970     }
9971 #endif
9972 #endif
9973 #ifdef TARGET_NR_vfork
9974     case TARGET_NR_vfork:
9975         return get_errno(do_fork(cpu_env,
9976                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
9977                          0, 0, 0, 0));
9978 #endif
9979 #ifdef TARGET_NR_ugetrlimit
9980     case TARGET_NR_ugetrlimit:
9981     {
9982         struct rlimit rlim;
9983         int resource = target_to_host_resource(arg1);
9984         ret = get_errno(getrlimit(resource, &rlim));
9985         if (!is_error(ret)) {
9986             struct target_rlimit *target_rlim;
9987             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9988                 return -TARGET_EFAULT;
9989             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9990             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9991             unlock_user_struct(target_rlim, arg2, 1);
9992         }
9993         return ret;
9994     }
9995 #endif
9996 #ifdef TARGET_NR_truncate64
9997     case TARGET_NR_truncate64:
9998         if (!(p = lock_user_string(arg1)))
9999             return -TARGET_EFAULT;
10000         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10001         unlock_user(p, arg1, 0);
10002         return ret;
10003 #endif
10004 #ifdef TARGET_NR_ftruncate64
10005     case TARGET_NR_ftruncate64:
10006         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10007 #endif
10008 #ifdef TARGET_NR_stat64
10009     case TARGET_NR_stat64:
10010         if (!(p = lock_user_string(arg1))) {
10011             return -TARGET_EFAULT;
10012         }
10013         ret = get_errno(stat(path(p), &st));
10014         unlock_user(p, arg1, 0);
10015         if (!is_error(ret))
10016             ret = host_to_target_stat64(cpu_env, arg2, &st);
10017         return ret;
10018 #endif
10019 #ifdef TARGET_NR_lstat64
10020     case TARGET_NR_lstat64:
10021         if (!(p = lock_user_string(arg1))) {
10022             return -TARGET_EFAULT;
10023         }
10024         ret = get_errno(lstat(path(p), &st));
10025         unlock_user(p, arg1, 0);
10026         if (!is_error(ret))
10027             ret = host_to_target_stat64(cpu_env, arg2, &st);
10028         return ret;
10029 #endif
10030 #ifdef TARGET_NR_fstat64
10031     case TARGET_NR_fstat64:
10032         ret = get_errno(fstat(arg1, &st));
10033         if (!is_error(ret))
10034             ret = host_to_target_stat64(cpu_env, arg2, &st);
10035         return ret;
10036 #endif
10037 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10038 #ifdef TARGET_NR_fstatat64
10039     case TARGET_NR_fstatat64:
10040 #endif
10041 #ifdef TARGET_NR_newfstatat
10042     case TARGET_NR_newfstatat:
10043 #endif
10044         if (!(p = lock_user_string(arg2))) {
10045             return -TARGET_EFAULT;
10046         }
10047         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10048         unlock_user(p, arg2, 0);
10049         if (!is_error(ret))
10050             ret = host_to_target_stat64(cpu_env, arg3, &st);
10051         return ret;
10052 #endif
10053 #ifdef TARGET_NR_lchown
10054     case TARGET_NR_lchown:
10055         if (!(p = lock_user_string(arg1)))
10056             return -TARGET_EFAULT;
10057         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10058         unlock_user(p, arg1, 0);
10059         return ret;
10060 #endif
10061 #ifdef TARGET_NR_getuid
10062     case TARGET_NR_getuid:
10063         return get_errno(high2lowuid(getuid()));
10064 #endif
10065 #ifdef TARGET_NR_getgid
10066     case TARGET_NR_getgid:
10067         return get_errno(high2lowgid(getgid()));
10068 #endif
10069 #ifdef TARGET_NR_geteuid
10070     case TARGET_NR_geteuid:
10071         return get_errno(high2lowuid(geteuid()));
10072 #endif
10073 #ifdef TARGET_NR_getegid
10074     case TARGET_NR_getegid:
10075         return get_errno(high2lowgid(getegid()));
10076 #endif
10077     case TARGET_NR_setreuid:
10078         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10079     case TARGET_NR_setregid:
10080         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10081     case TARGET_NR_getgroups:
10082         {
10083             int gidsetsize = arg1;
10084             target_id *target_grouplist;
10085             gid_t *grouplist;
10086             int i;
10087 
10088             grouplist = alloca(gidsetsize * sizeof(gid_t));
10089             ret = get_errno(getgroups(gidsetsize, grouplist));
10090             if (gidsetsize == 0)
10091                 return ret;
10092             if (!is_error(ret)) {
10093                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10094                 if (!target_grouplist)
10095                     return -TARGET_EFAULT;
10096                 for (i = 0; i < ret; i++)
10097                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10098                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10099             }
10100         }
10101         return ret;
10102     case TARGET_NR_setgroups:
10103         {
10104             int gidsetsize = arg1;
10105             target_id *target_grouplist;
10106             gid_t *grouplist = NULL;
10107             int i;
10108             if (gidsetsize) {
10109                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10110                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10111                 if (!target_grouplist) {
10112                     return -TARGET_EFAULT;
10113                 }
10114                 for (i = 0; i < gidsetsize; i++) {
10115                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10116                 }
10117                 unlock_user(target_grouplist, arg2, 0);
10118             }
10119             return get_errno(setgroups(gidsetsize, grouplist));
10120         }
10121     case TARGET_NR_fchown:
10122         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10123 #if defined(TARGET_NR_fchownat)
10124     case TARGET_NR_fchownat:
10125         if (!(p = lock_user_string(arg2)))
10126             return -TARGET_EFAULT;
10127         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10128                                  low2highgid(arg4), arg5));
10129         unlock_user(p, arg2, 0);
10130         return ret;
10131 #endif
10132 #ifdef TARGET_NR_setresuid
10133     case TARGET_NR_setresuid:
10134         return get_errno(sys_setresuid(low2highuid(arg1),
10135                                        low2highuid(arg2),
10136                                        low2highuid(arg3)));
10137 #endif
10138 #ifdef TARGET_NR_getresuid
10139     case TARGET_NR_getresuid:
10140         {
10141             uid_t ruid, euid, suid;
10142             ret = get_errno(getresuid(&ruid, &euid, &suid));
10143             if (!is_error(ret)) {
10144                 if (put_user_id(high2lowuid(ruid), arg1)
10145                     || put_user_id(high2lowuid(euid), arg2)
10146                     || put_user_id(high2lowuid(suid), arg3))
10147                     return -TARGET_EFAULT;
10148             }
10149         }
10150         return ret;
10151 #endif
10152 #ifdef TARGET_NR_getresgid
10153     case TARGET_NR_setresgid:
10154         return get_errno(sys_setresgid(low2highgid(arg1),
10155                                        low2highgid(arg2),
10156                                        low2highgid(arg3)));
10157 #endif
10158 #ifdef TARGET_NR_getresgid
10159     case TARGET_NR_getresgid:
10160         {
10161             gid_t rgid, egid, sgid;
10162             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10163             if (!is_error(ret)) {
10164                 if (put_user_id(high2lowgid(rgid), arg1)
10165                     || put_user_id(high2lowgid(egid), arg2)
10166                     || put_user_id(high2lowgid(sgid), arg3))
10167                     return -TARGET_EFAULT;
10168             }
10169         }
10170         return ret;
10171 #endif
10172 #ifdef TARGET_NR_chown
10173     case TARGET_NR_chown:
10174         if (!(p = lock_user_string(arg1)))
10175             return -TARGET_EFAULT;
10176         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10177         unlock_user(p, arg1, 0);
10178         return ret;
10179 #endif
10180     case TARGET_NR_setuid:
10181         return get_errno(sys_setuid(low2highuid(arg1)));
10182     case TARGET_NR_setgid:
10183         return get_errno(sys_setgid(low2highgid(arg1)));
10184     case TARGET_NR_setfsuid:
10185         return get_errno(setfsuid(arg1));
10186     case TARGET_NR_setfsgid:
10187         return get_errno(setfsgid(arg1));
10188 
10189 #ifdef TARGET_NR_lchown32
10190     case TARGET_NR_lchown32:
10191         if (!(p = lock_user_string(arg1)))
10192             return -TARGET_EFAULT;
10193         ret = get_errno(lchown(p, arg2, arg3));
10194         unlock_user(p, arg1, 0);
10195         return ret;
10196 #endif
10197 #ifdef TARGET_NR_getuid32
10198     case TARGET_NR_getuid32:
10199         return get_errno(getuid());
10200 #endif
10201 
10202 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10203     /* Alpha specific */
10204     case TARGET_NR_getxuid:
10205         {
10206             uid_t euid;
10207             euid = geteuid();
10208             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10209         }
10210         return get_errno(getuid());
10211 #endif
10212 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10213     /* Alpha specific */
10214     case TARGET_NR_getxgid:
10215         {
10216             gid_t egid;
10217             egid = getegid();
10218             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10219         }
10220         return get_errno(getgid());
10221 #endif
10222 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10223     /* Alpha specific */
10224     case TARGET_NR_osf_getsysinfo:
10225         ret = -TARGET_EOPNOTSUPP;
10226         switch (arg1) {
10227           case TARGET_GSI_IEEE_FP_CONTROL:
10228             {
10229                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10230                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10231 
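                /*
                 * Merge the live exception status bits from the FPCR into
                 * the saved software control word before returning it.
                 */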
10232                 swcr &= ~SWCR_STATUS_MASK;
10233                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10234 
10235                 if (put_user_u64(swcr, arg2))
10236                     return -TARGET_EFAULT;
10237                 ret = 0;
10238             }
10239             break;
10240 
10241           /* case GSI_IEEE_STATE_AT_SIGNAL:
10242              -- Not implemented in linux kernel.
10243              case GSI_UACPROC:
10244              -- Retrieves current unaligned access state; not much used.
10245              case GSI_PROC_TYPE:
10246              -- Retrieves implver information; surely not used.
10247              case GSI_GET_HWRPB:
10248              -- Grabs a copy of the HWRPB; surely not used.
10249           */
10250         }
10251         return ret;
10252 #endif
10253 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10254     /* Alpha specific */
10255     case TARGET_NR_osf_setsysinfo:
10256         ret = -TARGET_EOPNOTSUPP;
10257         switch (arg1) {
10258           case TARGET_SSI_IEEE_FP_CONTROL:
10259             {
10260                 uint64_t swcr, fpcr;
10261 
10262                 if (get_user_u64(swcr, arg2)) {
10263                     return -TARGET_EFAULT;
10264                 }
10265 
10266                 /*
10267                  * The kernel calls swcr_update_status to update the
10268                  * status bits from the fpcr at every point that it
10269                  * could be queried.  Therefore, we store the status
10270                  * bits only in FPCR.
10271                  */
10272                 ((CPUAlphaState *)cpu_env)->swcr
10273                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10274 
10275                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10276                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10277                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10278                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10279                 ret = 0;
10280             }
10281             break;
10282 
10283           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10284             {
10285                 uint64_t exc, fpcr, fex;
10286 
10287                 if (get_user_u64(exc, arg2)) {
10288                     return -TARGET_EFAULT;
10289                 }
10290                 exc &= SWCR_STATUS_MASK;
10291                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10292 
10293                 /* Old exceptions are not signaled.  */
10294                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10295                 fex = exc & ~fex;
10296                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10297                 fex &= ((CPUArchState *)cpu_env)->swcr;
10298 
10299                 /* Update the hardware fpcr.  */
10300                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10301                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10302 
10303                 if (fex) {
10304                     int si_code = TARGET_FPE_FLTUNK;
10305                     target_siginfo_t info;
10306 
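                    /*
                     * Pick an si_code for the signal; later checks override
                     * earlier ones, so e.g. an invalid-operation exception
                     * takes precedence when several bits are set.
                     */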
10307                     if (fex & SWCR_TRAP_ENABLE_DNO) {
10308                         si_code = TARGET_FPE_FLTUND;
10309                     }
10310                     if (fex & SWCR_TRAP_ENABLE_INE) {
10311                         si_code = TARGET_FPE_FLTRES;
10312                     }
10313                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10314                         si_code = TARGET_FPE_FLTUND;
10315                     }
10316                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10317                         si_code = TARGET_FPE_FLTOVF;
10318                     }
10319                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10320                         si_code = TARGET_FPE_FLTDIV;
10321                     }
10322                     if (fex & SWCR_TRAP_ENABLE_INV) {
10323                         si_code = TARGET_FPE_FLTINV;
10324                     }
10325 
10326                     info.si_signo = SIGFPE;
10327                     info.si_errno = 0;
10328                     info.si_code = si_code;
10329                     info._sifields._sigfault._addr
10330                         = ((CPUArchState *)cpu_env)->pc;
10331                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10332                                  QEMU_SI_FAULT, &info);
10333                 }
10334                 ret = 0;
10335             }
10336             break;
10337 
10338           /* case SSI_NVPAIRS:
10339              -- Used with SSIN_UACPROC to enable unaligned accesses.
10340              case SSI_IEEE_STATE_AT_SIGNAL:
10341              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10342              -- Not implemented in linux kernel
10343           */
10344         }
10345         return ret;
10346 #endif
10347 #ifdef TARGET_NR_osf_sigprocmask
10348     /* Alpha specific.  */
10349     case TARGET_NR_osf_sigprocmask:
10350         {
10351             abi_ulong mask;
10352             int how;
10353             sigset_t set, oldset;
10354 
10355             switch(arg1) {
10356             case TARGET_SIG_BLOCK:
10357                 how = SIG_BLOCK;
10358                 break;
10359             case TARGET_SIG_UNBLOCK:
10360                 how = SIG_UNBLOCK;
10361                 break;
10362             case TARGET_SIG_SETMASK:
10363                 how = SIG_SETMASK;
10364                 break;
10365             default:
10366                 return -TARGET_EINVAL;
10367             }
10368             mask = arg2;
10369             target_to_host_old_sigset(&set, &mask);
10370             ret = do_sigprocmask(how, &set, &oldset);
10371             if (!ret) {
10372                 host_to_target_old_sigset(&mask, &oldset);
10373                 ret = mask;
10374             }
10375         }
10376         return ret;
10377 #endif
10378 
10379 #ifdef TARGET_NR_getgid32
10380     case TARGET_NR_getgid32:
10381         return get_errno(getgid());
10382 #endif
10383 #ifdef TARGET_NR_geteuid32
10384     case TARGET_NR_geteuid32:
10385         return get_errno(geteuid());
10386 #endif
10387 #ifdef TARGET_NR_getegid32
10388     case TARGET_NR_getegid32:
10389         return get_errno(getegid());
10390 #endif
10391 #ifdef TARGET_NR_setreuid32
10392     case TARGET_NR_setreuid32:
10393         return get_errno(setreuid(arg1, arg2));
10394 #endif
10395 #ifdef TARGET_NR_setregid32
10396     case TARGET_NR_setregid32:
10397         return get_errno(setregid(arg1, arg2));
10398 #endif
10399 #ifdef TARGET_NR_getgroups32
10400     case TARGET_NR_getgroups32:
10401         {
10402             int gidsetsize = arg1;
10403             uint32_t *target_grouplist;
10404             gid_t *grouplist;
10405             int i;
10406 
10407             grouplist = alloca(gidsetsize * sizeof(gid_t));
10408             ret = get_errno(getgroups(gidsetsize, grouplist));
10409             if (gidsetsize == 0)
10410                 return ret;
10411             if (!is_error(ret)) {
10412                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10413                 if (!target_grouplist) {
10414                     return -TARGET_EFAULT;
10415                 }
10416                 for (i = 0; i < ret; i++)
10417                     target_grouplist[i] = tswap32(grouplist[i]);
10418                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10419             }
10420         }
10421         return ret;
10422 #endif
10423 #ifdef TARGET_NR_setgroups32
10424     case TARGET_NR_setgroups32:
10425         {
10426             int gidsetsize = arg1;
10427             uint32_t *target_grouplist;
10428             gid_t *grouplist;
10429             int i;
10430 
10431             grouplist = alloca(gidsetsize * sizeof(gid_t));
10432             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10433             if (!target_grouplist) {
10434                 return -TARGET_EFAULT;
10435             }
10436             for (i = 0; i < gidsetsize; i++)
10437                 grouplist[i] = tswap32(target_grouplist[i]);
10438             unlock_user(target_grouplist, arg2, 0);
10439             return get_errno(setgroups(gidsetsize, grouplist));
10440         }
10441 #endif
10442 #ifdef TARGET_NR_fchown32
10443     case TARGET_NR_fchown32:
10444         return get_errno(fchown(arg1, arg2, arg3));
10445 #endif
10446 #ifdef TARGET_NR_setresuid32
10447     case TARGET_NR_setresuid32:
10448         return get_errno(sys_setresuid(arg1, arg2, arg3));
10449 #endif
10450 #ifdef TARGET_NR_getresuid32
10451     case TARGET_NR_getresuid32:
10452         {
10453             uid_t ruid, euid, suid;
10454             ret = get_errno(getresuid(&ruid, &euid, &suid));
10455             if (!is_error(ret)) {
10456                 if (put_user_u32(ruid, arg1)
10457                     || put_user_u32(euid, arg2)
10458                     || put_user_u32(suid, arg3))
10459                     return -TARGET_EFAULT;
10460             }
10461         }
10462         return ret;
10463 #endif
10464 #ifdef TARGET_NR_setresgid32
10465     case TARGET_NR_setresgid32:
10466         return get_errno(sys_setresgid(arg1, arg2, arg3));
10467 #endif
10468 #ifdef TARGET_NR_getresgid32
10469     case TARGET_NR_getresgid32:
10470         {
10471             gid_t rgid, egid, sgid;
10472             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10473             if (!is_error(ret)) {
10474                 if (put_user_u32(rgid, arg1)
10475                     || put_user_u32(egid, arg2)
10476                     || put_user_u32(sgid, arg3))
10477                     return -TARGET_EFAULT;
10478             }
10479         }
10480         return ret;
10481 #endif
10482 #ifdef TARGET_NR_chown32
10483     case TARGET_NR_chown32:
10484         if (!(p = lock_user_string(arg1)))
10485             return -TARGET_EFAULT;
10486         ret = get_errno(chown(p, arg2, arg3));
10487         unlock_user(p, arg1, 0);
10488         return ret;
10489 #endif
10490 #ifdef TARGET_NR_setuid32
10491     case TARGET_NR_setuid32:
10492         return get_errno(sys_setuid(arg1));
10493 #endif
10494 #ifdef TARGET_NR_setgid32
10495     case TARGET_NR_setgid32:
10496         return get_errno(sys_setgid(arg1));
10497 #endif
10498 #ifdef TARGET_NR_setfsuid32
10499     case TARGET_NR_setfsuid32:
10500         return get_errno(setfsuid(arg1));
10501 #endif
10502 #ifdef TARGET_NR_setfsgid32
10503     case TARGET_NR_setfsgid32:
10504         return get_errno(setfsgid(arg1));
10505 #endif
10506 #ifdef TARGET_NR_mincore
10507     case TARGET_NR_mincore:
10508         {
10509             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10510             if (!a) {
10511                 return -TARGET_ENOMEM;
10512             }
10513             p = lock_user_string(arg3);
10514             if (!p) {
10515                 ret = -TARGET_EFAULT;
10516             } else {
10517                 ret = get_errno(mincore(a, arg2, p));
10518                 unlock_user(p, arg3, ret);
10519             }
10520             unlock_user(a, arg1, 0);
10521         }
10522         return ret;
10523 #endif
10524 #ifdef TARGET_NR_arm_fadvise64_64
10525     case TARGET_NR_arm_fadvise64_64:
10526         /* arm_fadvise64_64 looks like fadvise64_64 but
10527          * with different argument order: fd, advice, offset, len
10528          * rather than the usual fd, offset, len, advice.
10529          * Note that offset and len are both 64-bit so appear as
10530          * pairs of 32-bit registers.
10531          */
10532         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10533                             target_offset64(arg5, arg6), arg2);
10534         return -host_to_target_errno(ret);
10535 #endif
10536 
10537 #if TARGET_ABI_BITS == 32
10538 
10539 #ifdef TARGET_NR_fadvise64_64
10540     case TARGET_NR_fadvise64_64:
10541 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10542         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10543         ret = arg2;
10544         arg2 = arg3;
10545         arg3 = arg4;
10546         arg4 = arg5;
10547         arg5 = arg6;
10548         arg6 = ret;
10549 #else
10550         /* 6 args: fd, offset (high, low), len (high, low), advice */
10551         if (regpairs_aligned(cpu_env, num)) {
10552             /* offset is in (3,4), len in (5,6) and advice in 7 */
10553             arg2 = arg3;
10554             arg3 = arg4;
10555             arg4 = arg5;
10556             arg5 = arg6;
10557             arg6 = arg7;
10558         }
10559 #endif
10560         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10561                             target_offset64(arg4, arg5), arg6);
10562         return -host_to_target_errno(ret);
10563 #endif
10564 
10565 #ifdef TARGET_NR_fadvise64
10566     case TARGET_NR_fadvise64:
10567         /* 5 args: fd, offset (high, low), len, advice */
10568         if (regpairs_aligned(cpu_env, num)) {
10569             /* offset is in (3,4), len in 5 and advice in 6 */
10570             arg2 = arg3;
10571             arg3 = arg4;
10572             arg4 = arg5;
10573             arg5 = arg6;
10574         }
10575         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10576         return -host_to_target_errno(ret);
10577 #endif
10578 
10579 #else /* not a 32-bit ABI */
10580 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10581 #ifdef TARGET_NR_fadvise64_64
10582     case TARGET_NR_fadvise64_64:
10583 #endif
10584 #ifdef TARGET_NR_fadvise64
10585     case TARGET_NR_fadvise64:
10586 #endif
10587 #ifdef TARGET_S390X
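        /*
         * The s390x kernel ABI numbers POSIX_FADV_DONTNEED/NOREUSE as 6/7
         * rather than the generic 4/5, so remap the guest's values here and
         * turn the generic-only 4/5 into values the host will reject.
         */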
10588         switch (arg4) {
10589         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10590         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10591         case 6: arg4 = POSIX_FADV_DONTNEED; break;
10592         case 7: arg4 = POSIX_FADV_NOREUSE; break;
10593         default: break;
10594         }
10595 #endif
10596         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10597 #endif
10598 #endif /* end of fadvise handling for both 32-bit and 64-bit ABIs */
10599 
10600 #ifdef TARGET_NR_madvise
10601     case TARGET_NR_madvise:
10602         /* A straight passthrough may not be safe because qemu sometimes
10603            turns private file-backed mappings into anonymous mappings.
10604            This will break MADV_DONTNEED.
10605            This is a hint, so ignoring and returning success is ok.  */
10606         return 0;
10607 #endif
10608 #if TARGET_ABI_BITS == 32
10609     case TARGET_NR_fcntl64:
10610     {
10611         int cmd;
10612         struct flock64 fl;
10613         from_flock64_fn *copyfrom = copy_from_user_flock64;
10614         to_flock64_fn *copyto = copy_to_user_flock64;
10615 
10616 #ifdef TARGET_ARM
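        /*
         * The old ARM OABI uses a different struct flock64 layout (the
         * 64-bit fields lack the EABI alignment padding), so use the
         * OABI-specific copy helpers for those guests.
         */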
10617         if (!((CPUARMState *)cpu_env)->eabi) {
10618             copyfrom = copy_from_user_oabi_flock64;
10619             copyto = copy_to_user_oabi_flock64;
10620         }
10621 #endif
10622 
10623         cmd = target_to_host_fcntl_cmd(arg2);
10624         if (cmd == -TARGET_EINVAL) {
10625             return cmd;
10626         }
10627 
10628         switch (arg2) {
10629         case TARGET_F_GETLK64:
10630             ret = copyfrom(&fl, arg3);
10631             if (ret) {
10632                 break;
10633             }
10634             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10635             if (ret == 0) {
10636                 ret = copyto(arg3, &fl);
10637             }
10638             break;
10639 
10640         case TARGET_F_SETLK64:
10641         case TARGET_F_SETLKW64:
10642             ret = copyfrom(&fl, arg3);
10643             if (ret) {
10644                 break;
10645             }
10646             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10647             break;
10648         default:
10649             ret = do_fcntl(arg1, arg2, arg3);
10650             break;
10651         }
10652         return ret;
10653     }
10654 #endif
10655 #ifdef TARGET_NR_cacheflush
10656     case TARGET_NR_cacheflush:
10657         /* self-modifying code is handled automatically, so nothing needed */
10658         return 0;
10659 #endif
10660 #ifdef TARGET_NR_getpagesize
10661     case TARGET_NR_getpagesize:
10662         return TARGET_PAGE_SIZE;
10663 #endif
10664     case TARGET_NR_gettid:
10665         return get_errno(sys_gettid());
10666 #ifdef TARGET_NR_readahead
10667     case TARGET_NR_readahead:
10668 #if TARGET_ABI_BITS == 32
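        /*
         * The 64-bit offset is passed as a register pair here; skip the
         * alignment padding register first if the ABI requires it.
         */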
10669         if (regpairs_aligned(cpu_env, num)) {
10670             arg2 = arg3;
10671             arg3 = arg4;
10672             arg4 = arg5;
10673         }
10674         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
10675 #else
10676         ret = get_errno(readahead(arg1, arg2, arg3));
10677 #endif
10678         return ret;
10679 #endif
10680 #ifdef CONFIG_ATTR
10681 #ifdef TARGET_NR_setxattr
10682     case TARGET_NR_listxattr:
10683     case TARGET_NR_llistxattr:
10684     {
10685         void *p, *b = 0;
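        /*
         * A zero arg2 is the "query required buffer size" form of
         * listxattr(); in that case pass a NULL buffer straight through.
         */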
10686         if (arg2) {
10687             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10688             if (!b) {
10689                 return -TARGET_EFAULT;
10690             }
10691         }
10692         p = lock_user_string(arg1);
10693         if (p) {
10694             if (num == TARGET_NR_listxattr) {
10695                 ret = get_errno(listxattr(p, b, arg3));
10696             } else {
10697                 ret = get_errno(llistxattr(p, b, arg3));
10698             }
10699         } else {
10700             ret = -TARGET_EFAULT;
10701         }
10702         unlock_user(p, arg1, 0);
10703         unlock_user(b, arg2, arg3);
10704         return ret;
10705     }
10706     case TARGET_NR_flistxattr:
10707     {
10708         void *b = 0;
10709         if (arg2) {
10710             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10711             if (!b) {
10712                 return -TARGET_EFAULT;
10713             }
10714         }
10715         ret = get_errno(flistxattr(arg1, b, arg3));
10716         unlock_user(b, arg2, arg3);
10717         return ret;
10718     }
10719     case TARGET_NR_setxattr:
10720     case TARGET_NR_lsetxattr:
10721         {
10722             void *p, *n, *v = 0;
10723             if (arg3) {
10724                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10725                 if (!v) {
10726                     return -TARGET_EFAULT;
10727                 }
10728             }
10729             p = lock_user_string(arg1);
10730             n = lock_user_string(arg2);
10731             if (p && n) {
10732                 if (num == TARGET_NR_setxattr) {
10733                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
10734                 } else {
10735                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10736                 }
10737             } else {
10738                 ret = -TARGET_EFAULT;
10739             }
10740             unlock_user(p, arg1, 0);
10741             unlock_user(n, arg2, 0);
10742             unlock_user(v, arg3, 0);
10743         }
10744         return ret;
10745     case TARGET_NR_fsetxattr:
10746         {
10747             void *n, *v = 0;
10748             if (arg3) {
10749                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10750                 if (!v) {
10751                     return -TARGET_EFAULT;
10752                 }
10753             }
10754             n = lock_user_string(arg2);
10755             if (n) {
10756                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10757             } else {
10758                 ret = -TARGET_EFAULT;
10759             }
10760             unlock_user(n, arg2, 0);
10761             unlock_user(v, arg3, 0);
10762         }
10763         return ret;
10764     case TARGET_NR_getxattr:
10765     case TARGET_NR_lgetxattr:
10766         {
10767             void *p, *n, *v = 0;
10768             if (arg3) {
10769                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10770                 if (!v) {
10771                     return -TARGET_EFAULT;
10772                 }
10773             }
10774             p = lock_user_string(arg1);
10775             n = lock_user_string(arg2);
10776             if (p && n) {
10777                 if (num == TARGET_NR_getxattr) {
10778                     ret = get_errno(getxattr(p, n, v, arg4));
10779                 } else {
10780                     ret = get_errno(lgetxattr(p, n, v, arg4));
10781                 }
10782             } else {
10783                 ret = -TARGET_EFAULT;
10784             }
10785             unlock_user(p, arg1, 0);
10786             unlock_user(n, arg2, 0);
10787             unlock_user(v, arg3, arg4);
10788         }
10789         return ret;
10790     case TARGET_NR_fgetxattr:
10791         {
10792             void *n, *v = 0;
10793             if (arg3) {
10794                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10795                 if (!v) {
10796                     return -TARGET_EFAULT;
10797                 }
10798             }
10799             n = lock_user_string(arg2);
10800             if (n) {
10801                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10802             } else {
10803                 ret = -TARGET_EFAULT;
10804             }
10805             unlock_user(n, arg2, 0);
10806             unlock_user(v, arg3, arg4);
10807         }
10808         return ret;
10809     case TARGET_NR_removexattr:
10810     case TARGET_NR_lremovexattr:
10811         {
10812             void *p, *n;
10813             p = lock_user_string(arg1);
10814             n = lock_user_string(arg2);
10815             if (p && n) {
10816                 if (num == TARGET_NR_removexattr) {
10817                     ret = get_errno(removexattr(p, n));
10818                 } else {
10819                     ret = get_errno(lremovexattr(p, n));
10820                 }
10821             } else {
10822                 ret = -TARGET_EFAULT;
10823             }
10824             unlock_user(p, arg1, 0);
10825             unlock_user(n, arg2, 0);
10826         }
10827         return ret;
10828     case TARGET_NR_fremovexattr:
10829         {
10830             void *n;
10831             n = lock_user_string(arg2);
10832             if (n) {
10833                 ret = get_errno(fremovexattr(arg1, n));
10834             } else {
10835                 ret = -TARGET_EFAULT;
10836             }
10837             unlock_user(n, arg2, 0);
10838         }
10839         return ret;
10840 #endif
10841 #endif /* CONFIG_ATTR */
10842 #ifdef TARGET_NR_set_thread_area
10843     case TARGET_NR_set_thread_area:
10844 #if defined(TARGET_MIPS)
10845       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10846       return 0;
10847 #elif defined(TARGET_CRIS)
10848       if (arg1 & 0xff) {
10849           ret = -TARGET_EINVAL;
10850       } else {
10851           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10852           ret = 0;
10853       }
10854       return ret;
10855 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10856       return do_set_thread_area(cpu_env, arg1);
10857 #elif defined(TARGET_M68K)
10858       {
10859           TaskState *ts = cpu->opaque;
10860           ts->tp_value = arg1;
10861           return 0;
10862       }
10863 #else
10864       return -TARGET_ENOSYS;
10865 #endif
10866 #endif
10867 #ifdef TARGET_NR_get_thread_area
10868     case TARGET_NR_get_thread_area:
10869 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10870         return do_get_thread_area(cpu_env, arg1);
10871 #elif defined(TARGET_M68K)
10872         {
10873             TaskState *ts = cpu->opaque;
10874             return ts->tp_value;
10875         }
10876 #else
10877         return -TARGET_ENOSYS;
10878 #endif
10879 #endif
10880 #ifdef TARGET_NR_getdomainname
10881     case TARGET_NR_getdomainname:
10882         return -TARGET_ENOSYS;
10883 #endif
10884 
10885 #ifdef TARGET_NR_clock_settime
10886     case TARGET_NR_clock_settime:
10887     {
10888         struct timespec ts;
10889 
10890         ret = target_to_host_timespec(&ts, arg2);
10891         if (!is_error(ret)) {
10892             ret = get_errno(clock_settime(arg1, &ts));
10893         }
10894         return ret;
10895     }
10896 #endif
10897 #ifdef TARGET_NR_clock_gettime
10898     case TARGET_NR_clock_gettime:
10899     {
10900         struct timespec ts;
10901         ret = get_errno(clock_gettime(arg1, &ts));
10902         if (!is_error(ret)) {
10903             ret = host_to_target_timespec(arg2, &ts);
10904         }
10905         return ret;
10906     }
10907 #endif
10908 #ifdef TARGET_NR_clock_getres
10909     case TARGET_NR_clock_getres:
10910     {
10911         struct timespec ts;
10912         ret = get_errno(clock_getres(arg1, &ts));
10913         if (!is_error(ret)) {
10914                 ret = host_to_target_timespec(arg2, &ts);
10915         }
10916         return ret;
10917     }
10918 #endif
10919 #ifdef TARGET_NR_clock_nanosleep
10920     case TARGET_NR_clock_nanosleep:
10921     {
10922         struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        if (arg4) {
            host_to_target_timespec(arg4, &ts);
        }
10928 
10929 #if defined(TARGET_PPC)
10930         /* clock_nanosleep is odd in that it returns positive errno values.
10931          * On PPC, CR0 bit 3 should be set in such a situation. */
10932         if (ret && ret != -TARGET_ERESTARTSYS) {
10933             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
10934         }
10935 #endif
10936         return ret;
10937     }
10938 #endif
10939 
10940 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10941     case TARGET_NR_set_tid_address:
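        /*
         * The kernel only records this address (it clears and futex-wakes it
         * when the thread exits), so passing the host view of the guest
         * pointer is sufficient here.
         */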
10942         return get_errno(set_tid_address((int *)g2h(arg1)));
10943 #endif
10944 
10945     case TARGET_NR_tkill:
10946         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10947 
10948     case TARGET_NR_tgkill:
10949         return get_errno(safe_tgkill((int)arg1, (int)arg2,
10950                          target_to_host_signal(arg3)));
10951 
10952 #ifdef TARGET_NR_set_robust_list
10953     case TARGET_NR_set_robust_list:
10954     case TARGET_NR_get_robust_list:
10955         /* The ABI for supporting robust futexes has userspace pass
10956          * the kernel a pointer to a linked list which is updated by
10957          * userspace after the syscall; the list is walked by the kernel
10958          * when the thread exits. Since the linked list in QEMU guest
10959          * memory isn't a valid linked list for the host and we have
10960          * no way to reliably intercept the thread-death event, we can't
10961          * support these. Silently return ENOSYS so that guest userspace
10962          * falls back to a non-robust futex implementation (which should
10963          * be OK except in the corner case of the guest crashing while
10964          * holding a mutex that is shared with another process via
10965          * shared memory).
10966          */
10967         return -TARGET_ENOSYS;
10968 #endif
10969 
10970 #if defined(TARGET_NR_utimensat)
10971     case TARGET_NR_utimensat:
10972         {
10973             struct timespec *tsp, ts[2];
10974             if (!arg3) {
10975                 tsp = NULL;
10976             } else {
                if (target_to_host_timespec(ts, arg3) ||
                    target_to_host_timespec(ts + 1,
                                arg3 + sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2) {
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            } else {
10984                 if (!(p = lock_user_string(arg2))) {
10985                     return -TARGET_EFAULT;
10986                 }
10987                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
10988                 unlock_user(p, arg2, 0);
10989             }
10990         }
10991         return ret;
10992 #endif
10993     case TARGET_NR_futex:
10994         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
10995 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10996     case TARGET_NR_inotify_init:
10997         ret = get_errno(sys_inotify_init());
10998         if (ret >= 0) {
10999             fd_trans_register(ret, &target_inotify_trans);
11000         }
11001         return ret;
11002 #endif
11003 #ifdef CONFIG_INOTIFY1
11004 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11005     case TARGET_NR_inotify_init1:
11006         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11007                                           fcntl_flags_tbl)));
11008         if (ret >= 0) {
11009             fd_trans_register(ret, &target_inotify_trans);
11010         }
11011         return ret;
11012 #endif
11013 #endif
11014 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11015     case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11018         unlock_user(p, arg2, 0);
11019         return ret;
11020 #endif
11021 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11022     case TARGET_NR_inotify_rm_watch:
11023         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11024 #endif
11025 
11026 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11027     case TARGET_NR_mq_open:
11028         {
11029             struct mq_attr posix_mq_attr;
11030             struct mq_attr *pposix_mq_attr;
11031             int host_flags;
11032 
11033             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11034             pposix_mq_attr = NULL;
11035             if (arg4) {
11036                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11037                     return -TARGET_EFAULT;
11038                 }
11039                 pposix_mq_attr = &posix_mq_attr;
11040             }
11041             p = lock_user_string(arg1 - 1);
11042             if (!p) {
11043                 return -TARGET_EFAULT;
11044             }
11045             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11046             unlock_user(p, arg1, 0);
11047         }
11048         return ret;
11049 
11050     case TARGET_NR_mq_unlink:
11051         p = lock_user_string(arg1 - 1);
11052         if (!p) {
11053             return -TARGET_EFAULT;
11054         }
11055         ret = get_errno(mq_unlink(p));
11056         unlock_user(p, arg1, 0);
11057         return ret;
11058 
11059     case TARGET_NR_mq_timedsend:
11060         {
11061             struct timespec ts;
11062 
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
11064             if (arg5 != 0) {
11065                 target_to_host_timespec(&ts, arg5);
11066                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11067                 host_to_target_timespec(arg5, &ts);
11068             } else {
11069                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11070             }
11071             unlock_user(p, arg2, arg3);
11072         }
11073         return ret;
11074 
11075     case TARGET_NR_mq_timedreceive:
11076         {
11077             struct timespec ts;
11078             unsigned int prio;
11079 
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
11081             if (arg5 != 0) {
11082                 target_to_host_timespec(&ts, arg5);
11083                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11084                                                      &prio, &ts));
11085                 host_to_target_timespec(arg5, &ts);
11086             } else {
11087                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11088                                                      &prio, NULL));
11089             }
11090             unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
11093         }
11094         return ret;
11095 
11096     /* Not implemented for now... */
11097 /*     case TARGET_NR_mq_notify: */
11098 /*         break; */
11099 
11100     case TARGET_NR_mq_getsetattr:
11101         {
11102             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11103             ret = 0;
11104             if (arg2 != 0) {
                if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                    return -TARGET_EFAULT;
                }
11106                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11107                                            &posix_mq_attr_out));
11108             } else if (arg3 != 0) {
11109                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11110             }
11111             if (ret == 0 && arg3 != 0) {
                if (copy_to_user_mq_attr(arg3, &posix_mq_attr_out) != 0) {
                    return -TARGET_EFAULT;
                }
11113             }
11114         }
11115         return ret;
11116 #endif
11117 
11118 #ifdef CONFIG_SPLICE
11119 #ifdef TARGET_NR_tee
11120     case TARGET_NR_tee:
11121         {
11122             ret = get_errno(tee(arg1, arg2, arg3, arg4));
11123         }
11124         return ret;
11125 #endif
11126 #ifdef TARGET_NR_splice
11127     case TARGET_NR_splice:
11128         {
11129             loff_t loff_in, loff_out;
11130             loff_t *ploff_in = NULL, *ploff_out = NULL;
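            /*
             * splice() updates the offsets it is given, so copy them in
             * from guest memory first and write them back afterwards.
             */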
11131             if (arg2) {
11132                 if (get_user_u64(loff_in, arg2)) {
11133                     return -TARGET_EFAULT;
11134                 }
11135                 ploff_in = &loff_in;
11136             }
11137             if (arg4) {
11138                 if (get_user_u64(loff_out, arg4)) {
11139                     return -TARGET_EFAULT;
11140                 }
11141                 ploff_out = &loff_out;
11142             }
11143             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11144             if (arg2) {
11145                 if (put_user_u64(loff_in, arg2)) {
11146                     return -TARGET_EFAULT;
11147                 }
11148             }
11149             if (arg4) {
11150                 if (put_user_u64(loff_out, arg4)) {
11151                     return -TARGET_EFAULT;
11152                 }
11153             }
11154         }
11155         return ret;
11156 #endif
11157 #ifdef TARGET_NR_vmsplice
11158     case TARGET_NR_vmsplice:
11159         {
11160             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11161             if (vec != NULL) {
11162                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11163                 unlock_iovec(vec, arg2, arg3, 0);
11164             } else {
11165                 ret = -host_to_target_errno(errno);
11166             }
11167         }
11168         return ret;
11169 #endif
11170 #endif /* CONFIG_SPLICE */
11171 #ifdef CONFIG_EVENTFD
11172 #if defined(TARGET_NR_eventfd)
11173     case TARGET_NR_eventfd:
11174         ret = get_errno(eventfd(arg1, 0));
11175         if (ret >= 0) {
11176             fd_trans_register(ret, &target_eventfd_trans);
11177         }
11178         return ret;
11179 #endif
11180 #if defined(TARGET_NR_eventfd2)
11181     case TARGET_NR_eventfd2:
11182     {
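        /*
         * Translate the guest's O_NONBLOCK/O_CLOEXEC bits into the host's
         * values; any other flag bits are passed through unchanged.
         */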
11183         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11184         if (arg2 & TARGET_O_NONBLOCK) {
11185             host_flags |= O_NONBLOCK;
11186         }
11187         if (arg2 & TARGET_O_CLOEXEC) {
11188             host_flags |= O_CLOEXEC;
11189         }
11190         ret = get_errno(eventfd(arg1, host_flags));
11191         if (ret >= 0) {
11192             fd_trans_register(ret, &target_eventfd_trans);
11193         }
11194         return ret;
11195     }
11196 #endif
11197 #endif /* CONFIG_EVENTFD  */
11198 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11199     case TARGET_NR_fallocate:
11200 #if TARGET_ABI_BITS == 32
11201         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11202                                   target_offset64(arg5, arg6)));
11203 #else
11204         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11205 #endif
11206         return ret;
11207 #endif
11208 #if defined(CONFIG_SYNC_FILE_RANGE)
11209 #if defined(TARGET_NR_sync_file_range)
11210     case TARGET_NR_sync_file_range:
11211 #if TARGET_ABI_BITS == 32
11212 #if defined(TARGET_MIPS)
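        /*
         * MIPS o32 passes 64-bit arguments in aligned register pairs, so
         * arg2 is padding and the offset lives in (arg3, arg4), the length
         * in (arg5, arg6) and the flags in arg7.
         */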
11213         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11214                                         target_offset64(arg5, arg6), arg7));
11215 #else
11216         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11217                                         target_offset64(arg4, arg5), arg6));
11218 #endif /* !TARGET_MIPS */
11219 #else
11220         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11221 #endif
11222         return ret;
11223 #endif
11224 #if defined(TARGET_NR_sync_file_range2)
11225     case TARGET_NR_sync_file_range2:
11226         /* This is like sync_file_range but the arguments are reordered */
11227 #if TARGET_ABI_BITS == 32
11228         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11229                                         target_offset64(arg5, arg6), arg2));
11230 #else
11231         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11232 #endif
11233         return ret;
11234 #endif
11235 #endif
11236 #if defined(TARGET_NR_signalfd4)
11237     case TARGET_NR_signalfd4:
11238         return do_signalfd4(arg1, arg2, arg4);
11239 #endif
11240 #if defined(TARGET_NR_signalfd)
11241     case TARGET_NR_signalfd:
11242         return do_signalfd4(arg1, arg2, 0);
11243 #endif
11244 #if defined(CONFIG_EPOLL)
11245 #if defined(TARGET_NR_epoll_create)
11246     case TARGET_NR_epoll_create:
11247         return get_errno(epoll_create(arg1));
11248 #endif
11249 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11250     case TARGET_NR_epoll_create1:
11251         return get_errno(epoll_create1(arg1));
11252 #endif
11253 #if defined(TARGET_NR_epoll_ctl)
11254     case TARGET_NR_epoll_ctl:
11255     {
11256         struct epoll_event ep;
11257         struct epoll_event *epp = 0;
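        /*
         * arg4 may legitimately be NULL (e.g. for EPOLL_CTL_DEL), so only
         * convert the guest epoll_event when one was supplied.
         */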
11258         if (arg4) {
11259             struct target_epoll_event *target_ep;
11260             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11261                 return -TARGET_EFAULT;
11262             }
11263             ep.events = tswap32(target_ep->events);
11264             /* The epoll_data_t union is just opaque data to the kernel,
11265              * so we transfer all 64 bits across and need not worry what
11266              * actual data type it is.
11267              */
11268             ep.data.u64 = tswap64(target_ep->data.u64);
11269             unlock_user_struct(target_ep, arg4, 0);
11270             epp = &ep;
11271         }
11272         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11273     }
11274 #endif
11275 
11276 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11277 #if defined(TARGET_NR_epoll_wait)
11278     case TARGET_NR_epoll_wait:
11279 #endif
11280 #if defined(TARGET_NR_epoll_pwait)
11281     case TARGET_NR_epoll_pwait:
11282 #endif
11283     {
11284         struct target_epoll_event *target_ep;
11285         struct epoll_event *ep;
11286         int epfd = arg1;
11287         int maxevents = arg3;
11288         int timeout = arg4;
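        /*
         * Bounce the events through a host-side array: the host syscall
         * fills in host epoll_events, which are byte-swapped back into the
         * guest's buffer below.
         */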
11289 
11290         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11291             return -TARGET_EINVAL;
11292         }
11293 
11294         target_ep = lock_user(VERIFY_WRITE, arg2,
11295                               maxevents * sizeof(struct target_epoll_event), 1);
11296         if (!target_ep) {
11297             return -TARGET_EFAULT;
11298         }
11299 
11300         ep = g_try_new(struct epoll_event, maxevents);
11301         if (!ep) {
11302             unlock_user(target_ep, arg2, 0);
11303             return -TARGET_ENOMEM;
11304         }
11305 
11306         switch (num) {
11307 #if defined(TARGET_NR_epoll_pwait)
11308         case TARGET_NR_epoll_pwait:
11309         {
11310             target_sigset_t *target_set;
11311             sigset_t _set, *set = &_set;
11312 
11313             if (arg5) {
11314                 if (arg6 != sizeof(target_sigset_t)) {
11315                     ret = -TARGET_EINVAL;
11316                     break;
11317                 }
11318 
11319                 target_set = lock_user(VERIFY_READ, arg5,
11320                                        sizeof(target_sigset_t), 1);
11321                 if (!target_set) {
11322                     ret = -TARGET_EFAULT;
11323                     break;
11324                 }
11325                 target_to_host_sigset(set, target_set);
11326                 unlock_user(target_set, arg5, 0);
11327             } else {
11328                 set = NULL;
11329             }
11330 
11331             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11332                                              set, SIGSET_T_SIZE));
11333             break;
11334         }
11335 #endif
11336 #if defined(TARGET_NR_epoll_wait)
11337         case TARGET_NR_epoll_wait:
11338             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11339                                              NULL, 0));
11340             break;
11341 #endif
11342         default:
11343             ret = -TARGET_ENOSYS;
11344         }
11345         if (!is_error(ret)) {
11346             int i;
11347             for (i = 0; i < ret; i++) {
11348                 target_ep[i].events = tswap32(ep[i].events);
11349                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11350             }
11351             unlock_user(target_ep, arg2,
11352                         ret * sizeof(struct target_epoll_event));
11353         } else {
11354             unlock_user(target_ep, arg2, 0);
11355         }
11356         g_free(ep);
11357         return ret;
11358     }
11359 #endif
11360 #endif
11361 #ifdef TARGET_NR_prlimit64
11362     case TARGET_NR_prlimit64:
11363     {
11364         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11365         struct target_rlimit64 *target_rnew, *target_rold;
11366         struct host_rlimit64 rnew, rold, *rnewp = 0;
11367         int resource = target_to_host_resource(arg2);
11368         if (arg3) {
11369             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11370                 return -TARGET_EFAULT;
11371             }
11372             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11373             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11374             unlock_user_struct(target_rnew, arg3, 0);
11375             rnewp = &rnew;
11376         }
11377 
11378         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11379         if (!is_error(ret) && arg4) {
11380             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11381                 return -TARGET_EFAULT;
11382             }
11383             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11384             target_rold->rlim_max = tswap64(rold.rlim_max);
11385             unlock_user_struct(target_rold, arg4, 1);
11386         }
11387         return ret;
11388     }
11389 #endif
11390 #ifdef TARGET_NR_gethostname
11391     case TARGET_NR_gethostname:
11392     {
11393         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11394         if (name) {
11395             ret = get_errno(gethostname(name, arg2));
11396             unlock_user(name, arg1, arg2);
11397         } else {
11398             ret = -TARGET_EFAULT;
11399         }
11400         return ret;
11401     }
11402 #endif
11403 #ifdef TARGET_NR_atomic_cmpxchg_32
11404     case TARGET_NR_atomic_cmpxchg_32:
11405     {
11406         /* should use start_exclusive from main.c */
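        /*
         * m68k-specific syscall: return the old value at *arg6, storing
         * arg1 there only if the old value equals arg2.  This emulation is
         * not actually atomic.
         */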
11407         abi_ulong mem_value;
11408         if (get_user_u32(mem_value, arg6)) {
11409             target_siginfo_t info;
11410             info.si_signo = SIGSEGV;
11411             info.si_errno = 0;
11412             info.si_code = TARGET_SEGV_MAPERR;
11413             info._sifields._sigfault._addr = arg6;
11414             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11415                          QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
            return ret;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
11421         return mem_value;
11422     }
11423 #endif
11424 #ifdef TARGET_NR_atomic_barrier
11425     case TARGET_NR_atomic_barrier:
11426         /* Like the kernel implementation and the qemu ARM barrier,
11427          * treat this as a no-op. */
11428         return 0;
11429 #endif
11430 
11431 #ifdef TARGET_NR_timer_create
11432     case TARGET_NR_timer_create:
11433     {
11434         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11435 
11436         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11437 
11438         int clkid = arg1;
11439         int timer_index = next_free_host_timer();
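        /*
         * Host timers live in the g_posix_timers[] table; the guest-visible
         * timer id is TIMER_MAGIC ORed with the table index, which
         * get_timer_id() decodes again for the other timer_* syscalls.
         */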
11440 
11441         if (timer_index < 0) {
11442             ret = -TARGET_EAGAIN;
11443         } else {
11444             timer_t *phtimer = g_posix_timers + timer_index;
11445 
11446             if (arg2) {
11447                 phost_sevp = &host_sevp;
11448                 ret = target_to_host_sigevent(phost_sevp, arg2);
11449                 if (ret != 0) {
11450                     return ret;
11451                 }
11452             }
11453 
11454             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11455             if (ret) {
11456                 phtimer = NULL;
11457             } else {
11458                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11459                     return -TARGET_EFAULT;
11460                 }
11461             }
11462         }
11463         return ret;
11464     }
11465 #endif
11466 
11467 #ifdef TARGET_NR_timer_settime
11468     case TARGET_NR_timer_settime:
11469     {
11470         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11471          * struct itimerspec * old_value */
11472         target_timer_t timerid = get_timer_id(arg1);
11473 
11474         if (timerid < 0) {
11475             ret = timerid;
11476         } else if (arg3 == 0) {
11477             ret = -TARGET_EINVAL;
11478         } else {
11479             timer_t htimer = g_posix_timers[timerid];
11480             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11481 
11482             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11483                 return -TARGET_EFAULT;
11484             }
11485             ret = get_errno(
11486                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11487             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11488                 return -TARGET_EFAULT;
11489             }
11490         }
11491         return ret;
11492     }
11493 #endif
11494 
11495 #ifdef TARGET_NR_timer_gettime
11496     case TARGET_NR_timer_gettime:
11497     {
11498         /* args: timer_t timerid, struct itimerspec *curr_value */
11499         target_timer_t timerid = get_timer_id(arg1);
11500 
11501         if (timerid < 0) {
11502             ret = timerid;
11503         } else if (!arg2) {
11504             ret = -TARGET_EFAULT;
11505         } else {
11506             timer_t htimer = g_posix_timers[timerid];
11507             struct itimerspec hspec;
11508             ret = get_errno(timer_gettime(htimer, &hspec));
11509 
11510             if (host_to_target_itimerspec(arg2, &hspec)) {
11511                 ret = -TARGET_EFAULT;
11512             }
11513         }
11514         return ret;
11515     }
11516 #endif
11517 
11518 #ifdef TARGET_NR_timer_getoverrun
11519     case TARGET_NR_timer_getoverrun:
11520     {
11521         /* args: timer_t timerid */
11522         target_timer_t timerid = get_timer_id(arg1);
11523 
11524         if (timerid < 0) {
11525             ret = timerid;
11526         } else {
11527             timer_t htimer = g_posix_timers[timerid];
11528             ret = get_errno(timer_getoverrun(htimer));
11529         }
11530         fd_trans_unregister(ret);
11531         return ret;
11532     }
11533 #endif
11534 
11535 #ifdef TARGET_NR_timer_delete
11536     case TARGET_NR_timer_delete:
11537     {
11538         /* args: timer_t timerid */
11539         target_timer_t timerid = get_timer_id(arg1);
11540 
11541         if (timerid < 0) {
11542             ret = timerid;
11543         } else {
11544             timer_t htimer = g_posix_timers[timerid];
11545             ret = get_errno(timer_delete(htimer));
11546             g_posix_timers[timerid] = 0;
11547         }
11548         return ret;
11549     }
11550 #endif
11551 
11552 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11553     case TARGET_NR_timerfd_create:
11554         return get_errno(timerfd_create(arg1,
11555                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11556 #endif
11557 
11558 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11559     case TARGET_NR_timerfd_gettime:
11560         {
11561             struct itimerspec its_curr;
11562 
11563             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11564 
11565             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11566                 return -TARGET_EFAULT;
11567             }
11568         }
11569         return ret;
11570 #endif
11571 
11572 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11573     case TARGET_NR_timerfd_settime:
11574         {
11575             struct itimerspec its_new, its_old, *p_new;
11576 
11577             if (arg3) {
11578                 if (target_to_host_itimerspec(&its_new, arg3)) {
11579                     return -TARGET_EFAULT;
11580                 }
11581                 p_new = &its_new;
11582             } else {
11583                 p_new = NULL;
11584             }
11585 
11586             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11587 
11588             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11589                 return -TARGET_EFAULT;
11590             }
11591         }
11592         return ret;
11593 #endif
11594 
11595 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11596     case TARGET_NR_ioprio_get:
11597         return get_errno(ioprio_get(arg1, arg2));
11598 #endif
11599 
11600 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11601     case TARGET_NR_ioprio_set:
11602         return get_errno(ioprio_set(arg1, arg2, arg3));
11603 #endif
11604 
11605 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11606     case TARGET_NR_setns:
11607         return get_errno(setns(arg1, arg2));
11608 #endif
11609 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11610     case TARGET_NR_unshare:
11611         return get_errno(unshare(arg1));
11612 #endif
11613 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11614     case TARGET_NR_kcmp:
11615         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11616 #endif
11617 #ifdef TARGET_NR_swapcontext
11618     case TARGET_NR_swapcontext:
11619         /* PowerPC specific.  */
11620         return do_swapcontext(cpu_env, arg1, arg2, arg3);
11621 #endif
11622 
11623     default:
11624         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11625         return -TARGET_ENOSYS;
11626     }
11627     return ret;
11628 }
11629 
11630 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11631                     abi_long arg2, abi_long arg3, abi_long arg4,
11632                     abi_long arg5, abi_long arg6, abi_long arg7,
11633                     abi_long arg8)
11634 {
11635     CPUState *cpu = ENV_GET_CPU(cpu_env);
11636     abi_long ret;
11637 
11638 #ifdef DEBUG_ERESTARTSYS
11639     /* Debug-only code for exercising the syscall-restart code paths
11640      * in the per-architecture cpu main loops: restart every syscall
11641      * the guest makes once before letting it through.
11642      */
11643     {
11644         static bool flag;
11645         flag = !flag;
11646         if (flag) {
11647             return -TARGET_ERESTARTSYS;
11648         }
11649     }
11650 #endif
11651 
11652     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11653                              arg5, arg6, arg7, arg8);
11654 
11655     if (unlikely(do_strace)) {
11656         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11657         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11658                           arg5, arg6, arg7, arg8);
11659         print_syscall_ret(num, ret);
11660     } else {
11661         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11662                           arg5, arg6, arg7, arg8);
11663     }
11664 
11665     trace_guest_user_syscall_ret(cpu, num, ret);
11666     return ret;
11667 }
11668