xref: /openbmc/qemu/linux-user/syscall.c (revision 8dc7fd56)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef CONFIG_EVENTFD
63 #include <sys/eventfd.h>
64 #endif
65 #ifdef CONFIG_EPOLL
66 #include <sys/epoll.h>
67 #endif
68 #ifdef CONFIG_ATTR
69 #include "qemu/xattr.h"
70 #endif
71 #ifdef CONFIG_SENDFILE
72 #include <sys/sendfile.h>
73 #endif
74 
75 #define termios host_termios
76 #define winsize host_winsize
77 #define termio host_termio
78 #define sgttyb host_sgttyb /* same as target */
79 #define tchars host_tchars /* same as target */
80 #define ltchars host_ltchars /* same as target */
81 
82 #include <linux/termios.h>
83 #include <linux/unistd.h>
84 #include <linux/cdrom.h>
85 #include <linux/hdreg.h>
86 #include <linux/soundcard.h>
87 #include <linux/kd.h>
88 #include <linux/mtio.h>
89 #include <linux/fs.h>
90 #if defined(CONFIG_FIEMAP)
91 #include <linux/fiemap.h>
92 #endif
93 #include <linux/fb.h>
94 #if defined(CONFIG_USBFS)
95 #include <linux/usbdevice_fs.h>
96 #include <linux/usb/ch9.h>
97 #endif
98 #include <linux/vt.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <netpacket/packet.h>
105 #include <linux/netlink.h>
106 #include "linux_loop.h"
107 #include "uname.h"
108 
109 #include "qemu.h"
110 #include "qemu/guest-random.h"
111 #include "qapi/error.h"
112 #include "fd-trans.h"
113 
114 #ifndef CLONE_IO
115 #define CLONE_IO                0x80000000      /* Clone io context */
116 #endif
117 
118 /* We can't directly call the host clone syscall, because this will
119  * badly confuse libc (breaking mutexes, for example). So we must
120  * divide clone flags into:
121  *  * flag combinations that look like pthread_create()
122  *  * flag combinations that look like fork()
123  *  * flags we can implement within QEMU itself
124  *  * flags we can't support and will return an error for
125  */
126 /* For thread creation, all these flags must be present; for
127  * fork, none must be present.
128  */
129 #define CLONE_THREAD_FLAGS                              \
130     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
131      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
132 
133 /* These flags are ignored:
134  * CLONE_DETACHED is now ignored by the kernel;
135  * CLONE_IO is just an optimisation hint to the I/O scheduler
136  */
137 #define CLONE_IGNORED_FLAGS                     \
138     (CLONE_DETACHED | CLONE_IO)
139 
140 /* Flags for fork which we can implement within QEMU itself */
141 #define CLONE_OPTIONAL_FORK_FLAGS               \
142     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
143      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
144 
145 /* Flags for thread creation which we can implement within QEMU itself */
146 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
147     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
148      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
149 
150 #define CLONE_INVALID_FORK_FLAGS                                        \
151     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
152 
153 #define CLONE_INVALID_THREAD_FLAGS                                      \
154     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
155        CLONE_IGNORED_FLAGS))
156 
157 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
158  * have almost all been allocated. We cannot support any of
159  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
160  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
161  * The checks against the invalid thread masks above will catch these.
162  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
163  */
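/*
 * Editorial note (not part of the original source): a minimal sketch of how
 * the masks above classify a guest clone() flags word.  The function name
 * example_clone_flags_supported is hypothetical; the real checks are done in
 * do_fork() further down in this file.
 */
static inline bool example_clone_flags_supported(unsigned int flags)
{
    if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
        /* Looks like pthread_create(): only thread-compatible extras allowed */
        return (flags & CLONE_INVALID_THREAD_FLAGS) == 0;
    }
    if ((flags & CLONE_THREAD_FLAGS) == 0) {
        /* Looks like fork(): only fork-compatible extras allowed */
        return (flags & CLONE_INVALID_FORK_FLAGS) == 0;
    }
    /* A partial set of the thread flags cannot be emulated faithfully */
    return false;
}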
164 
165 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
166  * once. This exercises the codepaths for restart.
167  */
168 //#define DEBUG_ERESTARTSYS
169 
170 //#include <linux/msdos_fs.h>
171 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
172 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
173 
174 #undef _syscall0
175 #undef _syscall1
176 #undef _syscall2
177 #undef _syscall3
178 #undef _syscall4
179 #undef _syscall5
180 #undef _syscall6
181 
182 #define _syscall0(type,name)		\
183 static type name (void)			\
184 {					\
185 	return syscall(__NR_##name);	\
186 }
187 
188 #define _syscall1(type,name,type1,arg1)		\
189 static type name (type1 arg1)			\
190 {						\
191 	return syscall(__NR_##name, arg1);	\
192 }
193 
194 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
195 static type name (type1 arg1,type2 arg2)		\
196 {							\
197 	return syscall(__NR_##name, arg1, arg2);	\
198 }
199 
200 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
201 static type name (type1 arg1,type2 arg2,type3 arg3)		\
202 {								\
203 	return syscall(__NR_##name, arg1, arg2, arg3);		\
204 }
205 
206 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
207 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
208 {										\
209 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
210 }
211 
212 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
213 		  type5,arg5)							\
214 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
215 {										\
216 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
217 }
218 
219 
220 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
221 		  type5,arg5,type6,arg6)					\
222 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
223                   type6 arg6)							\
224 {										\
225 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
226 }
227 
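/*
 * Editorial note: for illustration, an invocation such as
 *     _syscall2(int, capget, struct __user_cap_header_struct *, header,
 *               struct __user_cap_data_struct *, data);
 * expands to a thin static wrapper around the raw host syscall:
 *     static int capget(struct __user_cap_header_struct *header,
 *                       struct __user_cap_data_struct *data)
 *     {
 *         return syscall(__NR_capget, header, data);
 *     }
 * i.e. the host syscall is invoked directly, bypassing any glibc wrapper.
 */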
228 
229 #define __NR_sys_uname __NR_uname
230 #define __NR_sys_getcwd1 __NR_getcwd
231 #define __NR_sys_getdents __NR_getdents
232 #define __NR_sys_getdents64 __NR_getdents64
233 #define __NR_sys_getpriority __NR_getpriority
234 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
235 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
236 #define __NR_sys_syslog __NR_syslog
237 #define __NR_sys_futex __NR_futex
238 #define __NR_sys_inotify_init __NR_inotify_init
239 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
240 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
241 
242 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
243 #define __NR__llseek __NR_lseek
244 #endif
245 
246 /* Newer kernel ports have llseek() instead of _llseek() */
247 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
248 #define TARGET_NR__llseek TARGET_NR_llseek
249 #endif
250 
251 #define __NR_sys_gettid __NR_gettid
252 _syscall0(int, sys_gettid)
253 
254 /* For the 64-bit guest on 32-bit host case we must emulate
255  * getdents using getdents64, because otherwise the host
256  * might hand us back more dirent records than we can fit
257  * into the guest buffer after structure format conversion.
258  * Otherwise we implement the guest's getdents with the host getdents,
     * if the host provides it.
259  */
260 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
261 #define EMULATE_GETDENTS_WITH_GETDENTS
262 #endif
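/*
 * Editorial note: a concrete (hypothetical) illustration of the problem the
 * comment above describes.  On a 32-bit host, struct linux_dirent carries
 * 32-bit d_ino/d_off fields, while a 64-bit guest expects 64-bit ones, so
 * each record grows during conversion; a host getdents() that exactly fills
 * a guest-sized buffer could therefore return more records than fit after
 * conversion.  Using getdents64 keeps host and guest record sizes in step.
 */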
263 
264 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
265 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
266 #endif
267 #if (defined(TARGET_NR_getdents) && \
268       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
269     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
270 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
271 #endif
272 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
273 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
274           loff_t *, res, uint, wh);
275 #endif
276 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
277 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
278           siginfo_t *, uinfo)
279 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
280 #ifdef __NR_exit_group
281 _syscall1(int,exit_group,int,error_code)
282 #endif
283 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
284 _syscall1(int,set_tid_address,int *,tidptr)
285 #endif
286 #if defined(TARGET_NR_futex) && defined(__NR_futex)
287 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
288           const struct timespec *,timeout,int *,uaddr2,int,val3)
289 #endif
290 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
291 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
292           unsigned long *, user_mask_ptr);
293 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
294 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
295           unsigned long *, user_mask_ptr);
296 #define __NR_sys_getcpu __NR_getcpu
297 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
298 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
299           void *, arg);
300 _syscall2(int, capget, struct __user_cap_header_struct *, header,
301           struct __user_cap_data_struct *, data);
302 _syscall2(int, capset, struct __user_cap_header_struct *, header,
303           struct __user_cap_data_struct *, data);
304 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
305 _syscall2(int, ioprio_get, int, which, int, who)
306 #endif
307 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
308 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
309 #endif
310 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
311 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
312 #endif
313 
314 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
315 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
316           unsigned long, idx1, unsigned long, idx2)
317 #endif
318 
319 static bitmask_transtbl fcntl_flags_tbl[] = {
320   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
321   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
322   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
323   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
324   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
325   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
326   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
327   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
328   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
329   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
330   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
331   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
332   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
333 #if defined(O_DIRECT)
334   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
335 #endif
336 #if defined(O_NOATIME)
337   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
338 #endif
339 #if defined(O_CLOEXEC)
340   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
341 #endif
342 #if defined(O_PATH)
343   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
344 #endif
345 #if defined(O_TMPFILE)
346   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
347 #endif
348   /* Don't terminate the list prematurely on 64-bit host+guest.  */
349 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
350   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
351 #endif
352   { 0, 0, 0, 0 }
353 };
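/*
 * Editorial note: each row above is read as { target_mask, target_bits,
 * host_mask, host_bits }.  Translating guest open() flags to host flags
 * amounts to, for every row:
 *     if ((guest_flags & target_mask) == target_bits)
 *         host_flags |= host_bits;
 * with the symmetric host_mask/host_bits test used for the reverse
 * direction.  The all-zero row terminates the table.
 */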
354 
355 static int sys_getcwd1(char *buf, size_t size)
356 {
357   if (getcwd(buf, size) == NULL) {
358       /* getcwd() sets errno */
359       return (-1);
360   }
361   return strlen(buf)+1;
362 }
363 
364 #ifdef TARGET_NR_utimensat
365 #if defined(__NR_utimensat)
366 #define __NR_sys_utimensat __NR_utimensat
367 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
368           const struct timespec *,tsp,int,flags)
369 #else
370 static int sys_utimensat(int dirfd, const char *pathname,
371                          const struct timespec times[2], int flags)
372 {
373     errno = ENOSYS;
374     return -1;
375 }
376 #endif
377 #endif /* TARGET_NR_utimensat */
378 
379 #ifdef TARGET_NR_renameat2
380 #if defined(__NR_renameat2)
381 #define __NR_sys_renameat2 __NR_renameat2
382 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
383           const char *, new, unsigned int, flags)
384 #else
385 static int sys_renameat2(int oldfd, const char *old,
386                          int newfd, const char *new, int flags)
387 {
388     if (flags == 0) {
389         return renameat(oldfd, old, newfd, new);
390     }
391     errno = ENOSYS;
392     return -1;
393 }
394 #endif
395 #endif /* TARGET_NR_renameat2 */
396 
397 #ifdef CONFIG_INOTIFY
398 #include <sys/inotify.h>
399 
400 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
401 static int sys_inotify_init(void)
402 {
403   return (inotify_init());
404 }
405 #endif
406 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
407 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
408 {
409   return (inotify_add_watch(fd, pathname, mask));
410 }
411 #endif
412 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
413 static int sys_inotify_rm_watch(int fd, int32_t wd)
414 {
415   return (inotify_rm_watch(fd, wd));
416 }
417 #endif
418 #ifdef CONFIG_INOTIFY1
419 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
420 static int sys_inotify_init1(int flags)
421 {
422   return (inotify_init1(flags));
423 }
424 #endif
425 #endif
426 #else
427 /* Userspace can usually survive runtime without inotify */
428 #undef TARGET_NR_inotify_init
429 #undef TARGET_NR_inotify_init1
430 #undef TARGET_NR_inotify_add_watch
431 #undef TARGET_NR_inotify_rm_watch
432 #endif /* CONFIG_INOTIFY  */
433 
434 #if defined(TARGET_NR_prlimit64)
435 #ifndef __NR_prlimit64
436 # define __NR_prlimit64 -1
437 #endif
438 #define __NR_sys_prlimit64 __NR_prlimit64
439 /* The glibc rlimit structure may not be that used by the underlying syscall */
440 struct host_rlimit64 {
441     uint64_t rlim_cur;
442     uint64_t rlim_max;
443 };
444 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
445           const struct host_rlimit64 *, new_limit,
446           struct host_rlimit64 *, old_limit)
447 #endif
448 
449 
450 #if defined(TARGET_NR_timer_create)
451 /* Maximum of 32 active POSIX timers allowed at any one time. */
452 static timer_t g_posix_timers[32] = { 0, } ;
453 
454 static inline int next_free_host_timer(void)
455 {
456     int k ;
457     /* FIXME: Does finding the next free slot require a lock? */
458     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
459         if (g_posix_timers[k] == 0) {
460             g_posix_timers[k] = (timer_t) 1;
461             return k;
462         }
463     }
464     return -1;
465 }
466 #endif
467 
468 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
469 #ifdef TARGET_ARM
470 static inline int regpairs_aligned(void *cpu_env, int num)
471 {
472     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
473 }
474 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
475 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
476 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
477 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
478  * of registers which translates to the same as ARM/MIPS, because we start with
479  * r3 as arg1 */
480 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
481 #elif defined(TARGET_SH4)
482 /* SH4 doesn't align register pairs, except for p{read,write}64 */
483 static inline int regpairs_aligned(void *cpu_env, int num)
484 {
485     switch (num) {
486     case TARGET_NR_pread64:
487     case TARGET_NR_pwrite64:
488         return 1;
489 
490     default:
491         return 0;
492     }
493 }
494 #elif defined(TARGET_XTENSA)
495 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
496 #else
497 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
498 #endif
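/*
 * Editorial note (not part of the original source): why regpairs_aligned()
 * matters.  On 32-bit ABIs a 64-bit syscall argument arrives in two
 * consecutive argument slots; when the ABI requires the pair to start on an
 * even slot, one slot is skipped first.  The helper below is hypothetical
 * and only illustrates how the two halves are rejoined.
 */
static inline uint64_t example_join_regpair(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    /* The first register of the pair carries the high half */
    return ((uint64_t)word0 << 32) | word1;
#else
    /* The first register of the pair carries the low half */
    return ((uint64_t)word1 << 32) | word0;
#endif
}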
499 
500 #define ERRNO_TABLE_SIZE 1200
501 
502 /* target_to_host_errno_table[] is initialized from
503  * host_to_target_errno_table[] in syscall_init(). */
504 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
505 };
506 
507 /*
508  * This list is the union of errno values overridden in asm-<arch>/errno.h
509  * minus the errnos that are not actually generic to all archs.
510  */
511 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
512     [EAGAIN]		= TARGET_EAGAIN,
513     [EIDRM]		= TARGET_EIDRM,
514     [ECHRNG]		= TARGET_ECHRNG,
515     [EL2NSYNC]		= TARGET_EL2NSYNC,
516     [EL3HLT]		= TARGET_EL3HLT,
517     [EL3RST]		= TARGET_EL3RST,
518     [ELNRNG]		= TARGET_ELNRNG,
519     [EUNATCH]		= TARGET_EUNATCH,
520     [ENOCSI]		= TARGET_ENOCSI,
521     [EL2HLT]		= TARGET_EL2HLT,
522     [EDEADLK]		= TARGET_EDEADLK,
523     [ENOLCK]		= TARGET_ENOLCK,
524     [EBADE]		= TARGET_EBADE,
525     [EBADR]		= TARGET_EBADR,
526     [EXFULL]		= TARGET_EXFULL,
527     [ENOANO]		= TARGET_ENOANO,
528     [EBADRQC]		= TARGET_EBADRQC,
529     [EBADSLT]		= TARGET_EBADSLT,
530     [EBFONT]		= TARGET_EBFONT,
531     [ENOSTR]		= TARGET_ENOSTR,
532     [ENODATA]		= TARGET_ENODATA,
533     [ETIME]		= TARGET_ETIME,
534     [ENOSR]		= TARGET_ENOSR,
535     [ENONET]		= TARGET_ENONET,
536     [ENOPKG]		= TARGET_ENOPKG,
537     [EREMOTE]		= TARGET_EREMOTE,
538     [ENOLINK]		= TARGET_ENOLINK,
539     [EADV]		= TARGET_EADV,
540     [ESRMNT]		= TARGET_ESRMNT,
541     [ECOMM]		= TARGET_ECOMM,
542     [EPROTO]		= TARGET_EPROTO,
543     [EDOTDOT]		= TARGET_EDOTDOT,
544     [EMULTIHOP]		= TARGET_EMULTIHOP,
545     [EBADMSG]		= TARGET_EBADMSG,
546     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
547     [EOVERFLOW]		= TARGET_EOVERFLOW,
548     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
549     [EBADFD]		= TARGET_EBADFD,
550     [EREMCHG]		= TARGET_EREMCHG,
551     [ELIBACC]		= TARGET_ELIBACC,
552     [ELIBBAD]		= TARGET_ELIBBAD,
553     [ELIBSCN]		= TARGET_ELIBSCN,
554     [ELIBMAX]		= TARGET_ELIBMAX,
555     [ELIBEXEC]		= TARGET_ELIBEXEC,
556     [EILSEQ]		= TARGET_EILSEQ,
557     [ENOSYS]		= TARGET_ENOSYS,
558     [ELOOP]		= TARGET_ELOOP,
559     [ERESTART]		= TARGET_ERESTART,
560     [ESTRPIPE]		= TARGET_ESTRPIPE,
561     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
562     [EUSERS]		= TARGET_EUSERS,
563     [ENOTSOCK]		= TARGET_ENOTSOCK,
564     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
565     [EMSGSIZE]		= TARGET_EMSGSIZE,
566     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
567     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
568     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
569     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
570     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
571     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
572     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
573     [EADDRINUSE]	= TARGET_EADDRINUSE,
574     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
575     [ENETDOWN]		= TARGET_ENETDOWN,
576     [ENETUNREACH]	= TARGET_ENETUNREACH,
577     [ENETRESET]		= TARGET_ENETRESET,
578     [ECONNABORTED]	= TARGET_ECONNABORTED,
579     [ECONNRESET]	= TARGET_ECONNRESET,
580     [ENOBUFS]		= TARGET_ENOBUFS,
581     [EISCONN]		= TARGET_EISCONN,
582     [ENOTCONN]		= TARGET_ENOTCONN,
583     [EUCLEAN]		= TARGET_EUCLEAN,
584     [ENOTNAM]		= TARGET_ENOTNAM,
585     [ENAVAIL]		= TARGET_ENAVAIL,
586     [EISNAM]		= TARGET_EISNAM,
587     [EREMOTEIO]		= TARGET_EREMOTEIO,
588     [EDQUOT]            = TARGET_EDQUOT,
589     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
590     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
591     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
592     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
593     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
594     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
595     [EALREADY]		= TARGET_EALREADY,
596     [EINPROGRESS]	= TARGET_EINPROGRESS,
597     [ESTALE]		= TARGET_ESTALE,
598     [ECANCELED]		= TARGET_ECANCELED,
599     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
600     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
601 #ifdef ENOKEY
602     [ENOKEY]		= TARGET_ENOKEY,
603 #endif
604 #ifdef EKEYEXPIRED
605     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
606 #endif
607 #ifdef EKEYREVOKED
608     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
609 #endif
610 #ifdef EKEYREJECTED
611     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
612 #endif
613 #ifdef EOWNERDEAD
614     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
615 #endif
616 #ifdef ENOTRECOVERABLE
617     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
618 #endif
619 #ifdef ENOMSG
620     [ENOMSG]            = TARGET_ENOMSG,
621 #endif
622 #ifdef ERFKILL
623     [ERFKILL]           = TARGET_ERFKILL,
624 #endif
625 #ifdef EHWPOISON
626     [EHWPOISON]         = TARGET_EHWPOISON,
627 #endif
628 };
629 
630 static inline int host_to_target_errno(int err)
631 {
632     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
633         host_to_target_errno_table[err]) {
634         return host_to_target_errno_table[err];
635     }
636     return err;
637 }
638 
639 static inline int target_to_host_errno(int err)
640 {
641     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
642         target_to_host_errno_table[err]) {
643         return target_to_host_errno_table[err];
644     }
645     return err;
646 }
647 
648 static inline abi_long get_errno(abi_long ret)
649 {
650     if (ret == -1)
651         return -host_to_target_errno(errno);
652     else
653         return ret;
654 }
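/*
 * Editorial note: typical usage, as seen throughout this file -- wrap the
 * raw host call so that a -1/errno failure is folded into a negative
 * target errno in the return value:
 *     ret = get_errno(safe_openat(dirfd, path, host_flags, mode));
 *     if (is_error(ret)) { ... }
 */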
655 
656 const char *target_strerror(int err)
657 {
658     if (err == TARGET_ERESTARTSYS) {
659         return "To be restarted";
660     }
661     if (err == TARGET_QEMU_ESIGRETURN) {
662         return "Successful exit from sigreturn";
663     }
664 
665     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
666         return NULL;
667     }
668     return strerror(target_to_host_errno(err));
669 }
670 
671 #define safe_syscall0(type, name) \
672 static type safe_##name(void) \
673 { \
674     return safe_syscall(__NR_##name); \
675 }
676 
677 #define safe_syscall1(type, name, type1, arg1) \
678 static type safe_##name(type1 arg1) \
679 { \
680     return safe_syscall(__NR_##name, arg1); \
681 }
682 
683 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
684 static type safe_##name(type1 arg1, type2 arg2) \
685 { \
686     return safe_syscall(__NR_##name, arg1, arg2); \
687 }
688 
689 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
690 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
691 { \
692     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
693 }
694 
695 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
696     type4, arg4) \
697 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
698 { \
699     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
700 }
701 
702 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
703     type4, arg4, type5, arg5) \
704 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
705     type5 arg5) \
706 { \
707     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
708 }
709 
710 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
711     type4, arg4, type5, arg5, type6, arg6) \
712 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
713     type5 arg5, type6 arg6) \
714 { \
715     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
716 }
717 
718 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
719 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
720 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
721               int, flags, mode_t, mode)
722 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
723               struct rusage *, rusage)
724 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
725               int, options, struct rusage *, rusage)
726 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
727 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
728               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
729 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
730               struct timespec *, tsp, const sigset_t *, sigmask,
731               size_t, sigsetsize)
732 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
733               int, maxevents, int, timeout, const sigset_t *, sigmask,
734               size_t, sigsetsize)
735 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
736               const struct timespec *,timeout,int *,uaddr2,int,val3)
737 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
738 safe_syscall2(int, kill, pid_t, pid, int, sig)
739 safe_syscall2(int, tkill, int, tid, int, sig)
740 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
741 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
742 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
743 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
744               unsigned long, pos_l, unsigned long, pos_h)
745 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
746               unsigned long, pos_l, unsigned long, pos_h)
747 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
748               socklen_t, addrlen)
749 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
750               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
751 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
752               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
753 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
754 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
755 safe_syscall2(int, flock, int, fd, int, operation)
756 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
757               const struct timespec *, uts, size_t, sigsetsize)
758 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
759               int, flags)
760 safe_syscall2(int, nanosleep, const struct timespec *, req,
761               struct timespec *, rem)
762 #ifdef TARGET_NR_clock_nanosleep
763 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
764               const struct timespec *, req, struct timespec *, rem)
765 #endif
766 #ifdef __NR_msgsnd
767 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
768               int, flags)
769 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
770               long, msgtype, int, flags)
771 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
772               unsigned, nsops, const struct timespec *, timeout)
773 #else
774 /* This host kernel architecture uses a single ipc syscall; fake up
775  * wrappers for the sub-operations to hide this implementation detail.
776  * Annoyingly we can't include linux/ipc.h to get the constant definitions
777  * for the call parameter because some structs in there conflict with the
778  * sys/ipc.h ones. So we just define them here, and rely on them being
779  * the same for all host architectures.
780  */
781 #define Q_SEMTIMEDOP 4
782 #define Q_MSGSND 11
783 #define Q_MSGRCV 12
784 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
785 
786 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
787               void *, ptr, long, fifth)
788 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
789 {
790     return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
791 }
792 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
793 {
794     return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
795 }
796 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
797                            const struct timespec *timeout)
798 {
799     return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
800                     (long)timeout);
801 }
802 #endif
803 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
804 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
805               size_t, len, unsigned, prio, const struct timespec *, timeout)
806 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
807               size_t, len, unsigned *, prio, const struct timespec *, timeout)
808 #endif
809 /* We do ioctl like this rather than via safe_syscall3 to preserve the
810  * "third argument might be integer or pointer or not present" behaviour of
811  * the libc function.
812  */
813 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
814 /* Similarly for fcntl. Note that callers must always:
815  *  - pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
816  *  - use the flock64 struct rather than unsuffixed flock
817  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
818  */
819 #ifdef __NR_fcntl64
820 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
821 #else
822 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
823 #endif
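/*
 * Editorial note: an illustrative call obeying the rule above -- always the
 * 64-bit flavours, regardless of host word size:
 *     struct flock64 fl64;
 *     ...
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 */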
824 
825 static inline int host_to_target_sock_type(int host_type)
826 {
827     int target_type;
828 
829     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
830     case SOCK_DGRAM:
831         target_type = TARGET_SOCK_DGRAM;
832         break;
833     case SOCK_STREAM:
834         target_type = TARGET_SOCK_STREAM;
835         break;
836     default:
837         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
838         break;
839     }
840 
841 #if defined(SOCK_CLOEXEC)
842     if (host_type & SOCK_CLOEXEC) {
843         target_type |= TARGET_SOCK_CLOEXEC;
844     }
845 #endif
846 
847 #if defined(SOCK_NONBLOCK)
848     if (host_type & SOCK_NONBLOCK) {
849         target_type |= TARGET_SOCK_NONBLOCK;
850     }
851 #endif
852 
853     return target_type;
854 }
855 
856 static abi_ulong target_brk;
857 static abi_ulong target_original_brk;
858 static abi_ulong brk_page;
859 
860 void target_set_brk(abi_ulong new_brk)
861 {
862     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
863     brk_page = HOST_PAGE_ALIGN(target_brk);
864 }
865 
866 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
867 #define DEBUGF_BRK(message, args...)
868 
869 /* do_brk() must return target values and target errnos. */
870 abi_long do_brk(abi_ulong new_brk)
871 {
872     abi_long mapped_addr;
873     abi_ulong new_alloc_size;
874 
875     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
876 
877     if (!new_brk) {
878         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
879         return target_brk;
880     }
881     if (new_brk < target_original_brk) {
882         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
883                    target_brk);
884         return target_brk;
885     }
886 
887     /* If the new brk is less than the highest page reserved to the
888      * target heap allocation, set it and we're almost done...  */
889     if (new_brk <= brk_page) {
890         /* Heap contents are initialized to zero, as for anonymous
891          * mapped pages.  */
892         if (new_brk > target_brk) {
893             memset(g2h(target_brk), 0, new_brk - target_brk);
894         }
895 	target_brk = new_brk;
896         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
897 	return target_brk;
898     }
899 
900     /* We need to allocate more memory after the brk... Note that
901      * we don't use MAP_FIXED because that will map over the top of
902      * any existing mapping (like the one with the host libc or qemu
903      * itself); instead we treat "mapped but at wrong address" as
904      * a failure and unmap again.
905      */
906     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
907     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
908                                         PROT_READ|PROT_WRITE,
909                                         MAP_ANON|MAP_PRIVATE, 0, 0));
910 
911     if (mapped_addr == brk_page) {
912         /* Heap contents are initialized to zero, as for anonymous
913          * mapped pages.  Technically the new pages are already
914          * initialized to zero since they *are* anonymous mapped
915          * pages, however we have to take care with the contents that
916          * come from the remaining part of the previous page: it may
917          * contain garbage data due to a previous heap usage (grown
918          * then shrunken).  */
919         memset(g2h(target_brk), 0, brk_page - target_brk);
920 
921         target_brk = new_brk;
922         brk_page = HOST_PAGE_ALIGN(target_brk);
923         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
924             target_brk);
925         return target_brk;
926     } else if (mapped_addr != -1) {
927         /* Mapped but at wrong address, meaning there wasn't actually
928          * enough space for this brk.
929          */
930         target_munmap(mapped_addr, new_alloc_size);
931         mapped_addr = -1;
932         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
933     }
934     else {
935         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
936     }
937 
938 #if defined(TARGET_ALPHA)
939     /* We (partially) emulate OSF/1 on Alpha, which requires we
940        return a proper errno, not an unchanged brk value.  */
941     return -TARGET_ENOMEM;
942 #endif
943     /* For everything else, return the previous break. */
944     return target_brk;
945 }
946 
947 static inline abi_long copy_from_user_fdset(fd_set *fds,
948                                             abi_ulong target_fds_addr,
949                                             int n)
950 {
951     int i, nw, j, k;
952     abi_ulong b, *target_fds;
953 
954     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
955     if (!(target_fds = lock_user(VERIFY_READ,
956                                  target_fds_addr,
957                                  sizeof(abi_ulong) * nw,
958                                  1)))
959         return -TARGET_EFAULT;
960 
961     FD_ZERO(fds);
962     k = 0;
963     for (i = 0; i < nw; i++) {
964         /* grab the abi_ulong */
965         __get_user(b, &target_fds[i]);
966         for (j = 0; j < TARGET_ABI_BITS; j++) {
967             /* check the bit inside the abi_ulong */
968             if ((b >> j) & 1)
969                 FD_SET(k, fds);
970             k++;
971         }
972     }
973 
974     unlock_user(target_fds, target_fds_addr, 0);
975 
976     return 0;
977 }
978 
979 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
980                                                  abi_ulong target_fds_addr,
981                                                  int n)
982 {
983     if (target_fds_addr) {
984         if (copy_from_user_fdset(fds, target_fds_addr, n))
985             return -TARGET_EFAULT;
986         *fds_ptr = fds;
987     } else {
988         *fds_ptr = NULL;
989     }
990     return 0;
991 }
992 
993 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
994                                           const fd_set *fds,
995                                           int n)
996 {
997     int i, nw, j, k;
998     abi_long v;
999     abi_ulong *target_fds;
1000 
1001     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1002     if (!(target_fds = lock_user(VERIFY_WRITE,
1003                                  target_fds_addr,
1004                                  sizeof(abi_ulong) * nw,
1005                                  0)))
1006         return -TARGET_EFAULT;
1007 
1008     k = 0;
1009     for (i = 0; i < nw; i++) {
1010         v = 0;
1011         for (j = 0; j < TARGET_ABI_BITS; j++) {
1012             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1013             k++;
1014         }
1015         __put_user(v, &target_fds[i]);
1016     }
1017 
1018     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1019 
1020     return 0;
1021 }
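/*
 * Editorial note: worked example of the layout handled above.  With
 * TARGET_ABI_BITS == 32, guest fd 0 is bit 0 of word 0, fd 31 is bit 31 of
 * word 0 and fd 33 is bit 1 of word 1; the loops above re-pack those bits
 * into the host fd_set representation and back, one abi_ulong at a time.
 */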
1022 
1023 #if defined(__alpha__)
1024 #define HOST_HZ 1024
1025 #else
1026 #define HOST_HZ 100
1027 #endif
1028 
1029 static inline abi_long host_to_target_clock_t(long ticks)
1030 {
1031 #if HOST_HZ == TARGET_HZ
1032     return ticks;
1033 #else
1034     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1035 #endif
1036 }
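/*
 * Editorial note: worked example -- on an Alpha host (HOST_HZ == 1024)
 * reporting to a guest with TARGET_HZ == 100, 2048 host ticks scale to
 * 2048 * 100 / 1024 = 200 guest ticks; the int64_t cast guards against
 * overflow of the intermediate product on 32-bit longs.
 */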
1037 
1038 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1039                                              const struct rusage *rusage)
1040 {
1041     struct target_rusage *target_rusage;
1042 
1043     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1044         return -TARGET_EFAULT;
1045     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1046     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1047     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1048     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1049     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1050     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1051     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1052     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1053     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1054     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1055     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1056     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1057     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1058     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1059     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1060     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1061     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1062     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1063     unlock_user_struct(target_rusage, target_addr, 1);
1064 
1065     return 0;
1066 }
1067 
1068 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1069 {
1070     abi_ulong target_rlim_swap;
1071     rlim_t result;
1072 
1073     target_rlim_swap = tswapal(target_rlim);
1074     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1075         return RLIM_INFINITY;
1076 
1077     result = target_rlim_swap;
1078     if (target_rlim_swap != (rlim_t)result)
1079         return RLIM_INFINITY;
1080 
1081     return result;
1082 }
1083 
1084 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1085 {
1086     abi_ulong target_rlim_swap;
1087     abi_ulong result;
1088 
1089     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1090         target_rlim_swap = TARGET_RLIM_INFINITY;
1091     else
1092         target_rlim_swap = rlim;
1093     result = tswapal(target_rlim_swap);
1094 
1095     return result;
1096 }
1097 
1098 static inline int target_to_host_resource(int code)
1099 {
1100     switch (code) {
1101     case TARGET_RLIMIT_AS:
1102         return RLIMIT_AS;
1103     case TARGET_RLIMIT_CORE:
1104         return RLIMIT_CORE;
1105     case TARGET_RLIMIT_CPU:
1106         return RLIMIT_CPU;
1107     case TARGET_RLIMIT_DATA:
1108         return RLIMIT_DATA;
1109     case TARGET_RLIMIT_FSIZE:
1110         return RLIMIT_FSIZE;
1111     case TARGET_RLIMIT_LOCKS:
1112         return RLIMIT_LOCKS;
1113     case TARGET_RLIMIT_MEMLOCK:
1114         return RLIMIT_MEMLOCK;
1115     case TARGET_RLIMIT_MSGQUEUE:
1116         return RLIMIT_MSGQUEUE;
1117     case TARGET_RLIMIT_NICE:
1118         return RLIMIT_NICE;
1119     case TARGET_RLIMIT_NOFILE:
1120         return RLIMIT_NOFILE;
1121     case TARGET_RLIMIT_NPROC:
1122         return RLIMIT_NPROC;
1123     case TARGET_RLIMIT_RSS:
1124         return RLIMIT_RSS;
1125     case TARGET_RLIMIT_RTPRIO:
1126         return RLIMIT_RTPRIO;
1127     case TARGET_RLIMIT_SIGPENDING:
1128         return RLIMIT_SIGPENDING;
1129     case TARGET_RLIMIT_STACK:
1130         return RLIMIT_STACK;
1131     default:
1132         return code;
1133     }
1134 }
1135 
1136 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1137                                               abi_ulong target_tv_addr)
1138 {
1139     struct target_timeval *target_tv;
1140 
1141     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1142         return -TARGET_EFAULT;
1143 
1144     __get_user(tv->tv_sec, &target_tv->tv_sec);
1145     __get_user(tv->tv_usec, &target_tv->tv_usec);
1146 
1147     unlock_user_struct(target_tv, target_tv_addr, 0);
1148 
1149     return 0;
1150 }
1151 
1152 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1153                                             const struct timeval *tv)
1154 {
1155     struct target_timeval *target_tv;
1156 
1157     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1158         return -TARGET_EFAULT;
1159 
1160     __put_user(tv->tv_sec, &target_tv->tv_sec);
1161     __put_user(tv->tv_usec, &target_tv->tv_usec);
1162 
1163     unlock_user_struct(target_tv, target_tv_addr, 1);
1164 
1165     return 0;
1166 }
1167 
1168 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1169                                                abi_ulong target_tz_addr)
1170 {
1171     struct target_timezone *target_tz;
1172 
1173     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1174         return -TARGET_EFAULT;
1175     }
1176 
1177     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1178     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1179 
1180     unlock_user_struct(target_tz, target_tz_addr, 0);
1181 
1182     return 0;
1183 }
1184 
1185 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1186 #include <mqueue.h>
1187 
1188 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1189                                               abi_ulong target_mq_attr_addr)
1190 {
1191     struct target_mq_attr *target_mq_attr;
1192 
1193     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1194                           target_mq_attr_addr, 1))
1195         return -TARGET_EFAULT;
1196 
1197     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1198     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1199     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1200     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1201 
1202     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1203 
1204     return 0;
1205 }
1206 
1207 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1208                                             const struct mq_attr *attr)
1209 {
1210     struct target_mq_attr *target_mq_attr;
1211 
1212     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1213                           target_mq_attr_addr, 0))
1214         return -TARGET_EFAULT;
1215 
1216     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1217     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1218     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1219     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1220 
1221     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1222 
1223     return 0;
1224 }
1225 #endif
1226 
1227 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1228 /* do_select() must return target values and target errnos. */
1229 static abi_long do_select(int n,
1230                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1231                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1232 {
1233     fd_set rfds, wfds, efds;
1234     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1235     struct timeval tv;
1236     struct timespec ts, *ts_ptr;
1237     abi_long ret;
1238 
1239     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1240     if (ret) {
1241         return ret;
1242     }
1243     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1244     if (ret) {
1245         return ret;
1246     }
1247     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1248     if (ret) {
1249         return ret;
1250     }
1251 
1252     if (target_tv_addr) {
1253         if (copy_from_user_timeval(&tv, target_tv_addr))
1254             return -TARGET_EFAULT;
1255         ts.tv_sec = tv.tv_sec;
1256         ts.tv_nsec = tv.tv_usec * 1000;
1257         ts_ptr = &ts;
1258     } else {
1259         ts_ptr = NULL;
1260     }
1261 
1262     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1263                                   ts_ptr, NULL));
1264 
1265     if (!is_error(ret)) {
1266         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1267             return -TARGET_EFAULT;
1268         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1269             return -TARGET_EFAULT;
1270         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1271             return -TARGET_EFAULT;
1272 
1273         if (target_tv_addr) {
1274             tv.tv_sec = ts.tv_sec;
1275             tv.tv_usec = ts.tv_nsec / 1000;
1276             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1277                 return -TARGET_EFAULT;
1278             }
1279         }
1280     }
1281 
1282     return ret;
1283 }
1284 
1285 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1286 static abi_long do_old_select(abi_ulong arg1)
1287 {
1288     struct target_sel_arg_struct *sel;
1289     abi_ulong inp, outp, exp, tvp;
1290     long nsel;
1291 
1292     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1293         return -TARGET_EFAULT;
1294     }
1295 
1296     nsel = tswapal(sel->n);
1297     inp = tswapal(sel->inp);
1298     outp = tswapal(sel->outp);
1299     exp = tswapal(sel->exp);
1300     tvp = tswapal(sel->tvp);
1301 
1302     unlock_user_struct(sel, arg1, 0);
1303 
1304     return do_select(nsel, inp, outp, exp, tvp);
1305 }
1306 #endif
1307 #endif
1308 
1309 static abi_long do_pipe2(int host_pipe[], int flags)
1310 {
1311 #ifdef CONFIG_PIPE2
1312     return pipe2(host_pipe, flags);
1313 #else
1314     return -ENOSYS;
1315 #endif
1316 }
1317 
1318 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1319                         int flags, int is_pipe2)
1320 {
1321     int host_pipe[2];
1322     abi_long ret;
1323     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1324 
1325     if (is_error(ret))
1326         return get_errno(ret);
1327 
1328     /* Several targets have special calling conventions for the original
1329        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1330     if (!is_pipe2) {
1331 #if defined(TARGET_ALPHA)
1332         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1333         return host_pipe[0];
1334 #elif defined(TARGET_MIPS)
1335         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1336         return host_pipe[0];
1337 #elif defined(TARGET_SH4)
1338         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1339         return host_pipe[0];
1340 #elif defined(TARGET_SPARC)
1341         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1342         return host_pipe[0];
1343 #endif
1344     }
1345 
1346     if (put_user_s32(host_pipe[0], pipedes)
1347         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1348         return -TARGET_EFAULT;
1349     return get_errno(ret);
1350 }
1351 
1352 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1353                                               abi_ulong target_addr,
1354                                               socklen_t len)
1355 {
1356     struct target_ip_mreqn *target_smreqn;
1357 
1358     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1359     if (!target_smreqn)
1360         return -TARGET_EFAULT;
1361     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1362     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1363     if (len == sizeof(struct target_ip_mreqn))
1364         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1365     unlock_user(target_smreqn, target_addr, 0);
1366 
1367     return 0;
1368 }
1369 
1370 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1371                                                abi_ulong target_addr,
1372                                                socklen_t len)
1373 {
1374     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1375     sa_family_t sa_family;
1376     struct target_sockaddr *target_saddr;
1377 
1378     if (fd_trans_target_to_host_addr(fd)) {
1379         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1380     }
1381 
1382     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1383     if (!target_saddr)
1384         return -TARGET_EFAULT;
1385 
1386     sa_family = tswap16(target_saddr->sa_family);
1387 
1388     /* Oops. The caller might send an incomplete sun_path; sun_path
1389      * must be terminated by \0 (see the manual page), but
1390      * unfortunately it is quite common to specify sockaddr_un
1391      * length as "strlen(x->sun_path)" while it should be
1392      * "strlen(...) + 1". We'll fix that here if needed.
1393      * The Linux kernel has a similar feature.
1394      */
1395 
1396     if (sa_family == AF_UNIX) {
1397         if (len < unix_maxlen && len > 0) {
1398             char *cp = (char*)target_saddr;
1399 
1400             if ( cp[len-1] && !cp[len] )
1401                 len++;
1402         }
1403         if (len > unix_maxlen)
1404             len = unix_maxlen;
1405     }
1406 
1407     memcpy(addr, target_saddr, len);
1408     addr->sa_family = sa_family;
1409     if (sa_family == AF_NETLINK) {
1410         struct sockaddr_nl *nladdr;
1411 
1412         nladdr = (struct sockaddr_nl *)addr;
1413         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1414         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1415     } else if (sa_family == AF_PACKET) {
1416 	struct target_sockaddr_ll *lladdr;
1417 
1418 	lladdr = (struct target_sockaddr_ll *)addr;
1419 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1420 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1421     }
1422     unlock_user(target_saddr, target_addr, 0);
1423 
1424     return 0;
1425 }
1426 
1427 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1428                                                struct sockaddr *addr,
1429                                                socklen_t len)
1430 {
1431     struct target_sockaddr *target_saddr;
1432 
1433     if (len == 0) {
1434         return 0;
1435     }
1436     assert(addr);
1437 
1438     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1439     if (!target_saddr)
1440         return -TARGET_EFAULT;
1441     memcpy(target_saddr, addr, len);
1442     if (len >= offsetof(struct target_sockaddr, sa_family) +
1443         sizeof(target_saddr->sa_family)) {
1444         target_saddr->sa_family = tswap16(addr->sa_family);
1445     }
1446     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1447         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1448         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1449         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1450     } else if (addr->sa_family == AF_PACKET) {
1451         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1452         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1453         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1454     } else if (addr->sa_family == AF_INET6 &&
1455                len >= sizeof(struct target_sockaddr_in6)) {
1456         struct target_sockaddr_in6 *target_in6 =
1457                (struct target_sockaddr_in6 *)target_saddr;
1458         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1459     }
1460     unlock_user(target_saddr, target_addr, len);
1461 
1462     return 0;
1463 }
1464 
1465 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1466                                            struct target_msghdr *target_msgh)
1467 {
1468     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1469     abi_long msg_controllen;
1470     abi_ulong target_cmsg_addr;
1471     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1472     socklen_t space = 0;
1473 
1474     msg_controllen = tswapal(target_msgh->msg_controllen);
1475     if (msg_controllen < sizeof (struct target_cmsghdr))
1476         goto the_end;
1477     target_cmsg_addr = tswapal(target_msgh->msg_control);
1478     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1479     target_cmsg_start = target_cmsg;
1480     if (!target_cmsg)
1481         return -TARGET_EFAULT;
1482 
1483     while (cmsg && target_cmsg) {
1484         void *data = CMSG_DATA(cmsg);
1485         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1486 
1487         int len = tswapal(target_cmsg->cmsg_len)
1488             - sizeof(struct target_cmsghdr);
1489 
1490         space += CMSG_SPACE(len);
1491         if (space > msgh->msg_controllen) {
1492             space -= CMSG_SPACE(len);
1493             /* This is a QEMU bug, since we allocated the payload
1494              * area ourselves (unlike overflow in host-to-target
1495              * conversion, which is just the guest giving us a buffer
1496              * that's too small). It can't happen for the payload types
1497              * we currently support; if it becomes an issue in future
1498              * we would need to improve our allocation strategy to
1499              * something more intelligent than "twice the size of the
1500              * target buffer we're reading from".
1501              */
1502             gemu_log("Host cmsg overflow\n");
1503             break;
1504         }
1505 
1506         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1507             cmsg->cmsg_level = SOL_SOCKET;
1508         } else {
1509             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1510         }
1511         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1512         cmsg->cmsg_len = CMSG_LEN(len);
1513 
1514         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1515             int *fd = (int *)data;
1516             int *target_fd = (int *)target_data;
1517             int i, numfds = len / sizeof(int);
1518 
1519             for (i = 0; i < numfds; i++) {
1520                 __get_user(fd[i], target_fd + i);
1521             }
1522         } else if (cmsg->cmsg_level == SOL_SOCKET
1523                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1524             struct ucred *cred = (struct ucred *)data;
1525             struct target_ucred *target_cred =
1526                 (struct target_ucred *)target_data;
1527 
1528             __get_user(cred->pid, &target_cred->pid);
1529             __get_user(cred->uid, &target_cred->uid);
1530             __get_user(cred->gid, &target_cred->gid);
1531         } else {
1532             gemu_log("Unsupported ancillary data: %d/%d\n",
1533                                         cmsg->cmsg_level, cmsg->cmsg_type);
1534             memcpy(data, target_data, len);
1535         }
1536 
1537         cmsg = CMSG_NXTHDR(msgh, cmsg);
1538         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1539                                          target_cmsg_start);
1540     }
1541     unlock_user(target_cmsg, target_cmsg_addr, 0);
1542  the_end:
1543     msgh->msg_controllen = space;
1544     return 0;
1545 }
1546 
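     /* Convert the control-message buffer received from the host back into
      * the guest's msghdr for recvmsg().  Truncation caused by a too-small
      * guest buffer is reported via MSG_CTRUNC; payload types whose target
      * size differs from the host's (e.g. SO_TIMESTAMP's struct timeval)
      * adjust tgt_len below.
      */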
1547 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1548                                            struct msghdr *msgh)
1549 {
1550     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1551     abi_long msg_controllen;
1552     abi_ulong target_cmsg_addr;
1553     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1554     socklen_t space = 0;
1555 
1556     msg_controllen = tswapal(target_msgh->msg_controllen);
1557     if (msg_controllen < sizeof (struct target_cmsghdr))
1558         goto the_end;
1559     target_cmsg_addr = tswapal(target_msgh->msg_control);
1560     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1561     target_cmsg_start = target_cmsg;
1562     if (!target_cmsg)
1563         return -TARGET_EFAULT;
1564 
1565     while (cmsg && target_cmsg) {
1566         void *data = CMSG_DATA(cmsg);
1567         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1568 
1569         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1570         int tgt_len, tgt_space;
1571 
1572         /* We never copy a half-header but may copy half-data;
1573          * this is Linux's behaviour in put_cmsg(). Note that
1574          * truncation here is a guest problem (which we report
1575          * to the guest via the CTRUNC bit), unlike truncation
1576          * in target_to_host_cmsg, which is a QEMU bug.
1577          */
1578         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1579             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1580             break;
1581         }
1582 
1583         if (cmsg->cmsg_level == SOL_SOCKET) {
1584             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1585         } else {
1586             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1587         }
1588         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1589 
1590         /* Payload types which need a different size of payload on
1591          * the target must adjust tgt_len here.
1592          */
1593         tgt_len = len;
1594         switch (cmsg->cmsg_level) {
1595         case SOL_SOCKET:
1596             switch (cmsg->cmsg_type) {
1597             case SO_TIMESTAMP:
1598                 tgt_len = sizeof(struct target_timeval);
1599                 break;
1600             default:
1601                 break;
1602             }
1603             break;
1604         default:
1605             break;
1606         }
1607 
1608         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1609             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1610             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1611         }
1612 
1613         /* We must now copy-and-convert len bytes of payload
1614          * into tgt_len bytes of destination space. Bear in mind
1615          * that in both source and destination we may be dealing
1616          * with a truncated value!
1617          */
1618         switch (cmsg->cmsg_level) {
1619         case SOL_SOCKET:
1620             switch (cmsg->cmsg_type) {
1621             case SCM_RIGHTS:
1622             {
1623                 int *fd = (int *)data;
1624                 int *target_fd = (int *)target_data;
1625                 int i, numfds = tgt_len / sizeof(int);
1626 
1627                 for (i = 0; i < numfds; i++) {
1628                     __put_user(fd[i], target_fd + i);
1629                 }
1630                 break;
1631             }
1632             case SO_TIMESTAMP:
1633             {
1634                 struct timeval *tv = (struct timeval *)data;
1635                 struct target_timeval *target_tv =
1636                     (struct target_timeval *)target_data;
1637 
1638                 if (len != sizeof(struct timeval) ||
1639                     tgt_len != sizeof(struct target_timeval)) {
1640                     goto unimplemented;
1641                 }
1642 
1643                 /* copy struct timeval to target */
1644                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1645                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1646                 break;
1647             }
1648             case SCM_CREDENTIALS:
1649             {
1650                 struct ucred *cred = (struct ucred *)data;
1651                 struct target_ucred *target_cred =
1652                     (struct target_ucred *)target_data;
1653 
1654                 __put_user(cred->pid, &target_cred->pid);
1655                 __put_user(cred->uid, &target_cred->uid);
1656                 __put_user(cred->gid, &target_cred->gid);
1657                 break;
1658             }
1659             default:
1660                 goto unimplemented;
1661             }
1662             break;
1663 
1664         case SOL_IP:
1665             switch (cmsg->cmsg_type) {
1666             case IP_TTL:
1667             {
1668                 uint32_t *v = (uint32_t *)data;
1669                 uint32_t *t_int = (uint32_t *)target_data;
1670 
1671                 if (len != sizeof(uint32_t) ||
1672                     tgt_len != sizeof(uint32_t)) {
1673                     goto unimplemented;
1674                 }
1675                 __put_user(*v, t_int);
1676                 break;
1677             }
1678             case IP_RECVERR:
1679             {
1680                 struct errhdr_t {
1681                    struct sock_extended_err ee;
1682                    struct sockaddr_in offender;
1683                 };
1684                 struct errhdr_t *errh = (struct errhdr_t *)data;
1685                 struct errhdr_t *target_errh =
1686                     (struct errhdr_t *)target_data;
1687 
1688                 if (len != sizeof(struct errhdr_t) ||
1689                     tgt_len != sizeof(struct errhdr_t)) {
1690                     goto unimplemented;
1691                 }
1692                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1693                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1694                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1695                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1696                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1697                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1698                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1699                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1700                     (void *) &errh->offender, sizeof(errh->offender));
1701                 break;
1702             }
1703             default:
1704                 goto unimplemented;
1705             }
1706             break;
1707 
1708         case SOL_IPV6:
1709             switch (cmsg->cmsg_type) {
1710             case IPV6_HOPLIMIT:
1711             {
1712                 uint32_t *v = (uint32_t *)data;
1713                 uint32_t *t_int = (uint32_t *)target_data;
1714 
1715                 if (len != sizeof(uint32_t) ||
1716                     tgt_len != sizeof(uint32_t)) {
1717                     goto unimplemented;
1718                 }
1719                 __put_user(*v, t_int);
1720                 break;
1721             }
1722             case IPV6_RECVERR:
1723             {
1724                 struct errhdr6_t {
1725                    struct sock_extended_err ee;
1726                    struct sockaddr_in6 offender;
1727                 };
1728                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1729                 struct errhdr6_t *target_errh =
1730                     (struct errhdr6_t *)target_data;
1731 
1732                 if (len != sizeof(struct errhdr6_t) ||
1733                     tgt_len != sizeof(struct errhdr6_t)) {
1734                     goto unimplemented;
1735                 }
1736                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1737                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1738                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1739                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1740                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1741                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1742                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1743                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1744                     (void *) &errh->offender, sizeof(errh->offender));
1745                 break;
1746             }
1747             default:
1748                 goto unimplemented;
1749             }
1750             break;
1751 
1752         default:
1753         unimplemented:
1754             gemu_log("Unsupported ancillary data: %d/%d\n",
1755                                         cmsg->cmsg_level, cmsg->cmsg_type);
1756             memcpy(target_data, data, MIN(len, tgt_len));
1757             if (tgt_len > len) {
1758                 memset(target_data + len, 0, tgt_len - len);
1759             }
1760         }
1761 
1762         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1763         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1764         if (msg_controllen < tgt_space) {
1765             tgt_space = msg_controllen;
1766         }
1767         msg_controllen -= tgt_space;
1768         space += tgt_space;
1769         cmsg = CMSG_NXTHDR(msgh, cmsg);
1770         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1771                                          target_cmsg_start);
1772     }
1773     unlock_user(target_cmsg, target_cmsg_addr, space);
1774  the_end:
1775     target_msgh->msg_controllen = tswapal(space);
1776     return 0;
1777 }
1778 
1779 /* do_setsockopt() Must return target values and target errnos. */
1780 static abi_long do_setsockopt(int sockfd, int level, int optname,
1781                               abi_ulong optval_addr, socklen_t optlen)
1782 {
1783     abi_long ret;
1784     int val;
1785     struct ip_mreqn *ip_mreq;
1786     struct ip_mreq_source *ip_mreq_source;
1787 
1788     switch(level) {
1789     case SOL_TCP:
1790         /* TCP options all take an 'int' value.  */
1791         if (optlen < sizeof(uint32_t))
1792             return -TARGET_EINVAL;
1793 
1794         if (get_user_u32(val, optval_addr))
1795             return -TARGET_EFAULT;
1796         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1797         break;
1798     case SOL_IP:
1799         switch(optname) {
1800         case IP_TOS:
1801         case IP_TTL:
1802         case IP_HDRINCL:
1803         case IP_ROUTER_ALERT:
1804         case IP_RECVOPTS:
1805         case IP_RETOPTS:
1806         case IP_PKTINFO:
1807         case IP_MTU_DISCOVER:
1808         case IP_RECVERR:
1809         case IP_RECVTTL:
1810         case IP_RECVTOS:
1811 #ifdef IP_FREEBIND
1812         case IP_FREEBIND:
1813 #endif
1814         case IP_MULTICAST_TTL:
1815         case IP_MULTICAST_LOOP:
1816             val = 0;
1817             if (optlen >= sizeof(uint32_t)) {
1818                 if (get_user_u32(val, optval_addr))
1819                     return -TARGET_EFAULT;
1820             } else if (optlen >= 1) {
1821                 if (get_user_u8(val, optval_addr))
1822                     return -TARGET_EFAULT;
1823             }
1824             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1825             break;
1826         case IP_ADD_MEMBERSHIP:
1827         case IP_DROP_MEMBERSHIP:
1828             if (optlen < sizeof (struct target_ip_mreq) ||
1829                 optlen > sizeof (struct target_ip_mreqn))
1830                 return -TARGET_EINVAL;
1831 
1832             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1833             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1834             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1835             break;
1836 
1837         case IP_BLOCK_SOURCE:
1838         case IP_UNBLOCK_SOURCE:
1839         case IP_ADD_SOURCE_MEMBERSHIP:
1840         case IP_DROP_SOURCE_MEMBERSHIP:
1841             if (optlen != sizeof (struct target_ip_mreq_source))
1842                 return -TARGET_EINVAL;
1843 
1844             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                 if (!ip_mreq_source) {
                     return -TARGET_EFAULT;
                 }
1845             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1846             unlock_user(ip_mreq_source, optval_addr, 0);
1847             break;
1848 
1849         default:
1850             goto unimplemented;
1851         }
1852         break;
1853     case SOL_IPV6:
1854         switch (optname) {
1855         case IPV6_MTU_DISCOVER:
1856         case IPV6_MTU:
1857         case IPV6_V6ONLY:
1858         case IPV6_RECVPKTINFO:
1859         case IPV6_UNICAST_HOPS:
1860         case IPV6_MULTICAST_HOPS:
1861         case IPV6_MULTICAST_LOOP:
1862         case IPV6_RECVERR:
1863         case IPV6_RECVHOPLIMIT:
1864         case IPV6_2292HOPLIMIT:
1865         case IPV6_CHECKSUM:
1866         case IPV6_ADDRFORM:
1867         case IPV6_2292PKTINFO:
1868         case IPV6_RECVTCLASS:
1869         case IPV6_RECVRTHDR:
1870         case IPV6_2292RTHDR:
1871         case IPV6_RECVHOPOPTS:
1872         case IPV6_2292HOPOPTS:
1873         case IPV6_RECVDSTOPTS:
1874         case IPV6_2292DSTOPTS:
1875         case IPV6_TCLASS:
1876 #ifdef IPV6_RECVPATHMTU
1877         case IPV6_RECVPATHMTU:
1878 #endif
1879 #ifdef IPV6_TRANSPARENT
1880         case IPV6_TRANSPARENT:
1881 #endif
1882 #ifdef IPV6_FREEBIND
1883         case IPV6_FREEBIND:
1884 #endif
1885 #ifdef IPV6_RECVORIGDSTADDR
1886         case IPV6_RECVORIGDSTADDR:
1887 #endif
1888             val = 0;
1889             if (optlen < sizeof(uint32_t)) {
1890                 return -TARGET_EINVAL;
1891             }
1892             if (get_user_u32(val, optval_addr)) {
1893                 return -TARGET_EFAULT;
1894             }
1895             ret = get_errno(setsockopt(sockfd, level, optname,
1896                                        &val, sizeof(val)));
1897             break;
1898         case IPV6_PKTINFO:
1899         {
1900             struct in6_pktinfo pki;
1901 
1902             if (optlen < sizeof(pki)) {
1903                 return -TARGET_EINVAL;
1904             }
1905 
1906             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1907                 return -TARGET_EFAULT;
1908             }
1909 
1910             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1911 
1912             ret = get_errno(setsockopt(sockfd, level, optname,
1913                                        &pki, sizeof(pki)));
1914             break;
1915         }
1916         default:
1917             goto unimplemented;
1918         }
1919         break;
1920     case SOL_ICMPV6:
1921         switch (optname) {
1922         case ICMPV6_FILTER:
1923         {
1924             struct icmp6_filter icmp6f;
1925 
1926             if (optlen > sizeof(icmp6f)) {
1927                 optlen = sizeof(icmp6f);
1928             }
1929 
1930             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
1931                 return -TARGET_EFAULT;
1932             }
1933 
1934             for (val = 0; val < 8; val++) {
1935                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
1936             }
1937 
1938             ret = get_errno(setsockopt(sockfd, level, optname,
1939                                        &icmp6f, optlen));
1940             break;
1941         }
1942         default:
1943             goto unimplemented;
1944         }
1945         break;
1946     case SOL_RAW:
1947         switch (optname) {
1948         case ICMP_FILTER:
1949         case IPV6_CHECKSUM:
1950             /* These options take a u32 value.  */
1951             if (optlen < sizeof(uint32_t)) {
1952                 return -TARGET_EINVAL;
1953             }
1954 
1955             if (get_user_u32(val, optval_addr)) {
1956                 return -TARGET_EFAULT;
1957             }
1958             ret = get_errno(setsockopt(sockfd, level, optname,
1959                                        &val, sizeof(val)));
1960             break;
1961 
1962         default:
1963             goto unimplemented;
1964         }
1965         break;
1966     case TARGET_SOL_SOCKET:
1967         switch (optname) {
1968         case TARGET_SO_RCVTIMEO:
1969         {
1970                 struct timeval tv;
1971 
1972                 optname = SO_RCVTIMEO;
1973 
1974 set_timeout:
1975                 if (optlen != sizeof(struct target_timeval)) {
1976                     return -TARGET_EINVAL;
1977                 }
1978 
1979                 if (copy_from_user_timeval(&tv, optval_addr)) {
1980                     return -TARGET_EFAULT;
1981                 }
1982 
1983                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1984                                 &tv, sizeof(tv)));
1985                 return ret;
1986         }
1987         case TARGET_SO_SNDTIMEO:
1988                 optname = SO_SNDTIMEO;
1989                 goto set_timeout;
1990         case TARGET_SO_ATTACH_FILTER:
1991         {
1992                 struct target_sock_fprog *tfprog;
1993                 struct target_sock_filter *tfilter;
1994                 struct sock_fprog fprog;
1995                 struct sock_filter *filter;
1996                 int i;
1997 
1998                 if (optlen != sizeof(*tfprog)) {
1999                     return -TARGET_EINVAL;
2000                 }
2001                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2002                     return -TARGET_EFAULT;
2003                 }
2004                 if (!lock_user_struct(VERIFY_READ, tfilter,
2005                                       tswapal(tfprog->filter), 0)) {
2006                     unlock_user_struct(tfprog, optval_addr, 1);
2007                     return -TARGET_EFAULT;
2008                 }
2009 
2010                 fprog.len = tswap16(tfprog->len);
2011                 filter = g_try_new(struct sock_filter, fprog.len);
2012                 if (filter == NULL) {
2013                     unlock_user_struct(tfilter, tfprog->filter, 1);
2014                     unlock_user_struct(tfprog, optval_addr, 1);
2015                     return -TARGET_ENOMEM;
2016                 }
2017                 for (i = 0; i < fprog.len; i++) {
2018                     filter[i].code = tswap16(tfilter[i].code);
2019                     filter[i].jt = tfilter[i].jt;
2020                     filter[i].jf = tfilter[i].jf;
2021                     filter[i].k = tswap32(tfilter[i].k);
2022                 }
2023                 fprog.filter = filter;
2024 
2025                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2026                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2027                 g_free(filter);
2028 
2029                 unlock_user_struct(tfilter, tfprog->filter, 1);
2030                 unlock_user_struct(tfprog, optval_addr, 1);
2031                 return ret;
2032         }
2033         case TARGET_SO_BINDTODEVICE:
2034         {
2035                 char *dev_ifname, *addr_ifname;
2036 
2037                 if (optlen > IFNAMSIZ - 1) {
2038                     optlen = IFNAMSIZ - 1;
2039                 }
2040                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2041                 if (!dev_ifname) {
2042                     return -TARGET_EFAULT;
2043                 }
2044                 optname = SO_BINDTODEVICE;
2045                 addr_ifname = alloca(IFNAMSIZ);
2046                 memcpy(addr_ifname, dev_ifname, optlen);
2047                 addr_ifname[optlen] = 0;
2048                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2049                                            addr_ifname, optlen));
2050                 unlock_user(dev_ifname, optval_addr, 0);
2051                 return ret;
2052         }
2053         case TARGET_SO_LINGER:
2054         {
2055                 struct linger lg;
2056                 struct target_linger *tlg;
2057 
2058                 if (optlen != sizeof(struct target_linger)) {
2059                     return -TARGET_EINVAL;
2060                 }
2061                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2062                     return -TARGET_EFAULT;
2063                 }
2064                 __get_user(lg.l_onoff, &tlg->l_onoff);
2065                 __get_user(lg.l_linger, &tlg->l_linger);
2066                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2067                                 &lg, sizeof(lg)));
2068                 unlock_user_struct(tlg, optval_addr, 0);
2069                 return ret;
2070         }
2071             /* Options with 'int' argument.  */
2072         case TARGET_SO_DEBUG:
2073                 optname = SO_DEBUG;
2074                 break;
2075         case TARGET_SO_REUSEADDR:
2076                 optname = SO_REUSEADDR;
2077                 break;
2078 #ifdef SO_REUSEPORT
2079         case TARGET_SO_REUSEPORT:
2080                 optname = SO_REUSEPORT;
2081                 break;
2082 #endif
2083         case TARGET_SO_TYPE:
2084                 optname = SO_TYPE;
2085                 break;
2086         case TARGET_SO_ERROR:
2087                 optname = SO_ERROR;
2088                 break;
2089         case TARGET_SO_DONTROUTE:
2090                 optname = SO_DONTROUTE;
2091                 break;
2092         case TARGET_SO_BROADCAST:
2093                 optname = SO_BROADCAST;
2094                 break;
2095         case TARGET_SO_SNDBUF:
2096                 optname = SO_SNDBUF;
2097                 break;
2098         case TARGET_SO_SNDBUFFORCE:
2099                 optname = SO_SNDBUFFORCE;
2100                 break;
2101         case TARGET_SO_RCVBUF:
2102                 optname = SO_RCVBUF;
2103                 break;
2104         case TARGET_SO_RCVBUFFORCE:
2105                 optname = SO_RCVBUFFORCE;
2106                 break;
2107         case TARGET_SO_KEEPALIVE:
2108                 optname = SO_KEEPALIVE;
2109                 break;
2110         case TARGET_SO_OOBINLINE:
2111                 optname = SO_OOBINLINE;
2112                 break;
2113         case TARGET_SO_NO_CHECK:
2114                 optname = SO_NO_CHECK;
2115                 break;
2116         case TARGET_SO_PRIORITY:
2117                 optname = SO_PRIORITY;
2118                 break;
2119 #ifdef SO_BSDCOMPAT
2120         case TARGET_SO_BSDCOMPAT:
2121                 optname = SO_BSDCOMPAT;
2122                 break;
2123 #endif
2124         case TARGET_SO_PASSCRED:
2125                 optname = SO_PASSCRED;
2126                 break;
2127         case TARGET_SO_PASSSEC:
2128                 optname = SO_PASSSEC;
2129                 break;
2130         case TARGET_SO_TIMESTAMP:
2131                 optname = SO_TIMESTAMP;
2132                 break;
2133         case TARGET_SO_RCVLOWAT:
2134                 optname = SO_RCVLOWAT;
2135                 break;
2136         default:
2137             goto unimplemented;
2138         }
2139         if (optlen < sizeof(uint32_t))
2140             return -TARGET_EINVAL;
2141 
2142         if (get_user_u32(val, optval_addr))
2143             return -TARGET_EFAULT;
2144         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2145         break;
2146     default:
2147     unimplemented:
2148         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2149         ret = -TARGET_ENOPROTOOPT;
2150     }
2151     return ret;
2152 }
2153 
2154 /* do_getsockopt() Must return target values and target errnos. */
2155 static abi_long do_getsockopt(int sockfd, int level, int optname,
2156                               abi_ulong optval_addr, abi_ulong optlen)
2157 {
2158     abi_long ret;
2159     int len, val;
2160     socklen_t lv;
2161 
2162     switch(level) {
2163     case TARGET_SOL_SOCKET:
2164         level = SOL_SOCKET;
2165         switch (optname) {
2166         /* These don't just return a single integer */
2167         case TARGET_SO_RCVTIMEO:
2168         case TARGET_SO_SNDTIMEO:
2169         case TARGET_SO_PEERNAME:
2170             goto unimplemented;
2171         case TARGET_SO_PEERCRED: {
2172             struct ucred cr;
2173             socklen_t crlen;
2174             struct target_ucred *tcr;
2175 
2176             if (get_user_u32(len, optlen)) {
2177                 return -TARGET_EFAULT;
2178             }
2179             if (len < 0) {
2180                 return -TARGET_EINVAL;
2181             }
2182 
2183             crlen = sizeof(cr);
2184             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2185                                        &cr, &crlen));
2186             if (ret < 0) {
2187                 return ret;
2188             }
2189             if (len > crlen) {
2190                 len = crlen;
2191             }
2192             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2193                 return -TARGET_EFAULT;
2194             }
2195             __put_user(cr.pid, &tcr->pid);
2196             __put_user(cr.uid, &tcr->uid);
2197             __put_user(cr.gid, &tcr->gid);
2198             unlock_user_struct(tcr, optval_addr, 1);
2199             if (put_user_u32(len, optlen)) {
2200                 return -TARGET_EFAULT;
2201             }
2202             break;
2203         }
2204         case TARGET_SO_LINGER:
2205         {
2206             struct linger lg;
2207             socklen_t lglen;
2208             struct target_linger *tlg;
2209 
2210             if (get_user_u32(len, optlen)) {
2211                 return -TARGET_EFAULT;
2212             }
2213             if (len < 0) {
2214                 return -TARGET_EINVAL;
2215             }
2216 
2217             lglen = sizeof(lg);
2218             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2219                                        &lg, &lglen));
2220             if (ret < 0) {
2221                 return ret;
2222             }
2223             if (len > lglen) {
2224                 len = lglen;
2225             }
2226             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2227                 return -TARGET_EFAULT;
2228             }
2229             __put_user(lg.l_onoff, &tlg->l_onoff);
2230             __put_user(lg.l_linger, &tlg->l_linger);
2231             unlock_user_struct(tlg, optval_addr, 1);
2232             if (put_user_u32(len, optlen)) {
2233                 return -TARGET_EFAULT;
2234             }
2235             break;
2236         }
2237         /* Options with 'int' argument.  */
2238         case TARGET_SO_DEBUG:
2239             optname = SO_DEBUG;
2240             goto int_case;
2241         case TARGET_SO_REUSEADDR:
2242             optname = SO_REUSEADDR;
2243             goto int_case;
2244 #ifdef SO_REUSEPORT
2245         case TARGET_SO_REUSEPORT:
2246             optname = SO_REUSEPORT;
2247             goto int_case;
2248 #endif
2249         case TARGET_SO_TYPE:
2250             optname = SO_TYPE;
2251             goto int_case;
2252         case TARGET_SO_ERROR:
2253             optname = SO_ERROR;
2254             goto int_case;
2255         case TARGET_SO_DONTROUTE:
2256             optname = SO_DONTROUTE;
2257             goto int_case;
2258         case TARGET_SO_BROADCAST:
2259             optname = SO_BROADCAST;
2260             goto int_case;
2261         case TARGET_SO_SNDBUF:
2262             optname = SO_SNDBUF;
2263             goto int_case;
2264         case TARGET_SO_RCVBUF:
2265             optname = SO_RCVBUF;
2266             goto int_case;
2267         case TARGET_SO_KEEPALIVE:
2268             optname = SO_KEEPALIVE;
2269             goto int_case;
2270         case TARGET_SO_OOBINLINE:
2271             optname = SO_OOBINLINE;
2272             goto int_case;
2273         case TARGET_SO_NO_CHECK:
2274             optname = SO_NO_CHECK;
2275             goto int_case;
2276         case TARGET_SO_PRIORITY:
2277             optname = SO_PRIORITY;
2278             goto int_case;
2279 #ifdef SO_BSDCOMPAT
2280         case TARGET_SO_BSDCOMPAT:
2281             optname = SO_BSDCOMPAT;
2282             goto int_case;
2283 #endif
2284         case TARGET_SO_PASSCRED:
2285             optname = SO_PASSCRED;
2286             goto int_case;
2287         case TARGET_SO_TIMESTAMP:
2288             optname = SO_TIMESTAMP;
2289             goto int_case;
2290         case TARGET_SO_RCVLOWAT:
2291             optname = SO_RCVLOWAT;
2292             goto int_case;
2293         case TARGET_SO_ACCEPTCONN:
2294             optname = SO_ACCEPTCONN;
2295             goto int_case;
2296         default:
2297             goto int_case;
2298         }
2299         break;
2300     case SOL_TCP:
2301         /* TCP options all take an 'int' value.  */
2302     int_case:
2303         if (get_user_u32(len, optlen))
2304             return -TARGET_EFAULT;
2305         if (len < 0)
2306             return -TARGET_EINVAL;
2307         lv = sizeof(lv);
2308         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2309         if (ret < 0)
2310             return ret;
2311         if (optname == SO_TYPE) {
2312             val = host_to_target_sock_type(val);
2313         }
2314         if (len > lv)
2315             len = lv;
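             /* Copy the value back using the smaller of the guest's and the
              * host's lengths: a 32-bit store when four bytes fit, otherwise
              * a single byte.
              */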
2316         if (len == 4) {
2317             if (put_user_u32(val, optval_addr))
2318                 return -TARGET_EFAULT;
2319         } else {
2320             if (put_user_u8(val, optval_addr))
2321                 return -TARGET_EFAULT;
2322         }
2323         if (put_user_u32(len, optlen))
2324             return -TARGET_EFAULT;
2325         break;
2326     case SOL_IP:
2327         switch(optname) {
2328         case IP_TOS:
2329         case IP_TTL:
2330         case IP_HDRINCL:
2331         case IP_ROUTER_ALERT:
2332         case IP_RECVOPTS:
2333         case IP_RETOPTS:
2334         case IP_PKTINFO:
2335         case IP_MTU_DISCOVER:
2336         case IP_RECVERR:
2337         case IP_RECVTOS:
2338 #ifdef IP_FREEBIND
2339         case IP_FREEBIND:
2340 #endif
2341         case IP_MULTICAST_TTL:
2342         case IP_MULTICAST_LOOP:
2343             if (get_user_u32(len, optlen))
2344                 return -TARGET_EFAULT;
2345             if (len < 0)
2346                 return -TARGET_EINVAL;
2347             lv = sizeof(lv);
2348             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2349             if (ret < 0)
2350                 return ret;
2351             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2352                 len = 1;
2353                 if (put_user_u32(len, optlen)
2354                     || put_user_u8(val, optval_addr))
2355                     return -TARGET_EFAULT;
2356             } else {
2357                 if (len > sizeof(int))
2358                     len = sizeof(int);
2359                 if (put_user_u32(len, optlen)
2360                     || put_user_u32(val, optval_addr))
2361                     return -TARGET_EFAULT;
2362             }
2363             break;
2364         default:
2365             ret = -TARGET_ENOPROTOOPT;
2366             break;
2367         }
2368         break;
2369     case SOL_IPV6:
2370         switch (optname) {
2371         case IPV6_MTU_DISCOVER:
2372         case IPV6_MTU:
2373         case IPV6_V6ONLY:
2374         case IPV6_RECVPKTINFO:
2375         case IPV6_UNICAST_HOPS:
2376         case IPV6_MULTICAST_HOPS:
2377         case IPV6_MULTICAST_LOOP:
2378         case IPV6_RECVERR:
2379         case IPV6_RECVHOPLIMIT:
2380         case IPV6_2292HOPLIMIT:
2381         case IPV6_CHECKSUM:
2382         case IPV6_ADDRFORM:
2383         case IPV6_2292PKTINFO:
2384         case IPV6_RECVTCLASS:
2385         case IPV6_RECVRTHDR:
2386         case IPV6_2292RTHDR:
2387         case IPV6_RECVHOPOPTS:
2388         case IPV6_2292HOPOPTS:
2389         case IPV6_RECVDSTOPTS:
2390         case IPV6_2292DSTOPTS:
2391         case IPV6_TCLASS:
2392 #ifdef IPV6_RECVPATHMTU
2393         case IPV6_RECVPATHMTU:
2394 #endif
2395 #ifdef IPV6_TRANSPARENT
2396         case IPV6_TRANSPARENT:
2397 #endif
2398 #ifdef IPV6_FREEBIND
2399         case IPV6_FREEBIND:
2400 #endif
2401 #ifdef IPV6_RECVORIGDSTADDR
2402         case IPV6_RECVORIGDSTADDR:
2403 #endif
2404             if (get_user_u32(len, optlen))
2405                 return -TARGET_EFAULT;
2406             if (len < 0)
2407                 return -TARGET_EINVAL;
2408             lv = sizeof(lv);
2409             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2410             if (ret < 0)
2411                 return ret;
2412             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2413                 len = 1;
2414                 if (put_user_u32(len, optlen)
2415                     || put_user_u8(val, optval_addr))
2416                     return -TARGET_EFAULT;
2417             } else {
2418                 if (len > sizeof(int))
2419                     len = sizeof(int);
2420                 if (put_user_u32(len, optlen)
2421                     || put_user_u32(val, optval_addr))
2422                     return -TARGET_EFAULT;
2423             }
2424             break;
2425         default:
2426             ret = -TARGET_ENOPROTOOPT;
2427             break;
2428         }
2429         break;
2430     default:
2431     unimplemented:
2432         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2433                  level, optname);
2434         ret = -TARGET_EOPNOTSUPP;
2435         break;
2436     }
2437     return ret;
2438 }
2439 
2440 /* Convert target low/high pair representing file offset into the host
2441  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2442  * as the kernel doesn't handle them either.
2443  */
2444 static void target_to_host_low_high(abi_ulong tlow,
2445                                     abi_ulong thigh,
2446                                     unsigned long *hlow,
2447                                     unsigned long *hhigh)
2448 {
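         /* Shifting in two half-width steps avoids an undefined full-width
          * shift when TARGET_LONG_BITS or HOST_LONG_BITS is 64.
          */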
2449     uint64_t off = tlow |
2450         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2451         TARGET_LONG_BITS / 2;
2452 
2453     *hlow = off;
2454     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2455 }
2456 
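     /* Lock a guest iovec array and build the matching host struct iovec
      * array.  On failure NULL is returned with errno set (callers convert
      * it to a target errno); on success the result must be released with
      * unlock_iovec().
      */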
2457 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2458                                 abi_ulong count, int copy)
2459 {
2460     struct target_iovec *target_vec;
2461     struct iovec *vec;
2462     abi_ulong total_len, max_len;
2463     int i;
2464     int err = 0;
2465     bool bad_address = false;
2466 
2467     if (count == 0) {
2468         errno = 0;
2469         return NULL;
2470     }
2471     if (count > IOV_MAX) {
2472         errno = EINVAL;
2473         return NULL;
2474     }
2475 
2476     vec = g_try_new0(struct iovec, count);
2477     if (vec == NULL) {
2478         errno = ENOMEM;
2479         return NULL;
2480     }
2481 
2482     target_vec = lock_user(VERIFY_READ, target_addr,
2483                            count * sizeof(struct target_iovec), 1);
2484     if (target_vec == NULL) {
2485         err = EFAULT;
2486         goto fail2;
2487     }
2488 
2489     /* ??? If host page size > target page size, this will result in a
2490        value larger than what we can actually support.  */
2491     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2492     total_len = 0;
2493 
2494     for (i = 0; i < count; i++) {
2495         abi_ulong base = tswapal(target_vec[i].iov_base);
2496         abi_long len = tswapal(target_vec[i].iov_len);
2497 
2498         if (len < 0) {
2499             err = EINVAL;
2500             goto fail;
2501         } else if (len == 0) {
2502             /* Zero length pointer is ignored.  */
2503             vec[i].iov_base = 0;
2504         } else {
2505             vec[i].iov_base = lock_user(type, base, len, copy);
2506             /* If the first buffer pointer is bad, this is a fault.  But
2507              * subsequent bad buffers will result in a partial write; this
2508              * is realized by filling the vector with null pointers and
2509              * zero lengths. */
2510             if (!vec[i].iov_base) {
2511                 if (i == 0) {
2512                     err = EFAULT;
2513                     goto fail;
2514                 } else {
2515                     bad_address = true;
2516                 }
2517             }
2518             if (bad_address) {
2519                 len = 0;
2520             }
2521             if (len > max_len - total_len) {
2522                 len = max_len - total_len;
2523             }
2524         }
2525         vec[i].iov_len = len;
2526         total_len += len;
2527     }
2528 
2529     unlock_user(target_vec, target_addr, 0);
2530     return vec;
2531 
2532  fail:
2533     while (--i >= 0) {
2534         if (tswapal(target_vec[i].iov_len) > 0) {
2535             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2536         }
2537     }
2538     unlock_user(target_vec, target_addr, 0);
2539  fail2:
2540     g_free(vec);
2541     errno = err;
2542     return NULL;
2543 }
2544 
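     /* Undo lock_iovec(): release each locked guest buffer (copying data
      * back to the guest when 'copy' is set) and free the host iovec array.
      */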
2545 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2546                          abi_ulong count, int copy)
2547 {
2548     struct target_iovec *target_vec;
2549     int i;
2550 
2551     target_vec = lock_user(VERIFY_READ, target_addr,
2552                            count * sizeof(struct target_iovec), 1);
2553     if (target_vec) {
2554         for (i = 0; i < count; i++) {
2555             abi_ulong base = tswapal(target_vec[i].iov_base);
2556             abi_long len = tswapal(target_vec[i].iov_len);
2557             if (len < 0) {
2558                 break;
2559             }
2560             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2561         }
2562         unlock_user(target_vec, target_addr, 0);
2563     }
2564 
2565     g_free(vec);
2566 }
2567 
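     /* Translate the guest's socket type and its SOCK_CLOEXEC/SOCK_NONBLOCK
      * flags into host values.  Returns 0 on success or -TARGET_EINVAL if a
      * requested flag cannot be expressed on this host.
      */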
2568 static inline int target_to_host_sock_type(int *type)
2569 {
2570     int host_type = 0;
2571     int target_type = *type;
2572 
2573     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2574     case TARGET_SOCK_DGRAM:
2575         host_type = SOCK_DGRAM;
2576         break;
2577     case TARGET_SOCK_STREAM:
2578         host_type = SOCK_STREAM;
2579         break;
2580     default:
2581         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2582         break;
2583     }
2584     if (target_type & TARGET_SOCK_CLOEXEC) {
2585 #if defined(SOCK_CLOEXEC)
2586         host_type |= SOCK_CLOEXEC;
2587 #else
2588         return -TARGET_EINVAL;
2589 #endif
2590     }
2591     if (target_type & TARGET_SOCK_NONBLOCK) {
2592 #if defined(SOCK_NONBLOCK)
2593         host_type |= SOCK_NONBLOCK;
2594 #elif !defined(O_NONBLOCK)
2595         return -TARGET_EINVAL;
2596 #endif
2597     }
2598     *type = host_type;
2599     return 0;
2600 }
2601 
2602 /* Try to emulate socket type flags after socket creation.  */
2603 static int sock_flags_fixup(int fd, int target_type)
2604 {
2605 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2606     if (target_type & TARGET_SOCK_NONBLOCK) {
2607         int flags = fcntl(fd, F_GETFL);
2608         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2609             close(fd);
2610             return -TARGET_EINVAL;
2611         }
2612     }
2613 #endif
2614     return fd;
2615 }
2616 
2617 /* do_socket() Must return target values and target errnos. */
2618 static abi_long do_socket(int domain, int type, int protocol)
2619 {
2620     int target_type = type;
2621     int ret;
2622 
2623     ret = target_to_host_sock_type(&type);
2624     if (ret) {
2625         return ret;
2626     }
2627 
2628     if (domain == PF_NETLINK && !(
2629 #ifdef CONFIG_RTNETLINK
2630          protocol == NETLINK_ROUTE ||
2631 #endif
2632          protocol == NETLINK_KOBJECT_UEVENT ||
2633          protocol == NETLINK_AUDIT)) {
2634         return -TARGET_EPFNOSUPPORT;
2635     }
2636 
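         /* For packet sockets the protocol is a 16-bit ethertype carried in
          * network byte order, so swap it here when guest and host
          * endianness differ.
          */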
2637     if (domain == AF_PACKET ||
2638         (domain == AF_INET && type == SOCK_PACKET)) {
2639         protocol = tswap16(protocol);
2640     }
2641 
2642     ret = get_errno(socket(domain, type, protocol));
2643     if (ret >= 0) {
2644         ret = sock_flags_fixup(ret, target_type);
2645         if (type == SOCK_PACKET) {
2646             /* Handle an obsolete case: if the socket type is
2647              * SOCK_PACKET, the socket is bound by name.
2648              */
2649             fd_trans_register(ret, &target_packet_trans);
2650         } else if (domain == PF_NETLINK) {
2651             switch (protocol) {
2652 #ifdef CONFIG_RTNETLINK
2653             case NETLINK_ROUTE:
2654                 fd_trans_register(ret, &target_netlink_route_trans);
2655                 break;
2656 #endif
2657             case NETLINK_KOBJECT_UEVENT:
2658                 /* nothing to do: messages are strings */
2659                 break;
2660             case NETLINK_AUDIT:
2661                 fd_trans_register(ret, &target_netlink_audit_trans);
2662                 break;
2663             default:
2664                 g_assert_not_reached();
2665             }
2666         }
2667     }
2668     return ret;
2669 }
2670 
2671 /* do_bind() Must return target values and target errnos. */
2672 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2673                         socklen_t addrlen)
2674 {
2675     void *addr;
2676     abi_long ret;
2677 
2678     if ((int)addrlen < 0) {
2679         return -TARGET_EINVAL;
2680     }
2681 
2682     addr = alloca(addrlen+1);
2683 
2684     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2685     if (ret)
2686         return ret;
2687 
2688     return get_errno(bind(sockfd, addr, addrlen));
2689 }
2690 
2691 /* do_connect() Must return target values and target errnos. */
2692 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2693                            socklen_t addrlen)
2694 {
2695     void *addr;
2696     abi_long ret;
2697 
2698     if ((int)addrlen < 0) {
2699         return -TARGET_EINVAL;
2700     }
2701 
2702     addr = alloca(addrlen+1);
2703 
2704     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2705     if (ret)
2706         return ret;
2707 
2708     return get_errno(safe_connect(sockfd, addr, addrlen));
2709 }
2710 
2711 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2712 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2713                                       int flags, int send)
2714 {
2715     abi_long ret, len;
2716     struct msghdr msg;
2717     abi_ulong count;
2718     struct iovec *vec;
2719     abi_ulong target_vec;
2720 
2721     if (msgp->msg_name) {
2722         msg.msg_namelen = tswap32(msgp->msg_namelen);
2723         msg.msg_name = alloca(msg.msg_namelen+1);
2724         ret = target_to_host_sockaddr(fd, msg.msg_name,
2725                                       tswapal(msgp->msg_name),
2726                                       msg.msg_namelen);
2727         if (ret == -TARGET_EFAULT) {
2728             /* For connected sockets msg_name and msg_namelen must
2729              * be ignored, so returning EFAULT immediately is wrong.
2730              * Instead, pass a bad msg_name to the host kernel, and
2731              * let it decide whether to return EFAULT or not.
2732              */
2733             msg.msg_name = (void *)-1;
2734         } else if (ret) {
2735             goto out2;
2736         }
2737     } else {
2738         msg.msg_name = NULL;
2739         msg.msg_namelen = 0;
2740     }
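         /* Allocate the host control buffer at twice the size of the guest's;
          * target_to_host_cmsg() checks that the converted messages really fit
          * and logs a QEMU bug if they would overflow.
          */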
2741     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2742     msg.msg_control = alloca(msg.msg_controllen);
2743     memset(msg.msg_control, 0, msg.msg_controllen);
2744 
2745     msg.msg_flags = tswap32(msgp->msg_flags);
2746 
2747     count = tswapal(msgp->msg_iovlen);
2748     target_vec = tswapal(msgp->msg_iov);
2749 
2750     if (count > IOV_MAX) {
2751         /* sendmsg/recvmsg return a different errno for this condition than
2752          * readv/writev do, so we must catch it here before lock_iovec() does.
2753          */
2754         ret = -TARGET_EMSGSIZE;
2755         goto out2;
2756     }
2757 
2758     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2759                      target_vec, count, send);
2760     if (vec == NULL) {
2761         ret = -host_to_target_errno(errno);
2762         goto out2;
2763     }
2764     msg.msg_iovlen = count;
2765     msg.msg_iov = vec;
2766 
2767     if (send) {
2768         if (fd_trans_target_to_host_data(fd)) {
2769             void *host_msg;
2770 
2771             host_msg = g_malloc(msg.msg_iov->iov_len);
2772             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2773             ret = fd_trans_target_to_host_data(fd)(host_msg,
2774                                                    msg.msg_iov->iov_len);
2775             if (ret >= 0) {
2776                 msg.msg_iov->iov_base = host_msg;
2777                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2778             }
2779             g_free(host_msg);
2780         } else {
2781             ret = target_to_host_cmsg(&msg, msgp);
2782             if (ret == 0) {
2783                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2784             }
2785         }
2786     } else {
2787         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2788         if (!is_error(ret)) {
2789             len = ret;
2790             if (fd_trans_host_to_target_data(fd)) {
2791                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2792                                                MIN(msg.msg_iov->iov_len, len));
2793             } else {
2794                 ret = host_to_target_cmsg(msgp, &msg);
2795             }
2796             if (!is_error(ret)) {
2797                 msgp->msg_namelen = tswap32(msg.msg_namelen);
2798                 msgp->msg_flags = tswap32(msg.msg_flags);
2799                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2800                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2801                                     msg.msg_name, msg.msg_namelen);
2802                     if (ret) {
2803                         goto out;
2804                     }
2805                 }
2806 
2807                 ret = len;
2808             }
2809         }
2810     }
2811 
2812 out:
2813     unlock_iovec(vec, target_vec, count, !send);
2814 out2:
2815     return ret;
2816 }
2817 
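     /* Lock the guest msghdr and hand it to do_sendrecvmsg_locked(); the
      * struct is copied in for sends and copied back out for receives.
      */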
2818 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2819                                int flags, int send)
2820 {
2821     abi_long ret;
2822     struct target_msghdr *msgp;
2823 
2824     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2825                           msgp,
2826                           target_msg,
2827                           send ? 1 : 0)) {
2828         return -TARGET_EFAULT;
2829     }
2830     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2831     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2832     return ret;
2833 }
2834 
2835 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2836  * so it might not have this *mmsg-specific flag either.
2837  */
2838 #ifndef MSG_WAITFORONE
2839 #define MSG_WAITFORONE 0x10000
2840 #endif
2841 
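     /* Emulate sendmmsg()/recvmmsg() as a loop of single sendmsg()/recvmsg()
      * calls on the locked vector: processing stops at the first error, and
      * the number of messages handled so far is returned if it is non-zero.
      */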
2842 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2843                                 unsigned int vlen, unsigned int flags,
2844                                 int send)
2845 {
2846     struct target_mmsghdr *mmsgp;
2847     abi_long ret = 0;
2848     int i;
2849 
2850     if (vlen > UIO_MAXIOV) {
2851         vlen = UIO_MAXIOV;
2852     }
2853 
2854     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2855     if (!mmsgp) {
2856         return -TARGET_EFAULT;
2857     }
2858 
2859     for (i = 0; i < vlen; i++) {
2860         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2861         if (is_error(ret)) {
2862             break;
2863         }
2864         mmsgp[i].msg_len = tswap32(ret);
2865         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2866         if (flags & MSG_WAITFORONE) {
2867             flags |= MSG_DONTWAIT;
2868         }
2869     }
2870 
2871     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2872 
2873     /* Return number of datagrams sent if we sent any at all;
2874      * otherwise return the error.
2875      */
2876     if (i) {
2877         return i;
2878     }
2879     return ret;
2880 }
2881 
2882 /* do_accept4() Must return target values and target errnos. */
2883 static abi_long do_accept4(int fd, abi_ulong target_addr,
2884                            abi_ulong target_addrlen_addr, int flags)
2885 {
2886     socklen_t addrlen, ret_addrlen;
2887     void *addr;
2888     abi_long ret;
2889     int host_flags;
2890 
2891     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2892 
2893     if (target_addr == 0) {
2894         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2895     }
2896 
2897     /* linux returns EINVAL if addrlen pointer is invalid */
2898     if (get_user_u32(addrlen, target_addrlen_addr))
2899         return -TARGET_EINVAL;
2900 
2901     if ((int)addrlen < 0) {
2902         return -TARGET_EINVAL;
2903     }
2904 
2905     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2906         return -TARGET_EINVAL;
2907 
2908     addr = alloca(addrlen);
2909 
2910     ret_addrlen = addrlen;
2911     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
2912     if (!is_error(ret)) {
2913         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2914         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2915             ret = -TARGET_EFAULT;
2916         }
2917     }
2918     return ret;
2919 }
2920 
2921 /* do_getpeername() Must return target values and target errnos. */
2922 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2923                                abi_ulong target_addrlen_addr)
2924 {
2925     socklen_t addrlen, ret_addrlen;
2926     void *addr;
2927     abi_long ret;
2928 
2929     if (get_user_u32(addrlen, target_addrlen_addr))
2930         return -TARGET_EFAULT;
2931 
2932     if ((int)addrlen < 0) {
2933         return -TARGET_EINVAL;
2934     }
2935 
2936     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2937         return -TARGET_EFAULT;
2938 
2939     addr = alloca(addrlen);
2940 
2941     ret_addrlen = addrlen;
2942     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
2943     if (!is_error(ret)) {
2944         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2945         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2946             ret = -TARGET_EFAULT;
2947         }
2948     }
2949     return ret;
2950 }
2951 
2952 /* do_getsockname() Must return target values and target errnos. */
2953 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2954                                abi_ulong target_addrlen_addr)
2955 {
2956     socklen_t addrlen, ret_addrlen;
2957     void *addr;
2958     abi_long ret;
2959 
2960     if (get_user_u32(addrlen, target_addrlen_addr))
2961         return -TARGET_EFAULT;
2962 
2963     if ((int)addrlen < 0) {
2964         return -TARGET_EINVAL;
2965     }
2966 
2967     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2968         return -TARGET_EFAULT;
2969 
2970     addr = alloca(addrlen);
2971 
2972     ret_addrlen = addrlen;
2973     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
2974     if (!is_error(ret)) {
2975         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2976         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2977             ret = -TARGET_EFAULT;
2978         }
2979     }
2980     return ret;
2981 }
2982 
2983 /* do_socketpair() Must return target values and target errnos. */
2984 static abi_long do_socketpair(int domain, int type, int protocol,
2985                               abi_ulong target_tab_addr)
2986 {
2987     int tab[2];
2988     abi_long ret;
2989 
2990     target_to_host_sock_type(&type);
2991 
2992     ret = get_errno(socketpair(domain, type, protocol, tab));
2993     if (!is_error(ret)) {
2994         if (put_user_s32(tab[0], target_tab_addr)
2995             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2996             ret = -TARGET_EFAULT;
2997     }
2998     return ret;
2999 }
3000 
3001 /* do_sendto() Must return target values and target errnos. */
3002 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3003                           abi_ulong target_addr, socklen_t addrlen)
3004 {
3005     void *addr;
3006     void *host_msg;
3007     void *copy_msg = NULL;
3008     abi_long ret;
3009 
3010     if ((int)addrlen < 0) {
3011         return -TARGET_EINVAL;
3012     }
3013 
3014     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3015     if (!host_msg)
3016         return -TARGET_EFAULT;
3017     if (fd_trans_target_to_host_data(fd)) {
3018         copy_msg = host_msg;
3019         host_msg = g_malloc(len);
3020         memcpy(host_msg, copy_msg, len);
3021         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3022         if (ret < 0) {
3023             goto fail;
3024         }
3025     }
3026     if (target_addr) {
3027         addr = alloca(addrlen+1);
3028         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3029         if (ret) {
3030             goto fail;
3031         }
3032         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3033     } else {
3034         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3035     }
3036 fail:
3037     if (copy_msg) {
3038         g_free(host_msg);
3039         host_msg = copy_msg;
3040     }
3041     unlock_user(host_msg, msg, 0);
3042     return ret;
3043 }
3044 
3045 /* do_recvfrom() Must return target values and target errnos. */
3046 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3047                             abi_ulong target_addr,
3048                             abi_ulong target_addrlen)
3049 {
3050     socklen_t addrlen, ret_addrlen;
3051     void *addr;
3052     void *host_msg;
3053     abi_long ret;
3054 
3055     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3056     if (!host_msg)
3057         return -TARGET_EFAULT;
3058     if (target_addr) {
3059         if (get_user_u32(addrlen, target_addrlen)) {
3060             ret = -TARGET_EFAULT;
3061             goto fail;
3062         }
3063         if ((int)addrlen < 0) {
3064             ret = -TARGET_EINVAL;
3065             goto fail;
3066         }
3067         addr = alloca(addrlen);
3068         ret_addrlen = addrlen;
3069         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3070                                       addr, &ret_addrlen));
3071     } else {
3072         addr = NULL; /* To keep compiler quiet.  */
3073         addrlen = 0; /* To keep compiler quiet.  */
3074         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3075     }
3076     if (!is_error(ret)) {
3077         if (fd_trans_host_to_target_data(fd)) {
3078             abi_long trans;
3079             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3080             if (is_error(trans)) {
3081                 ret = trans;
3082                 goto fail;
3083             }
3084         }
3085         if (target_addr) {
3086             host_to_target_sockaddr(target_addr, addr,
3087                                     MIN(addrlen, ret_addrlen));
3088             if (put_user_u32(ret_addrlen, target_addrlen)) {
3089                 ret = -TARGET_EFAULT;
3090                 goto fail;
3091             }
3092         }
3093         unlock_user(host_msg, msg, len);
3094     } else {
3095 fail:
3096         unlock_user(host_msg, msg, 0);
3097     }
3098     return ret;
3099 }
3100 
3101 #ifdef TARGET_NR_socketcall
3102 /* do_socketcall() must return target values and target errnos. */
3103 static abi_long do_socketcall(int num, abi_ulong vptr)
3104 {
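    /*
     * The guest passes a single pointer (vptr) to an array of abi_long
     * values; num selects the socket operation those values are the
     * arguments for.
     */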
3105     static const unsigned nargs[] = { /* number of arguments per operation */
3106         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3107         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3108         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3109         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3110         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3111         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3112         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3113         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3114         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3115         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3116         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3117         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3118         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3119         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3120         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3121         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3122         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3123         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3124         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3125         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3126     };
3127     abi_long a[6]; /* max 6 args */
3128     unsigned i;
3129 
3130     /* check the range of the first argument num */
3131     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3132     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3133         return -TARGET_EINVAL;
3134     }
3135     /* ensure we have space for args */
3136     if (nargs[num] > ARRAY_SIZE(a)) {
3137         return -TARGET_EINVAL;
3138     }
3139     /* collect the arguments in a[] according to nargs[] */
3140     for (i = 0; i < nargs[num]; ++i) {
3141         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3142             return -TARGET_EFAULT;
3143         }
3144     }
3145     /* now when we have the args, invoke the appropriate underlying function */
3146     switch (num) {
3147     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3148         return do_socket(a[0], a[1], a[2]);
3149     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3150         return do_bind(a[0], a[1], a[2]);
3151     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3152         return do_connect(a[0], a[1], a[2]);
3153     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3154         return get_errno(listen(a[0], a[1]));
3155     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3156         return do_accept4(a[0], a[1], a[2], 0);
3157     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3158         return do_getsockname(a[0], a[1], a[2]);
3159     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3160         return do_getpeername(a[0], a[1], a[2]);
3161     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3162         return do_socketpair(a[0], a[1], a[2], a[3]);
3163     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3164         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3165     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3166         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3167     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3168         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3169     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3170         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3171     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3172         return get_errno(shutdown(a[0], a[1]));
3173     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3174         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3175     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3176         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3177     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3178         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3179     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3180         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3181     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3182         return do_accept4(a[0], a[1], a[2], a[3]);
3183     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3184         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3185     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3186         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3187     default:
3188         gemu_log("Unsupported socketcall: %d\n", num);
3189         return -TARGET_EINVAL;
3190     }
3191 }
3192 #endif
3193 
3194 #define N_SHM_REGIONS	32
3195 
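/*
 * Book-keeping for guest shmat() attachments: do_shmdt() looks the address up
 * here to find the size of the segment whose page flags must be cleared.
 */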
3196 static struct shm_region {
3197     abi_ulong start;
3198     abi_ulong size;
3199     bool in_use;
3200 } shm_regions[N_SHM_REGIONS];
3201 
3202 #ifndef TARGET_SEMID64_DS
3203 /* asm-generic version of this struct */
3204 struct target_semid64_ds
3205 {
3206   struct target_ipc_perm sem_perm;
3207   abi_ulong sem_otime;
3208 #if TARGET_ABI_BITS == 32
3209   abi_ulong __unused1;
3210 #endif
3211   abi_ulong sem_ctime;
3212 #if TARGET_ABI_BITS == 32
3213   abi_ulong __unused2;
3214 #endif
3215   abi_ulong sem_nsems;
3216   abi_ulong __unused3;
3217   abi_ulong __unused4;
3218 };
3219 #endif
3220 
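/* The width of the ipc_perm 'mode' and '__seq' fields varies with the target
 * ABI, hence the per-target tswap sizes in the helpers below. */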
3221 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3222                                                abi_ulong target_addr)
3223 {
3224     struct target_ipc_perm *target_ip;
3225     struct target_semid64_ds *target_sd;
3226 
3227     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3228         return -TARGET_EFAULT;
3229     target_ip = &(target_sd->sem_perm);
3230     host_ip->__key = tswap32(target_ip->__key);
3231     host_ip->uid = tswap32(target_ip->uid);
3232     host_ip->gid = tswap32(target_ip->gid);
3233     host_ip->cuid = tswap32(target_ip->cuid);
3234     host_ip->cgid = tswap32(target_ip->cgid);
3235 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3236     host_ip->mode = tswap32(target_ip->mode);
3237 #else
3238     host_ip->mode = tswap16(target_ip->mode);
3239 #endif
3240 #if defined(TARGET_PPC)
3241     host_ip->__seq = tswap32(target_ip->__seq);
3242 #else
3243     host_ip->__seq = tswap16(target_ip->__seq);
3244 #endif
3245     unlock_user_struct(target_sd, target_addr, 0);
3246     return 0;
3247 }
3248 
3249 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3250                                                struct ipc_perm *host_ip)
3251 {
3252     struct target_ipc_perm *target_ip;
3253     struct target_semid64_ds *target_sd;
3254 
3255     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3256         return -TARGET_EFAULT;
3257     target_ip = &(target_sd->sem_perm);
3258     target_ip->__key = tswap32(host_ip->__key);
3259     target_ip->uid = tswap32(host_ip->uid);
3260     target_ip->gid = tswap32(host_ip->gid);
3261     target_ip->cuid = tswap32(host_ip->cuid);
3262     target_ip->cgid = tswap32(host_ip->cgid);
3263 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3264     target_ip->mode = tswap32(host_ip->mode);
3265 #else
3266     target_ip->mode = tswap16(host_ip->mode);
3267 #endif
3268 #if defined(TARGET_PPC)
3269     target_ip->__seq = tswap32(host_ip->__seq);
3270 #else
3271     target_ip->__seq = tswap16(host_ip->__seq);
3272 #endif
3273     unlock_user_struct(target_sd, target_addr, 1);
3274     return 0;
3275 }
3276 
3277 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3278                                                abi_ulong target_addr)
3279 {
3280     struct target_semid64_ds *target_sd;
3281 
3282     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3283         return -TARGET_EFAULT;
3284     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3285         return -TARGET_EFAULT;
3286     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3287     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3288     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3289     unlock_user_struct(target_sd, target_addr, 0);
3290     return 0;
3291 }
3292 
3293 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3294                                                struct semid_ds *host_sd)
3295 {
3296     struct target_semid64_ds *target_sd;
3297 
3298     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3299         return -TARGET_EFAULT;
3300     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3301         return -TARGET_EFAULT;
3302     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3303     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3304     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3305     unlock_user_struct(target_sd, target_addr, 1);
3306     return 0;
3307 }
3308 
3309 struct target_seminfo {
3310     int semmap;
3311     int semmni;
3312     int semmns;
3313     int semmnu;
3314     int semmsl;
3315     int semopm;
3316     int semume;
3317     int semusz;
3318     int semvmx;
3319     int semaem;
3320 };
3321 
3322 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3323                                               struct seminfo *host_seminfo)
3324 {
3325     struct target_seminfo *target_seminfo;
3326     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3327         return -TARGET_EFAULT;
3328     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3329     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3330     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3331     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3332     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3333     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3334     __put_user(host_seminfo->semume, &target_seminfo->semume);
3335     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3336     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3337     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3338     unlock_user_struct(target_seminfo, target_addr, 1);
3339     return 0;
3340 }
3341 
3342 union semun {
3343     int val;
3344     struct semid_ds *buf;
3345     unsigned short *array;
3346     struct seminfo *__buf;
3347 };
3348 
3349 union target_semun {
3350     int val;
3351     abi_ulong buf;
3352     abi_ulong array;
3353     abi_ulong __buf;
3354 };
3355 
3356 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3357                                                abi_ulong target_addr)
3358 {
3359     int nsems;
3360     unsigned short *array;
3361     union semun semun;
3362     struct semid_ds semid_ds;
3363     int i, ret;
3364 
3365     semun.buf = &semid_ds;
3366 
3367     ret = semctl(semid, 0, IPC_STAT, semun);
3368     if (ret == -1)
3369         return get_errno(ret);
3370 
3371     nsems = semid_ds.sem_nsems;
3372 
3373     *host_array = g_try_new(unsigned short, nsems);
3374     if (!*host_array) {
3375         return -TARGET_ENOMEM;
3376     }
3377     array = lock_user(VERIFY_READ, target_addr,
3378                       nsems*sizeof(unsigned short), 1);
3379     if (!array) {
3380         g_free(*host_array);
3381         return -TARGET_EFAULT;
3382     }
3383 
3384     for (i = 0; i < nsems; i++) {
3385         __get_user((*host_array)[i], &array[i]);
3386     }
3387     unlock_user(array, target_addr, 0);
3388 
3389     return 0;
3390 }
3391 
3392 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3393                                                unsigned short **host_array)
3394 {
3395     int nsems;
3396     unsigned short *array;
3397     union semun semun;
3398     struct semid_ds semid_ds;
3399     int i, ret;
3400 
3401     semun.buf = &semid_ds;
3402 
3403     ret = semctl(semid, 0, IPC_STAT, semun);
3404     if (ret == -1)
3405         return get_errno(ret);
3406 
3407     nsems = semid_ds.sem_nsems;
3408 
3409     array = lock_user(VERIFY_WRITE, target_addr,
3410                       nsems*sizeof(unsigned short), 0);
3411     if (!array)
3412         return -TARGET_EFAULT;
3413 
3414     for (i = 0; i < nsems; i++) {
3415         __put_user((*host_array)[i], &array[i]);
3416     }
3417     g_free(*host_array);
3418     unlock_user(array, target_addr, 1);
3419 
3420     return 0;
3421 }
3422 
3423 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3424                                  abi_ulong target_arg)
3425 {
3426     union target_semun target_su = { .buf = target_arg };
3427     union semun arg;
3428     struct semid_ds dsarg;
3429     unsigned short *array = NULL;
3430     struct seminfo seminfo;
3431     abi_long ret = -TARGET_EINVAL;
3432     abi_long err;
3433     cmd &= 0xff;
3434 
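    /*
     * target_su holds the guest's semun value; each command below converts
     * only the member it actually uses before calling the host semctl().
     */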
3435     switch (cmd) {
3436     case GETVAL:
3437     case SETVAL:
3438         /* In 64 bit cross-endian situations, we will erroneously pick up
3439          * the wrong half of the union for the "val" element.  To rectify
3440          * this, the entire 8-byte structure is byteswapped, followed by
3441          * a swap of the 4 byte val field. In other cases, the data is
3442          * already in proper host byte order. */
3443         if (sizeof(target_su.val) != sizeof(target_su.buf)) {
3444             target_su.buf = tswapal(target_su.buf);
3445             arg.val = tswap32(target_su.val);
3446         } else {
3447             arg.val = target_su.val;
3448         }
3449         ret = get_errno(semctl(semid, semnum, cmd, arg));
3450         break;
3451     case GETALL:
3452     case SETALL:
3453         err = target_to_host_semarray(semid, &array, target_su.array);
3454         if (err)
3455             return err;
3456         arg.array = array;
3457         ret = get_errno(semctl(semid, semnum, cmd, arg));
3458         err = host_to_target_semarray(semid, target_su.array, &array);
3459         if (err)
3460             return err;
3461         break;
3462     case IPC_STAT:
3463     case IPC_SET:
3464     case SEM_STAT:
3465         err = target_to_host_semid_ds(&dsarg, target_su.buf);
3466         if (err)
3467             return err;
3468         arg.buf = &dsarg;
3469         ret = get_errno(semctl(semid, semnum, cmd, arg));
3470         err = host_to_target_semid_ds(target_su.buf, &dsarg);
3471         if (err)
3472             return err;
3473         break;
3474     case IPC_INFO:
3475     case SEM_INFO:
3476         arg.__buf = &seminfo;
3477         ret = get_errno(semctl(semid, semnum, cmd, arg));
3478         err = host_to_target_seminfo(target_su.__buf, &seminfo);
3479         if (err)
3480             return err;
3481         break;
3482     case IPC_RMID:
3483     case GETPID:
3484     case GETNCNT:
3485     case GETZCNT:
3486         ret = get_errno(semctl(semid, semnum, cmd, NULL));
3487         break;
3488     }
3489 
3490     return ret;
3491 }
3492 
3493 struct target_sembuf {
3494     unsigned short sem_num;
3495     short sem_op;
3496     short sem_flg;
3497 };
3498 
3499 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3500                                              abi_ulong target_addr,
3501                                              unsigned nsops)
3502 {
3503     struct target_sembuf *target_sembuf;
3504     int i;
3505 
3506     target_sembuf = lock_user(VERIFY_READ, target_addr,
3507                               nsops*sizeof(struct target_sembuf), 1);
3508     if (!target_sembuf)
3509         return -TARGET_EFAULT;
3510 
3511     for (i = 0; i < nsops; i++) {
3512         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3513         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3514         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3515     }
3516 
3517     unlock_user(target_sembuf, target_addr, 0);
3518 
3519     return 0;
3520 }
3521 
3522 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3523 {
3524     struct sembuf sops[nsops];
3525 
3526     if (target_to_host_sembuf(sops, ptr, nsops))
3527         return -TARGET_EFAULT;
3528 
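    /* A NULL timeout makes semtimedop() behave exactly like semop(). */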
3529     return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3530 }
3531 
3532 struct target_msqid_ds
3533 {
3534     struct target_ipc_perm msg_perm;
3535     abi_ulong msg_stime;
3536 #if TARGET_ABI_BITS == 32
3537     abi_ulong __unused1;
3538 #endif
3539     abi_ulong msg_rtime;
3540 #if TARGET_ABI_BITS == 32
3541     abi_ulong __unused2;
3542 #endif
3543     abi_ulong msg_ctime;
3544 #if TARGET_ABI_BITS == 32
3545     abi_ulong __unused3;
3546 #endif
3547     abi_ulong __msg_cbytes;
3548     abi_ulong msg_qnum;
3549     abi_ulong msg_qbytes;
3550     abi_ulong msg_lspid;
3551     abi_ulong msg_lrpid;
3552     abi_ulong __unused4;
3553     abi_ulong __unused5;
3554 };
3555 
3556 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3557                                                abi_ulong target_addr)
3558 {
3559     struct target_msqid_ds *target_md;
3560 
3561     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3562         return -TARGET_EFAULT;
3563     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3564         return -TARGET_EFAULT;
3565     host_md->msg_stime = tswapal(target_md->msg_stime);
3566     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3567     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3568     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3569     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3570     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3571     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3572     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3573     unlock_user_struct(target_md, target_addr, 0);
3574     return 0;
3575 }
3576 
3577 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3578                                                struct msqid_ds *host_md)
3579 {
3580     struct target_msqid_ds *target_md;
3581 
3582     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3583         return -TARGET_EFAULT;
3584     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3585         return -TARGET_EFAULT;
3586     target_md->msg_stime = tswapal(host_md->msg_stime);
3587     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3588     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3589     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3590     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3591     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3592     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3593     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3594     unlock_user_struct(target_md, target_addr, 1);
3595     return 0;
3596 }
3597 
3598 struct target_msginfo {
3599     int msgpool;
3600     int msgmap;
3601     int msgmax;
3602     int msgmnb;
3603     int msgmni;
3604     int msgssz;
3605     int msgtql;
3606     unsigned short int msgseg;
3607 };
3608 
3609 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3610                                               struct msginfo *host_msginfo)
3611 {
3612     struct target_msginfo *target_msginfo;
3613     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3614         return -TARGET_EFAULT;
3615     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3616     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3617     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3618     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3619     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3620     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3621     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3622     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3623     unlock_user_struct(target_msginfo, target_addr, 1);
3624     return 0;
3625 }
3626 
3627 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3628 {
3629     struct msqid_ds dsarg;
3630     struct msginfo msginfo;
3631     abi_long ret = -TARGET_EINVAL;
3632 
3633     cmd &= 0xff;
3634 
3635     switch (cmd) {
3636     case IPC_STAT:
3637     case IPC_SET:
3638     case MSG_STAT:
3639         if (target_to_host_msqid_ds(&dsarg,ptr))
3640             return -TARGET_EFAULT;
3641         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3642         if (host_to_target_msqid_ds(ptr,&dsarg))
3643             return -TARGET_EFAULT;
3644         break;
3645     case IPC_RMID:
3646         ret = get_errno(msgctl(msgid, cmd, NULL));
3647         break;
3648     case IPC_INFO:
3649     case MSG_INFO:
3650         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3651         if (host_to_target_msginfo(ptr, &msginfo))
3652             return -TARGET_EFAULT;
3653         break;
3654     }
3655 
3656     return ret;
3657 }
3658 
3659 struct target_msgbuf {
3660     abi_long mtype;
3661     char mtext[1];
3662 };
3663 
3664 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3665                                  ssize_t msgsz, int msgflg)
3666 {
3667     struct target_msgbuf *target_mb;
3668     struct msgbuf *host_mb;
3669     abi_long ret = 0;
3670 
3671     if (msgsz < 0) {
3672         return -TARGET_EINVAL;
3673     }
3674 
3675     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3676         return -TARGET_EFAULT;
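    /* The host msgbuf needs room for a native long mtype plus msgsz bytes. */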
3677     host_mb = g_try_malloc(msgsz + sizeof(long));
3678     if (!host_mb) {
3679         unlock_user_struct(target_mb, msgp, 0);
3680         return -TARGET_ENOMEM;
3681     }
3682     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3683     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3684     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3685     g_free(host_mb);
3686     unlock_user_struct(target_mb, msgp, 0);
3687 
3688     return ret;
3689 }
3690 
3691 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3692                                  ssize_t msgsz, abi_long msgtyp,
3693                                  int msgflg)
3694 {
3695     struct target_msgbuf *target_mb;
3696     char *target_mtext;
3697     struct msgbuf *host_mb;
3698     abi_long ret = 0;
3699 
3700     if (msgsz < 0) {
3701         return -TARGET_EINVAL;
3702     }
3703 
3704     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3705         return -TARGET_EFAULT;
3706 
3707     host_mb = g_try_malloc(msgsz + sizeof(long));
3708     if (!host_mb) {
3709         ret = -TARGET_ENOMEM;
3710         goto end;
3711     }
3712     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3713 
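    /* On success, ret is the number of mtext bytes received: copy them back
     * into the guest buffer, then update mtype below. */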
3714     if (ret > 0) {
3715         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3716         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3717         if (!target_mtext) {
3718             ret = -TARGET_EFAULT;
3719             goto end;
3720         }
3721         memcpy(target_mb->mtext, host_mb->mtext, ret);
3722         unlock_user(target_mtext, target_mtext_addr, ret);
3723     }
3724 
3725     target_mb->mtype = tswapal(host_mb->mtype);
3726 
3727 end:
3728     if (target_mb)
3729         unlock_user_struct(target_mb, msgp, 1);
3730     g_free(host_mb);
3731     return ret;
3732 }
3733 
3734 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3735                                                abi_ulong target_addr)
3736 {
3737     struct target_shmid_ds *target_sd;
3738 
3739     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3740         return -TARGET_EFAULT;
3741     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3742         return -TARGET_EFAULT;
3743     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3744     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3745     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3746     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3747     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3748     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3749     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3750     unlock_user_struct(target_sd, target_addr, 0);
3751     return 0;
3752 }
3753 
3754 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3755                                                struct shmid_ds *host_sd)
3756 {
3757     struct target_shmid_ds *target_sd;
3758 
3759     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3760         return -TARGET_EFAULT;
3761     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3762         return -TARGET_EFAULT;
3763     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3764     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3765     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3766     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3767     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3768     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3769     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3770     unlock_user_struct(target_sd, target_addr, 1);
3771     return 0;
3772 }
3773 
3774 struct  target_shminfo {
3775     abi_ulong shmmax;
3776     abi_ulong shmmin;
3777     abi_ulong shmmni;
3778     abi_ulong shmseg;
3779     abi_ulong shmall;
3780 };
3781 
3782 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3783                                               struct shminfo *host_shminfo)
3784 {
3785     struct target_shminfo *target_shminfo;
3786     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3787         return -TARGET_EFAULT;
3788     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3789     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3790     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3791     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3792     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3793     unlock_user_struct(target_shminfo, target_addr, 1);
3794     return 0;
3795 }
3796 
3797 struct target_shm_info {
3798     int used_ids;
3799     abi_ulong shm_tot;
3800     abi_ulong shm_rss;
3801     abi_ulong shm_swp;
3802     abi_ulong swap_attempts;
3803     abi_ulong swap_successes;
3804 };
3805 
3806 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3807                                                struct shm_info *host_shm_info)
3808 {
3809     struct target_shm_info *target_shm_info;
3810     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3811         return -TARGET_EFAULT;
3812     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3813     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3814     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3815     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3816     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3817     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3818     unlock_user_struct(target_shm_info, target_addr, 1);
3819     return 0;
3820 }
3821 
3822 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3823 {
3824     struct shmid_ds dsarg;
3825     struct shminfo shminfo;
3826     struct shm_info shm_info;
3827     abi_long ret = -TARGET_EINVAL;
3828 
3829     cmd &= 0xff;
3830 
3831     switch (cmd) {
3832     case IPC_STAT:
3833     case IPC_SET:
3834     case SHM_STAT:
3835         if (target_to_host_shmid_ds(&dsarg, buf))
3836             return -TARGET_EFAULT;
3837         ret = get_errno(shmctl(shmid, cmd, &dsarg));
3838         if (host_to_target_shmid_ds(buf, &dsarg))
3839             return -TARGET_EFAULT;
3840         break;
3841     case IPC_INFO:
3842         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3843         if (host_to_target_shminfo(buf, &shminfo))
3844             return -TARGET_EFAULT;
3845         break;
3846     case SHM_INFO:
3847         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3848         if (host_to_target_shm_info(buf, &shm_info))
3849             return -TARGET_EFAULT;
3850         break;
3851     case IPC_RMID:
3852     case SHM_LOCK:
3853     case SHM_UNLOCK:
3854         ret = get_errno(shmctl(shmid, cmd, NULL));
3855         break;
3856     }
3857 
3858     return ret;
3859 }
3860 
3861 #ifndef TARGET_FORCE_SHMLBA
3862 /* For most architectures, SHMLBA is the same as the page size;
3863  * some architectures have larger values, in which case they should
3864  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3865  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3866  * and defining its own value for SHMLBA.
3867  *
3868  * The kernel also permits SHMLBA to be set by the architecture to a
3869  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3870  * this means that addresses are rounded to the large size if
3871  * SHM_RND is set but addresses not aligned to that size are not rejected
3872  * as long as they are at least page-aligned. Since the only architecture
3873  * which uses this is ia64, this code doesn't provide for that oddity.
3874  */
3875 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
3876 {
3877     return TARGET_PAGE_SIZE;
3878 }
3879 #endif
3880 
3881 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
3882                                  int shmid, abi_ulong shmaddr, int shmflg)
3883 {
3884     abi_long raddr;
3885     void *host_raddr;
3886     struct shmid_ds shm_info;
3887     int i, ret;
3888     abi_ulong shmlba;
3889 
3890     /* find out the length of the shared memory segment */
3891     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3892     if (is_error(ret)) {
3893         /* can't get length, bail out */
3894         return ret;
3895     }
3896 
3897     shmlba = target_shmlba(cpu_env);
3898 
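    /* Reject attach addresses that are not SHMLBA-aligned unless SHM_RND
     * asks us to round them down, mirroring the kernel's behaviour. */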
3899     if (shmaddr & (shmlba - 1)) {
3900         if (shmflg & SHM_RND) {
3901             shmaddr &= ~(shmlba - 1);
3902         } else {
3903             return -TARGET_EINVAL;
3904         }
3905     }
3906     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
3907         return -TARGET_EINVAL;
3908     }
3909 
3910     mmap_lock();
3911 
3912     if (shmaddr)
3913         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3914     else {
3915         abi_ulong mmap_start;
3916 
3917         mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3918 
3919         if (mmap_start == -1) {
3920             errno = ENOMEM;
3921             host_raddr = (void *)-1;
3922         } else
3923             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3924     }
3925 
3926     if (host_raddr == (void *)-1) {
3927         mmap_unlock();
3928         return get_errno((long)host_raddr);
3929     }
3930     raddr = h2g((unsigned long)host_raddr);
3931 
3932     page_set_flags(raddr, raddr + shm_info.shm_segsz,
3933                    PAGE_VALID | PAGE_READ |
3934                    ((shmflg & SHM_RDONLY) ? 0 : PAGE_WRITE));
3935 
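    /* Remember the attachment so do_shmdt() can later clear the page flags
     * for exactly this range. */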
3936     for (i = 0; i < N_SHM_REGIONS; i++) {
3937         if (!shm_regions[i].in_use) {
3938             shm_regions[i].in_use = true;
3939             shm_regions[i].start = raddr;
3940             shm_regions[i].size = shm_info.shm_segsz;
3941             break;
3942         }
3943     }
3944 
3945     mmap_unlock();
3946     return raddr;
3947 
3948 }
3949 
3950 static inline abi_long do_shmdt(abi_ulong shmaddr)
3951 {
3952     int i;
3953     abi_long rv;
3954 
3955     mmap_lock();
3956 
3957     for (i = 0; i < N_SHM_REGIONS; ++i) {
3958         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3959             shm_regions[i].in_use = false;
3960             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3961             break;
3962         }
3963     }
3964     rv = get_errno(shmdt(g2h(shmaddr)));
3965 
3966     mmap_unlock();
3967 
3968     return rv;
3969 }
3970 
3971 #ifdef TARGET_NR_ipc
3972 /* ??? This only works with linear mappings.  */
3973 /* do_ipc() must return target values and target errnos. */
3974 static abi_long do_ipc(CPUArchState *cpu_env,
3975                        unsigned int call, abi_long first,
3976                        abi_long second, abi_long third,
3977                        abi_long ptr, abi_long fifth)
3978 {
3979     int version;
3980     abi_long ret = 0;
3981 
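    /* The ipc() multiplexer encodes an ABI version in the top 16 bits of
     * 'call' and the actual operation in the low 16 bits. */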
3982     version = call >> 16;
3983     call &= 0xffff;
3984 
3985     switch (call) {
3986     case IPCOP_semop:
3987         ret = do_semop(first, ptr, second);
3988         break;
3989 
3990     case IPCOP_semget:
3991         ret = get_errno(semget(first, second, third));
3992         break;
3993 
3994     case IPCOP_semctl: {
3995         /* The semun argument to semctl is passed by value, so dereference the
3996          * ptr argument. */
3997         abi_ulong atptr;
3998         get_user_ual(atptr, ptr);
3999         ret = do_semctl(first, second, third, atptr);
4000         break;
4001     }
4002 
4003     case IPCOP_msgget:
4004         ret = get_errno(msgget(first, second));
4005         break;
4006 
4007     case IPCOP_msgsnd:
4008         ret = do_msgsnd(first, ptr, second, third);
4009         break;
4010 
4011     case IPCOP_msgctl:
4012         ret = do_msgctl(first, second, ptr);
4013         break;
4014 
4015     case IPCOP_msgrcv:
4016         switch (version) {
4017         case 0:
4018             {
4019                 struct target_ipc_kludge {
4020                     abi_long msgp;
4021                     abi_long msgtyp;
4022                 } *tmp;
4023 
4024                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4025                     ret = -TARGET_EFAULT;
4026                     break;
4027                 }
4028 
4029                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4030 
4031                 unlock_user_struct(tmp, ptr, 0);
4032                 break;
4033             }
4034         default:
4035             ret = do_msgrcv(first, ptr, second, fifth, third);
4036         }
4037         break;
4038 
4039     case IPCOP_shmat:
4040         switch (version) {
4041         default:
4042         {
4043             abi_ulong raddr;
4044             raddr = do_shmat(cpu_env, first, ptr, second);
4045             if (is_error(raddr))
4046                 return get_errno(raddr);
4047             if (put_user_ual(raddr, third))
4048                 return -TARGET_EFAULT;
4049             break;
4050         }
4051         case 1:
4052             ret = -TARGET_EINVAL;
4053             break;
4054         }
4055         break;
4056     case IPCOP_shmdt:
4057         ret = do_shmdt(ptr);
4058         break;
4059 
4060     case IPCOP_shmget:
4061         /* IPC_* flag values are the same on all linux platforms */
4062         ret = get_errno(shmget(first, second, third));
4063         break;
4064 
4065     /* IPC_* and SHM_* command values are the same on all linux platforms */
4066     case IPCOP_shmctl:
4067         ret = do_shmctl(first, second, ptr);
4068         break;
4069     default:
4070         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4071         ret = -TARGET_ENOSYS;
4072         break;
4073     }
4074     return ret;
4075 }
4076 #endif
4077 
4078 /* kernel structure types definitions */
4079 
4080 #define STRUCT(name, ...) STRUCT_ ## name,
4081 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4082 enum {
4083 #include "syscall_types.h"
4084 STRUCT_MAX
4085 };
4086 #undef STRUCT
4087 #undef STRUCT_SPECIAL
4088 
4089 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4090 #define STRUCT_SPECIAL(name)
4091 #include "syscall_types.h"
4092 #undef STRUCT
4093 #undef STRUCT_SPECIAL
4094 
4095 typedef struct IOCTLEntry IOCTLEntry;
4096 
4097 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4098                              int fd, int cmd, abi_long arg);
4099 
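/*
 * One table entry per translated ioctl: the target and host command numbers,
 * an access mode (IOC_R/IOC_W), an optional custom handler and a thunk
 * description of the argument structure.
 */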
4100 struct IOCTLEntry {
4101     int target_cmd;
4102     unsigned int host_cmd;
4103     const char *name;
4104     int access;
4105     do_ioctl_fn *do_ioctl;
4106     const argtype arg_type[5];
4107 };
4108 
4109 #define IOC_R 0x0001
4110 #define IOC_W 0x0002
4111 #define IOC_RW (IOC_R | IOC_W)
4112 
4113 #define MAX_STRUCT_SIZE 4096
4114 
4115 #ifdef CONFIG_FIEMAP
4116 /* So fiemap access checks don't overflow on 32 bit systems.
4117  * This is very slightly smaller than the limit imposed by
4118  * the underlying kernel.
4119  */
4120 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4121                             / sizeof(struct fiemap_extent))
4122 
4123 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4124                                        int fd, int cmd, abi_long arg)
4125 {
4126     /* The parameter for this ioctl is a struct fiemap followed
4127      * by an array of struct fiemap_extent whose size is set
4128      * in fiemap->fm_extent_count. The array is filled in by the
4129      * ioctl.
4130      */
4131     int target_size_in, target_size_out;
4132     struct fiemap *fm;
4133     const argtype *arg_type = ie->arg_type;
4134     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4135     void *argptr, *p;
4136     abi_long ret;
4137     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4138     uint32_t outbufsz;
4139     int free_fm = 0;
4140 
4141     assert(arg_type[0] == TYPE_PTR);
4142     assert(ie->access == IOC_RW);
4143     arg_type++;
4144     target_size_in = thunk_type_size(arg_type, 0);
4145     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4146     if (!argptr) {
4147         return -TARGET_EFAULT;
4148     }
4149     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4150     unlock_user(argptr, arg, 0);
4151     fm = (struct fiemap *)buf_temp;
4152     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4153         return -TARGET_EINVAL;
4154     }
4155 
4156     outbufsz = sizeof (*fm) +
4157         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4158 
4159     if (outbufsz > MAX_STRUCT_SIZE) {
4160         /* We can't fit all the extents into the fixed size buffer.
4161          * Allocate one that is large enough and use it instead.
4162          */
4163         fm = g_try_malloc(outbufsz);
4164         if (!fm) {
4165             return -TARGET_ENOMEM;
4166         }
4167         memcpy(fm, buf_temp, sizeof(struct fiemap));
4168         free_fm = 1;
4169     }
4170     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4171     if (!is_error(ret)) {
4172         target_size_out = target_size_in;
4173         /* An extent_count of 0 means we were only counting the extents
4174          * so there are no structs to copy
4175          */
4176         if (fm->fm_extent_count != 0) {
4177             target_size_out += fm->fm_mapped_extents * extent_size;
4178         }
4179         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4180         if (!argptr) {
4181             ret = -TARGET_EFAULT;
4182         } else {
4183             /* Convert the struct fiemap */
4184             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4185             if (fm->fm_extent_count != 0) {
4186                 p = argptr + target_size_in;
4187                 /* ...and then all the struct fiemap_extents */
4188                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4189                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4190                                   THUNK_TARGET);
4191                     p += extent_size;
4192                 }
4193             }
4194             unlock_user(argptr, arg, target_size_out);
4195         }
4196     }
4197     if (free_fm) {
4198         g_free(fm);
4199     }
4200     return ret;
4201 }
4202 #endif
4203 
4204 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4205                                 int fd, int cmd, abi_long arg)
4206 {
4207     const argtype *arg_type = ie->arg_type;
4208     int target_size;
4209     void *argptr;
4210     int ret;
4211     struct ifconf *host_ifconf;
4212     uint32_t outbufsz;
4213     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4214     int target_ifreq_size;
4215     int nb_ifreq;
4216     int free_buf = 0;
4217     int i;
4218     int target_ifc_len;
4219     abi_long target_ifc_buf;
4220     int host_ifc_len;
4221     char *host_ifc_buf;
4222 
4223     assert(arg_type[0] == TYPE_PTR);
4224     assert(ie->access == IOC_RW);
4225 
4226     arg_type++;
4227     target_size = thunk_type_size(arg_type, 0);
4228 
4229     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4230     if (!argptr)
4231         return -TARGET_EFAULT;
4232     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4233     unlock_user(argptr, arg, 0);
4234 
4235     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4236     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4237     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4238 
4239     if (target_ifc_buf != 0) {
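    /* The target and host struct ifreq sizes can differ, so the buffer
     * length is recomputed in host terms here and converted back to target
     * terms after the ioctl. */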
4240         target_ifc_len = host_ifconf->ifc_len;
4241         nb_ifreq = target_ifc_len / target_ifreq_size;
4242         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4243 
4244         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4245         if (outbufsz > MAX_STRUCT_SIZE) {
4246             /*
4247              * We can't fit all the ifreq entries into the fixed size buffer.
4248              * Allocate one that is large enough and use it instead.
4249              */
4250             host_ifconf = malloc(outbufsz);
4251             if (!host_ifconf) {
4252                 return -TARGET_ENOMEM;
4253             }
4254             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4255             free_buf = 1;
4256         }
4257         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4258 
4259         host_ifconf->ifc_len = host_ifc_len;
4260     } else {
4261         host_ifc_buf = NULL;
4262     }
4263     host_ifconf->ifc_buf = host_ifc_buf;
4264 
4265     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4266     if (!is_error(ret)) {
4267 	/* convert host ifc_len to target ifc_len */
4268         /* convert host ifc_len to target ifc_len */
4269         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4270         target_ifc_len = nb_ifreq * target_ifreq_size;
4271         host_ifconf->ifc_len = target_ifc_len;
4272 
4273         /* restore target ifc_buf */
4274 
4275         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4276 
4277         /* copy struct ifconf to target user */
4278 
4279         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4280         if (!argptr)
4281             return -TARGET_EFAULT;
4282         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4283         unlock_user(argptr, arg, target_size);
4284 
4285         if (target_ifc_buf != 0) {
4286             /* copy ifreq[] to target user */
4287             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4288             for (i = 0; i < nb_ifreq ; i++) {
4289                 thunk_convert(argptr + i * target_ifreq_size,
4290                               host_ifc_buf + i * sizeof(struct ifreq),
4291                               ifreq_arg_type, THUNK_TARGET);
4292             }
4293             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4294         }
4295     }
4296 
4297     if (free_buf) {
4298         free(host_ifconf);
4299     }
4300 
4301     return ret;
4302 }
4303 
4304 #if defined(CONFIG_USBFS)
4305 #if HOST_LONG_BITS > 64
4306 #error USBDEVFS thunks do not support >64 bit hosts yet.
4307 #endif
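/*
 * Per-URB bookkeeping: the guest addresses of the urb and its data buffer,
 * plus the host copy handed to the kernel.  USBDEVFS_REAPURB returns the
 * address of host_urb, from which the containing live_urb is recovered.
 */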
4308 struct live_urb {
4309     uint64_t target_urb_adr;
4310     uint64_t target_buf_adr;
4311     char *target_buf_ptr;
4312     struct usbdevfs_urb host_urb;
4313 };
4314 
4315 static GHashTable *usbdevfs_urb_hashtable(void)
4316 {
4317     static GHashTable *urb_hashtable;
4318 
4319     if (!urb_hashtable) {
4320         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4321     }
4322     return urb_hashtable;
4323 }
4324 
4325 static void urb_hashtable_insert(struct live_urb *urb)
4326 {
4327     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4328     g_hash_table_insert(urb_hashtable, urb, urb);
4329 }
4330 
4331 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4332 {
4333     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4334     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4335 }
4336 
4337 static void urb_hashtable_remove(struct live_urb *urb)
4338 {
4339     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4340     g_hash_table_remove(urb_hashtable, urb);
4341 }
4342 
4343 static abi_long
4344 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4345                           int fd, int cmd, abi_long arg)
4346 {
4347     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4348     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4349     struct live_urb *lurb;
4350     void *argptr;
4351     uint64_t hurb;
4352     int target_size;
4353     uintptr_t target_urb_adr;
4354     abi_long ret;
4355 
4356     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4357 
4358     memset(buf_temp, 0, sizeof(uint64_t));
4359     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4360     if (is_error(ret)) {
4361         return ret;
4362     }
4363 
4364     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4365     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4366     if (!lurb->target_urb_adr) {
4367         return -TARGET_EFAULT;
4368     }
4369     urb_hashtable_remove(lurb);
4370     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4371         lurb->host_urb.buffer_length);
4372     lurb->target_buf_ptr = NULL;
4373 
4374     /* restore the guest buffer pointer */
4375     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4376 
4377     /* update the guest urb struct */
4378     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4379     if (!argptr) {
4380         g_free(lurb);
4381         return -TARGET_EFAULT;
4382     }
4383     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4384     unlock_user(argptr, lurb->target_urb_adr, target_size);
4385 
4386     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4387     /* write back the urb handle */
4388     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4389     if (!argptr) {
4390         g_free(lurb);
4391         return -TARGET_EFAULT;
4392     }
4393 
4394     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4395     target_urb_adr = lurb->target_urb_adr;
4396     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4397     unlock_user(argptr, arg, target_size);
4398 
4399     g_free(lurb);
4400     return ret;
4401 }
4402 
4403 static abi_long
4404 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4405                              uint8_t *buf_temp __attribute__((unused)),
4406                              int fd, int cmd, abi_long arg)
4407 {
4408     struct live_urb *lurb;
4409 
4410     /* map target address back to host URB with metadata. */
4411     lurb = urb_hashtable_lookup(arg);
4412     if (!lurb) {
4413         return -TARGET_EFAULT;
4414     }
4415     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4416 }
4417 
4418 static abi_long
4419 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4420                             int fd, int cmd, abi_long arg)
4421 {
4422     const argtype *arg_type = ie->arg_type;
4423     int target_size;
4424     abi_long ret;
4425     void *argptr;
4426     int rw_dir;
4427     struct live_urb *lurb;
4428 
4429     /*
4430      * each submitted URB needs to map to a unique ID for the
4431      * kernel, and that unique ID needs to be a pointer to
4432      * host memory.  Hence, we need to malloc for each URB.
4433      * Isochronous transfers have a variable length struct.
4434      */
4435     arg_type++;
4436     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4437 
4438     /* construct host copy of urb and metadata */
4439     lurb = g_try_malloc0(sizeof(struct live_urb));
4440     if (!lurb) {
4441         return -TARGET_ENOMEM;
4442     }
4443 
4444     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4445     if (!argptr) {
4446         g_free(lurb);
4447         return -TARGET_EFAULT;
4448     }
4449     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4450     unlock_user(argptr, arg, 0);
4451 
4452     lurb->target_urb_adr = arg;
4453     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4454 
4455     /* buffer space used depends on endpoint type so lock the entire buffer */
4456     /* control type urbs should check the buffer contents for true direction */
4457     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4458     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4459         lurb->host_urb.buffer_length, 1);
4460     if (lurb->target_buf_ptr == NULL) {
4461         g_free(lurb);
4462         return -TARGET_EFAULT;
4463     }
4464 
4465     /* update buffer pointer in host copy */
4466     lurb->host_urb.buffer = lurb->target_buf_ptr;
4467 
4468     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4469     if (is_error(ret)) {
4470         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4471         g_free(lurb);
4472     } else {
4473         urb_hashtable_insert(lurb);
4474     }
4475 
4476     return ret;
4477 }
4478 #endif /* CONFIG_USBFS */
4479 
4480 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4481                             int cmd, abi_long arg)
4482 {
4483     void *argptr;
4484     struct dm_ioctl *host_dm;
4485     abi_long guest_data;
4486     uint32_t guest_data_size;
4487     int target_size;
4488     const argtype *arg_type = ie->arg_type;
4489     abi_long ret;
4490     void *big_buf = NULL;
4491     char *host_data;
4492 
4493     arg_type++;
4494     target_size = thunk_type_size(arg_type, 0);
4495     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4496     if (!argptr) {
4497         ret = -TARGET_EFAULT;
4498         goto out;
4499     }
4500     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4501     unlock_user(argptr, arg, 0);
4502 
4503     /* buf_temp is too small, so fetch things into a bigger buffer */
4504     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4505     memcpy(big_buf, buf_temp, target_size);
4506     buf_temp = big_buf;
4507     host_dm = big_buf;
4508 
4509     guest_data = arg + host_dm->data_start;
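    /* The variable-length payload starts at data_start inside the dm_ioctl
     * block; work out its guest address and size so it can be copied in. */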
4510     if ((guest_data - arg) < 0) {
4511         ret = -TARGET_EINVAL;
4512         goto out;
4513     }
4514     guest_data_size = host_dm->data_size - host_dm->data_start;
4515     host_data = (char*)host_dm + host_dm->data_start;
4516 
4517     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4518     if (!argptr) {
4519         ret = -TARGET_EFAULT;
4520         goto out;
4521     }
4522 
4523     switch (ie->host_cmd) {
4524     case DM_REMOVE_ALL:
4525     case DM_LIST_DEVICES:
4526     case DM_DEV_CREATE:
4527     case DM_DEV_REMOVE:
4528     case DM_DEV_SUSPEND:
4529     case DM_DEV_STATUS:
4530     case DM_DEV_WAIT:
4531     case DM_TABLE_STATUS:
4532     case DM_TABLE_CLEAR:
4533     case DM_TABLE_DEPS:
4534     case DM_LIST_VERSIONS:
4535         /* no input data */
4536         break;
4537     case DM_DEV_RENAME:
4538     case DM_DEV_SET_GEOMETRY:
4539         /* data contains only strings */
4540         memcpy(host_data, argptr, guest_data_size);
4541         break;
4542     case DM_TARGET_MSG:
4543         memcpy(host_data, argptr, guest_data_size);
4544         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4545         break;
4546     case DM_TABLE_LOAD:
4547     {
4548         void *gspec = argptr;
4549         void *cur_data = host_data;
4550         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4551         int spec_size = thunk_type_size(arg_type, 0);
4552         int i;
4553 
4554         for (i = 0; i < host_dm->target_count; i++) {
4555             struct dm_target_spec *spec = cur_data;
4556             uint32_t next;
4557             int slen;
4558 
4559             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4560             slen = strlen((char*)gspec + spec_size) + 1;
4561             next = spec->next;
4562             spec->next = sizeof(*spec) + slen;
4563             strcpy((char*)&spec[1], gspec + spec_size);
4564             gspec += next;
4565             cur_data += spec->next;
4566         }
4567         break;
4568     }
4569     default:
4570         ret = -TARGET_EINVAL;
4571         unlock_user(argptr, guest_data, 0);
4572         goto out;
4573     }
4574     unlock_user(argptr, guest_data, 0);
4575 
4576     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4577     if (!is_error(ret)) {
4578         guest_data = arg + host_dm->data_start;
4579         guest_data_size = host_dm->data_size - host_dm->data_start;
4580         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4581         switch (ie->host_cmd) {
4582         case DM_REMOVE_ALL:
4583         case DM_DEV_CREATE:
4584         case DM_DEV_REMOVE:
4585         case DM_DEV_RENAME:
4586         case DM_DEV_SUSPEND:
4587         case DM_DEV_STATUS:
4588         case DM_TABLE_LOAD:
4589         case DM_TABLE_CLEAR:
4590         case DM_TARGET_MSG:
4591         case DM_DEV_SET_GEOMETRY:
4592             /* no return data */
4593             break;
4594         case DM_LIST_DEVICES:
4595         {
4596             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4597             uint32_t remaining_data = guest_data_size;
4598             void *cur_data = argptr;
4599             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4600             int nl_size = 12; /* can't use thunk_size due to alignment */
4601 
4602             while (1) {
4603                 uint32_t next = nl->next;
4604                 if (next) {
4605                     nl->next = nl_size + (strlen(nl->name) + 1);
4606                 }
4607                 if (remaining_data < nl->next) {
4608                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4609                     break;
4610                 }
4611                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4612                 strcpy(cur_data + nl_size, nl->name);
4613                 cur_data += nl->next;
4614                 remaining_data -= nl->next;
4615                 if (!next) {
4616                     break;
4617                 }
4618                 nl = (void*)nl + next;
4619             }
4620             break;
4621         }
4622         case DM_DEV_WAIT:
4623         case DM_TABLE_STATUS:
4624         {
4625             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4626             void *cur_data = argptr;
4627             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4628             int spec_size = thunk_type_size(arg_type, 0);
4629             int i;
4630 
4631             for (i = 0; i < host_dm->target_count; i++) {
4632                 uint32_t next = spec->next;
4633                 int slen = strlen((char*)&spec[1]) + 1;
4634                 spec->next = (cur_data - argptr) + spec_size + slen;
4635                 if (guest_data_size < spec->next) {
4636                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4637                     break;
4638                 }
4639                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4640                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4641                 cur_data = argptr + spec->next;
4642                 spec = (void*)host_dm + host_dm->data_start + next;
4643             }
4644             break;
4645         }
4646         case DM_TABLE_DEPS:
4647         {
4648             void *hdata = (void*)host_dm + host_dm->data_start;
4649             int count = *(uint32_t*)hdata;
4650             uint64_t *hdev = hdata + 8;
4651             uint64_t *gdev = argptr + 8;
4652             int i;
4653 
4654             *(uint32_t*)argptr = tswap32(count);
4655             for (i = 0; i < count; i++) {
4656                 *gdev = tswap64(*hdev);
4657                 gdev++;
4658                 hdev++;
4659             }
4660             break;
4661         }
4662         case DM_LIST_VERSIONS:
4663         {
4664             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4665             uint32_t remaining_data = guest_data_size;
4666             void *cur_data = argptr;
4667             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4668             int vers_size = thunk_type_size(arg_type, 0);
4669 
4670             while (1) {
4671                 uint32_t next = vers->next;
4672                 if (next) {
4673                     vers->next = vers_size + (strlen(vers->name) + 1);
4674                 }
4675                 if (remaining_data < vers->next) {
4676                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4677                     break;
4678                 }
4679                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4680                 strcpy(cur_data + vers_size, vers->name);
4681                 cur_data += vers->next;
4682                 remaining_data -= vers->next;
4683                 if (!next) {
4684                     break;
4685                 }
4686                 vers = (void*)vers + next;
4687             }
4688             break;
4689         }
4690         default:
4691             unlock_user(argptr, guest_data, 0);
4692             ret = -TARGET_EINVAL;
4693             goto out;
4694         }
4695         unlock_user(argptr, guest_data, guest_data_size);
4696 
4697         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4698         if (!argptr) {
4699             ret = -TARGET_EFAULT;
4700             goto out;
4701         }
4702         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4703         unlock_user(argptr, arg, target_size);
4704     }
4705 out:
4706     g_free(big_buf);
4707     return ret;
4708 }
4709 
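/* BLKPG's argument is a struct blkpg_ioctl_arg whose 'data' member is
 * itself a guest pointer to a struct blkpg_partition.  The generic thunk
 * conversion cannot follow that nested pointer, so the partition record is
 * fetched and converted by hand and the host ioctl is pointed at a local
 * copy.
 */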
4710 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4711                                int cmd, abi_long arg)
4712 {
4713     void *argptr;
4714     int target_size;
4715     const argtype *arg_type = ie->arg_type;
4716     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4717     abi_long ret;
4718 
4719     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4720     struct blkpg_partition host_part;
4721 
4722     /* Read and convert blkpg */
4723     arg_type++;
4724     target_size = thunk_type_size(arg_type, 0);
4725     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4726     if (!argptr) {
4727         ret = -TARGET_EFAULT;
4728         goto out;
4729     }
4730     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4731     unlock_user(argptr, arg, 0);
4732 
4733     switch (host_blkpg->op) {
4734     case BLKPG_ADD_PARTITION:
4735     case BLKPG_DEL_PARTITION:
4736         /* payload is struct blkpg_partition */
4737         break;
4738     default:
4739         /* Unknown opcode */
4740         ret = -TARGET_EINVAL;
4741         goto out;
4742     }
4743 
4744     /* Read and convert blkpg->data */
4745     arg = (abi_long)(uintptr_t)host_blkpg->data;
4746     target_size = thunk_type_size(part_arg_type, 0);
4747     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4748     if (!argptr) {
4749         ret = -TARGET_EFAULT;
4750         goto out;
4751     }
4752     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4753     unlock_user(argptr, arg, 0);
4754 
4755     /* Swizzle the data pointer to our local copy and call! */
4756     host_blkpg->data = &host_part;
4757     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4758 
4759 out:
4760     return ret;
4761 }
4762 
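/* Handler for the rtentry-based route ioctls (such as SIOCADDRT/SIOCDELRT):
 * the rt_dev member of struct rtentry is a pointer to a device-name string
 * in guest memory, so the structure is converted field by field and rt_dev
 * is replaced with a locked host copy of the string before the ioctl is
 * issued.
 */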
4763 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4764                                 int fd, int cmd, abi_long arg)
4765 {
4766     const argtype *arg_type = ie->arg_type;
4767     const StructEntry *se;
4768     const argtype *field_types;
4769     const int *dst_offsets, *src_offsets;
4770     int target_size;
4771     void *argptr;
4772     abi_ulong *target_rt_dev_ptr = NULL;
4773     unsigned long *host_rt_dev_ptr = NULL;
4774     abi_long ret;
4775     int i;
4776 
4777     assert(ie->access == IOC_W);
4778     assert(*arg_type == TYPE_PTR);
4779     arg_type++;
4780     assert(*arg_type == TYPE_STRUCT);
4781     target_size = thunk_type_size(arg_type, 0);
4782     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4783     if (!argptr) {
4784         return -TARGET_EFAULT;
4785     }
4786     arg_type++;
4787     assert(*arg_type == (int)STRUCT_rtentry);
4788     se = struct_entries + *arg_type++;
4789     assert(se->convert[0] == NULL);
4790     /* convert the struct here so that we can catch the rt_dev string */
4791     field_types = se->field_types;
4792     dst_offsets = se->field_offsets[THUNK_HOST];
4793     src_offsets = se->field_offsets[THUNK_TARGET];
4794     for (i = 0; i < se->nb_fields; i++) {
4795         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4796             assert(*field_types == TYPE_PTRVOID);
4797             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4798             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4799             if (*target_rt_dev_ptr != 0) {
4800                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4801                                                   tswapal(*target_rt_dev_ptr));
4802                 if (!*host_rt_dev_ptr) {
4803                     unlock_user(argptr, arg, 0);
4804                     return -TARGET_EFAULT;
4805                 }
4806             } else {
4807                 *host_rt_dev_ptr = 0;
4808             }
4809             field_types++;
4810             continue;
4811         }
4812         field_types = thunk_convert(buf_temp + dst_offsets[i],
4813                                     argptr + src_offsets[i],
4814                                     field_types, THUNK_HOST);
4815     }
4816     unlock_user(argptr, arg, 0);
4817 
4818     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4819 
4820     assert(host_rt_dev_ptr != NULL);
4821     assert(target_rt_dev_ptr != NULL);
4822     if (*host_rt_dev_ptr != 0) {
4823         unlock_user((void *)*host_rt_dev_ptr,
4824                     *target_rt_dev_ptr, 0);
4825     }
4826     return ret;
4827 }
4828 
4829 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4830                                      int fd, int cmd, abi_long arg)
4831 {
4832     int sig = target_to_host_signal(arg);
4833     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4834 }
4835 
4836 #ifdef TIOCGPTPEER
4837 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
4838                                      int fd, int cmd, abi_long arg)
4839 {
4840     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
4841     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
4842 }
4843 #endif
4844 
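/* The conversion table below is generated from ioctls.h: plain IOCTL()
 * entries use the generic conversion in do_ioctl(), IOCTL_SPECIAL() entries
 * name one of the do_ioctl_*() helpers above, and IOCTL_IGNORE() entries
 * have no host command.  As an illustration, an ioctls.h line such as
 *     IOCTL(BLKGETSIZE, IOC_R, MK_PTR(TYPE_ULONG))
 * would expand to
 *     { TARGET_BLKGETSIZE, BLKGETSIZE, "BLKGETSIZE", IOC_R, 0,
 *       { MK_PTR(TYPE_ULONG) } },
 */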
4845 static IOCTLEntry ioctl_entries[] = {
4846 #define IOCTL(cmd, access, ...) \
4847     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
4848 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
4849     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
4850 #define IOCTL_IGNORE(cmd) \
4851     { TARGET_ ## cmd, 0, #cmd },
4852 #include "ioctls.h"
4853     { 0, 0, },
4854 };
4855 
4856 /* ??? Implement proper locking for ioctls.  */
4857 /* do_ioctl() must return target values and target errnos. */
4858 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4859 {
4860     const IOCTLEntry *ie;
4861     const argtype *arg_type;
4862     abi_long ret;
4863     uint8_t buf_temp[MAX_STRUCT_SIZE];
4864     int target_size;
4865     void *argptr;
4866 
4867     ie = ioctl_entries;
4868     for(;;) {
4869         if (ie->target_cmd == 0) {
4870             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4871             return -TARGET_ENOSYS;
4872         }
4873         if (ie->target_cmd == cmd)
4874             break;
4875         ie++;
4876     }
4877     arg_type = ie->arg_type;
4878     if (ie->do_ioctl) {
4879         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4880     } else if (!ie->host_cmd) {
4881         /* Some architectures define BSD ioctls in their headers
4882            that are not implemented in Linux.  */
4883         return -TARGET_ENOSYS;
4884     }
4885 
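    /* Generic conversion path: no-argument and integer arguments pass
     * straight through; for pointer arguments the access mode decides the
     * order of operations -
     * IOC_R:  call the host ioctl, then convert the result out to the guest;
     * IOC_W:  convert the guest data in, then call the host ioctl;
     * IOC_RW: convert in, call, then convert the updated data back out.
     */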
4886     switch(arg_type[0]) {
4887     case TYPE_NULL:
4888         /* no argument */
4889         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
4890         break;
4891     case TYPE_PTRVOID:
4892     case TYPE_INT:
4893         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
4894         break;
4895     case TYPE_PTR:
4896         arg_type++;
4897         target_size = thunk_type_size(arg_type, 0);
4898         switch(ie->access) {
4899         case IOC_R:
4900             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4901             if (!is_error(ret)) {
4902                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4903                 if (!argptr)
4904                     return -TARGET_EFAULT;
4905                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4906                 unlock_user(argptr, arg, target_size);
4907             }
4908             break;
4909         case IOC_W:
4910             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4911             if (!argptr)
4912                 return -TARGET_EFAULT;
4913             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4914             unlock_user(argptr, arg, 0);
4915             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4916             break;
4917         default:
4918         case IOC_RW:
4919             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4920             if (!argptr)
4921                 return -TARGET_EFAULT;
4922             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4923             unlock_user(argptr, arg, 0);
4924             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4925             if (!is_error(ret)) {
4926                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4927                 if (!argptr)
4928                     return -TARGET_EFAULT;
4929                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4930                 unlock_user(argptr, arg, target_size);
4931             }
4932             break;
4933         }
4934         break;
4935     default:
4936         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4937                  (long)cmd, arg_type[0]);
4938         ret = -TARGET_ENOSYS;
4939         break;
4940     }
4941     return ret;
4942 }
4943 
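/* Bitmask translation tables for the four termios flag words (c_iflag,
 * c_oflag, c_cflag and c_lflag) between target and host bit values.
 */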
4944 static const bitmask_transtbl iflag_tbl[] = {
4945         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4946         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4947         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4948         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4949         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4950         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4951         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4952         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4953         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4954         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4955         { TARGET_IXON, TARGET_IXON, IXON, IXON },
4956         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4957         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4958         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4959         { 0, 0, 0, 0 }
4960 };
4961 
4962 static const bitmask_transtbl oflag_tbl[] = {
4963 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4964 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4965 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4966 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4967 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4968 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4969 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4970 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4971 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4972 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4973 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4974 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4975 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4976 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4977 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4978 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4979 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4980 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4981 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4982 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4983 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4984 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4985 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4986 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4987 	{ 0, 0, 0, 0 }
4988 };
4989 
4990 static const bitmask_transtbl cflag_tbl[] = {
4991 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4992 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4993 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4994 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4995 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4996 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4997 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4998 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4999 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5000 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5001 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5002 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5003 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5004 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5005 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5006 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5007 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5008 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5009 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5010 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5011 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5012 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5013 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5014 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5015 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5016 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5017 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5018 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5019 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5020 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5021 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5022 	{ 0, 0, 0, 0 }
5023 };
5024 
5025 static const bitmask_transtbl lflag_tbl[] = {
5026 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5027 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5028 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5029 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5030 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5031 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5032 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5033 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5034 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5035 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5036 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5037 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5038 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5039 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5040 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5041 	{ 0, 0, 0, 0 }
5042 };
5043 
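/* Convert a guest struct termios to the host layout: the flag words go
 * through the bitmask tables above, while the c_cc control characters are
 * copied individually because the TARGET_V* indices need not match the
 * host V* indices.
 */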
5044 static void target_to_host_termios (void *dst, const void *src)
5045 {
5046     struct host_termios *host = dst;
5047     const struct target_termios *target = src;
5048 
5049     host->c_iflag =
5050         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5051     host->c_oflag =
5052         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5053     host->c_cflag =
5054         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5055     host->c_lflag =
5056         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5057     host->c_line = target->c_line;
5058 
5059     memset(host->c_cc, 0, sizeof(host->c_cc));
5060     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5061     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5062     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5063     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5064     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5065     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5066     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5067     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5068     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5069     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5070     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5071     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5072     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5073     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5074     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5075     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5076     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5077 }
5078 
5079 static void host_to_target_termios (void *dst, const void *src)
5080 {
5081     struct target_termios *target = dst;
5082     const struct host_termios *host = src;
5083 
5084     target->c_iflag =
5085         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5086     target->c_oflag =
5087         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5088     target->c_cflag =
5089         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5090     target->c_lflag =
5091         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5092     target->c_line = host->c_line;
5093 
5094     memset(target->c_cc, 0, sizeof(target->c_cc));
5095     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5096     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5097     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5098     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5099     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5100     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5101     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5102     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5103     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5104     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5105     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5106     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5107     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5108     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5109     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5110     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5111     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5112 }
5113 
5114 static const StructEntry struct_termios_def = {
5115     .convert = { host_to_target_termios, target_to_host_termios },
5116     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5117     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5118 };
5119 
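/* Translation table for mmap() flag bits between the target and host
 * MAP_* values.
 */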
5120 static bitmask_transtbl mmap_flags_tbl[] = {
5121     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5122     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5123     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5124     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5125       MAP_ANONYMOUS, MAP_ANONYMOUS },
5126     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5127       MAP_GROWSDOWN, MAP_GROWSDOWN },
5128     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5129       MAP_DENYWRITE, MAP_DENYWRITE },
5130     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5131       MAP_EXECUTABLE, MAP_EXECUTABLE },
5132     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5133     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5134       MAP_NORESERVE, MAP_NORESERVE },
5135     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5136     /* MAP_STACK has been ignored by the kernel for quite some time.
5137        Recognize it for the target, but do not pass it through to the
5138        host.  */
5139     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5140     { 0, 0, 0, 0 }
5141 };
5142 
5143 #if defined(TARGET_I386)
5144 
5145 /* NOTE: there is really only one LDT shared by all the threads */
5146 static uint8_t *ldt_table;
5147 
5148 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5149 {
5150     int size;
5151     void *p;
5152 
5153     if (!ldt_table)
5154         return 0;
5155     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5156     if (size > bytecount)
5157         size = bytecount;
5158     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5159     if (!p)
5160         return -TARGET_EFAULT;
5161     /* ??? Should this be byteswapped?  */
5162     memcpy(p, ldt_table, size);
5163     unlock_user(p, ptr, size);
5164     return size;
5165 }
5166 
5167 /* XXX: add locking support */
5168 static abi_long write_ldt(CPUX86State *env,
5169                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5170 {
5171     struct target_modify_ldt_ldt_s ldt_info;
5172     struct target_modify_ldt_ldt_s *target_ldt_info;
5173     int seg_32bit, contents, read_exec_only, limit_in_pages;
5174     int seg_not_present, useable, lm;
5175     uint32_t *lp, entry_1, entry_2;
5176 
5177     if (bytecount != sizeof(ldt_info))
5178         return -TARGET_EINVAL;
5179     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5180         return -TARGET_EFAULT;
5181     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5182     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5183     ldt_info.limit = tswap32(target_ldt_info->limit);
5184     ldt_info.flags = tswap32(target_ldt_info->flags);
5185     unlock_user_struct(target_ldt_info, ptr, 0);
5186 
5187     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5188         return -TARGET_EINVAL;
5189     seg_32bit = ldt_info.flags & 1;
5190     contents = (ldt_info.flags >> 1) & 3;
5191     read_exec_only = (ldt_info.flags >> 3) & 1;
5192     limit_in_pages = (ldt_info.flags >> 4) & 1;
5193     seg_not_present = (ldt_info.flags >> 5) & 1;
5194     useable = (ldt_info.flags >> 6) & 1;
5195 #ifdef TARGET_ABI32
5196     lm = 0;
5197 #else
5198     lm = (ldt_info.flags >> 7) & 1;
5199 #endif
5200     if (contents == 3) {
5201         if (oldmode)
5202             return -TARGET_EINVAL;
5203         if (seg_not_present == 0)
5204             return -TARGET_EINVAL;
5205     }
5206     /* allocate the LDT */
5207     if (!ldt_table) {
5208         env->ldt.base = target_mmap(0,
5209                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5210                                     PROT_READ|PROT_WRITE,
5211                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5212         if (env->ldt.base == -1)
5213             return -TARGET_ENOMEM;
5214         memset(g2h(env->ldt.base), 0,
5215                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5216         env->ldt.limit = 0xffff;
5217         ldt_table = g2h(env->ldt.base);
5218     }
5219 
5220     /* NOTE: same code as Linux kernel */
5221     /* Allow LDTs to be cleared by the user. */
5222     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5223         if (oldmode ||
5224             (contents == 0		&&
5225              read_exec_only == 1	&&
5226              seg_32bit == 0		&&
5227              limit_in_pages == 0	&&
5228              seg_not_present == 1	&&
5229              useable == 0 )) {
5230             entry_1 = 0;
5231             entry_2 = 0;
5232             goto install;
5233         }
5234     }
5235 
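    /* Pack base, limit and attribute bits into the two 32-bit words of an
     * x86 descriptor: entry_1 holds limit[15:0] and base[15:0], entry_2
     * holds the remaining base/limit bits plus the type, present and size
     * flags.
     */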
5236     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5237         (ldt_info.limit & 0x0ffff);
5238     entry_2 = (ldt_info.base_addr & 0xff000000) |
5239         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5240         (ldt_info.limit & 0xf0000) |
5241         ((read_exec_only ^ 1) << 9) |
5242         (contents << 10) |
5243         ((seg_not_present ^ 1) << 15) |
5244         (seg_32bit << 22) |
5245         (limit_in_pages << 23) |
5246         (lm << 21) |
5247         0x7000;
5248     if (!oldmode)
5249         entry_2 |= (useable << 20);
5250 
5251     /* Install the new entry ...  */
5252 install:
5253     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5254     lp[0] = tswap32(entry_1);
5255     lp[1] = tswap32(entry_2);
5256     return 0;
5257 }
5258 
5259 /* specific and weird i386 syscalls */
5260 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5261                               unsigned long bytecount)
5262 {
5263     abi_long ret;
5264 
5265     switch (func) {
5266     case 0:
5267         ret = read_ldt(ptr, bytecount);
5268         break;
5269     case 1:
5270         ret = write_ldt(env, ptr, bytecount, 1);
5271         break;
5272     case 0x11:
5273         ret = write_ldt(env, ptr, bytecount, 0);
5274         break;
5275     default:
5276         ret = -TARGET_ENOSYS;
5277         break;
5278     }
5279     return ret;
5280 }
5281 
5282 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5283 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5284 {
5285     uint64_t *gdt_table = g2h(env->gdt.base);
5286     struct target_modify_ldt_ldt_s ldt_info;
5287     struct target_modify_ldt_ldt_s *target_ldt_info;
5288     int seg_32bit, contents, read_exec_only, limit_in_pages;
5289     int seg_not_present, useable, lm;
5290     uint32_t *lp, entry_1, entry_2;
5291     int i;
5292 
5293     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5294     if (!target_ldt_info)
5295         return -TARGET_EFAULT;
5296     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5297     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5298     ldt_info.limit = tswap32(target_ldt_info->limit);
5299     ldt_info.flags = tswap32(target_ldt_info->flags);
5300     if (ldt_info.entry_number == -1) {
5301         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5302             if (gdt_table[i] == 0) {
5303                 ldt_info.entry_number = i;
5304                 target_ldt_info->entry_number = tswap32(i);
5305                 break;
5306             }
5307         }
5308     }
5309     unlock_user_struct(target_ldt_info, ptr, 1);
5310 
5311     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5312         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5313            return -TARGET_EINVAL;
5314     seg_32bit = ldt_info.flags & 1;
5315     contents = (ldt_info.flags >> 1) & 3;
5316     read_exec_only = (ldt_info.flags >> 3) & 1;
5317     limit_in_pages = (ldt_info.flags >> 4) & 1;
5318     seg_not_present = (ldt_info.flags >> 5) & 1;
5319     useable = (ldt_info.flags >> 6) & 1;
5320 #ifdef TARGET_ABI32
5321     lm = 0;
5322 #else
5323     lm = (ldt_info.flags >> 7) & 1;
5324 #endif
5325 
5326     if (contents == 3) {
5327         if (seg_not_present == 0)
5328             return -TARGET_EINVAL;
5329     }
5330 
5331     /* NOTE: same code as Linux kernel */
5332     /* Allow LDTs to be cleared by the user. */
5333     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5334         if ((contents == 0             &&
5335              read_exec_only == 1       &&
5336              seg_32bit == 0            &&
5337              limit_in_pages == 0       &&
5338              seg_not_present == 1      &&
5339              useable == 0 )) {
5340             entry_1 = 0;
5341             entry_2 = 0;
5342             goto install;
5343         }
5344     }
5345 
5346     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5347         (ldt_info.limit & 0x0ffff);
5348     entry_2 = (ldt_info.base_addr & 0xff000000) |
5349         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5350         (ldt_info.limit & 0xf0000) |
5351         ((read_exec_only ^ 1) << 9) |
5352         (contents << 10) |
5353         ((seg_not_present ^ 1) << 15) |
5354         (seg_32bit << 22) |
5355         (limit_in_pages << 23) |
5356         (useable << 20) |
5357         (lm << 21) |
5358         0x7000;
5359 
5360     /* Install the new entry ...  */
5361 install:
5362     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5363     lp[0] = tswap32(entry_1);
5364     lp[1] = tswap32(entry_2);
5365     return 0;
5366 }
5367 
5368 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5369 {
5370     struct target_modify_ldt_ldt_s *target_ldt_info;
5371     uint64_t *gdt_table = g2h(env->gdt.base);
5372     uint32_t base_addr, limit, flags;
5373     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5374     int seg_not_present, useable, lm;
5375     uint32_t *lp, entry_1, entry_2;
5376 
5377     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5378     if (!target_ldt_info)
5379         return -TARGET_EFAULT;
5380     idx = tswap32(target_ldt_info->entry_number);
5381     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5382         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5383         unlock_user_struct(target_ldt_info, ptr, 1);
5384         return -TARGET_EINVAL;
5385     }
5386     lp = (uint32_t *)(gdt_table + idx);
5387     entry_1 = tswap32(lp[0]);
5388     entry_2 = tswap32(lp[1]);
5389 
5390     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5391     contents = (entry_2 >> 10) & 3;
5392     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5393     seg_32bit = (entry_2 >> 22) & 1;
5394     limit_in_pages = (entry_2 >> 23) & 1;
5395     useable = (entry_2 >> 20) & 1;
5396 #ifdef TARGET_ABI32
5397     lm = 0;
5398 #else
5399     lm = (entry_2 >> 21) & 1;
5400 #endif
5401     flags = (seg_32bit << 0) | (contents << 1) |
5402         (read_exec_only << 3) | (limit_in_pages << 4) |
5403         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5404     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5405     base_addr = (entry_1 >> 16) |
5406         (entry_2 & 0xff000000) |
5407         ((entry_2 & 0xff) << 16);
5408     target_ldt_info->base_addr = tswapal(base_addr);
5409     target_ldt_info->limit = tswap32(limit);
5410     target_ldt_info->flags = tswap32(flags);
5411     unlock_user_struct(target_ldt_info, ptr, 1);
5412     return 0;
5413 }
5414 #endif /* TARGET_I386 && TARGET_ABI32 */
5415 
5416 #ifndef TARGET_ABI32
5417 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5418 {
5419     abi_long ret = 0;
5420     abi_ulong val;
5421     int idx;
5422 
5423     switch(code) {
5424     case TARGET_ARCH_SET_GS:
5425     case TARGET_ARCH_SET_FS:
5426         if (code == TARGET_ARCH_SET_GS)
5427             idx = R_GS;
5428         else
5429             idx = R_FS;
5430         cpu_x86_load_seg(env, idx, 0);
5431         env->segs[idx].base = addr;
5432         break;
5433     case TARGET_ARCH_GET_GS:
5434     case TARGET_ARCH_GET_FS:
5435         if (code == TARGET_ARCH_GET_GS)
5436             idx = R_GS;
5437         else
5438             idx = R_FS;
5439         val = env->segs[idx].base;
5440         if (put_user(val, addr, abi_ulong))
5441             ret = -TARGET_EFAULT;
5442         break;
5443     default:
5444         ret = -TARGET_EINVAL;
5445         break;
5446     }
5447     return ret;
5448 }
5449 #endif
5450 
5451 #endif /* defined(TARGET_I386) */
5452 
5453 #define NEW_STACK_SIZE 0x40000
5454 
5455 
5456 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5457 typedef struct {
5458     CPUArchState *env;
5459     pthread_mutex_t mutex;
5460     pthread_cond_t cond;
5461     pthread_t thread;
5462     uint32_t tid;
5463     abi_ulong child_tidptr;
5464     abi_ulong parent_tidptr;
5465     sigset_t sigmask;
5466 } new_thread_info;
5467 
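/* Start routine for threads created by do_fork() with CLONE_VM: register
 * the thread with RCU and TCG, publish its TID to the requested child and
 * parent locations, restore the signal mask, signal the parent that setup
 * is complete, then wait for clone_lock to be released before entering
 * cpu_loop().
 */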
5468 static void *clone_func(void *arg)
5469 {
5470     new_thread_info *info = arg;
5471     CPUArchState *env;
5472     CPUState *cpu;
5473     TaskState *ts;
5474 
5475     rcu_register_thread();
5476     tcg_register_thread();
5477     env = info->env;
5478     cpu = ENV_GET_CPU(env);
5479     thread_cpu = cpu;
5480     ts = (TaskState *)cpu->opaque;
5481     info->tid = sys_gettid();
5482     task_settid(ts);
5483     if (info->child_tidptr)
5484         put_user_u32(info->tid, info->child_tidptr);
5485     if (info->parent_tidptr)
5486         put_user_u32(info->tid, info->parent_tidptr);
5487     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5488     /* Enable signals.  */
5489     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5490     /* Signal to the parent that we're ready.  */
5491     pthread_mutex_lock(&info->mutex);
5492     pthread_cond_broadcast(&info->cond);
5493     pthread_mutex_unlock(&info->mutex);
5494     /* Wait until the parent has finished initializing the tls state.  */
5495     pthread_mutex_lock(&clone_lock);
5496     pthread_mutex_unlock(&clone_lock);
5497     cpu_loop(env);
5498     /* never exits */
5499     return NULL;
5500 }
5501 
5502 /* do_fork() must return host values and target errnos (unlike most
5503    do_*() functions). */
5504 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5505                    abi_ulong parent_tidptr, target_ulong newtls,
5506                    abi_ulong child_tidptr)
5507 {
5508     CPUState *cpu = ENV_GET_CPU(env);
5509     int ret;
5510     TaskState *ts;
5511     CPUState *new_cpu;
5512     CPUArchState *new_env;
5513     sigset_t sigmask;
5514 
5515     flags &= ~CLONE_IGNORED_FLAGS;
5516 
5517     /* Emulate vfork() with fork() */
5518     if (flags & CLONE_VFORK)
5519         flags &= ~(CLONE_VFORK | CLONE_VM);
5520 
5521     if (flags & CLONE_VM) {
5522         TaskState *parent_ts = (TaskState *)cpu->opaque;
5523         new_thread_info info;
5524         pthread_attr_t attr;
5525 
5526         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5527             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5528             return -TARGET_EINVAL;
5529         }
5530 
5531         ts = g_new0(TaskState, 1);
5532         init_task_state(ts);
5533 
5534         /* Grab a mutex so that thread setup appears atomic.  */
5535         pthread_mutex_lock(&clone_lock);
5536 
5537         /* we create a new CPU instance. */
5538         new_env = cpu_copy(env);
5539         /* Init regs that differ from the parent.  */
5540         cpu_clone_regs(new_env, newsp);
5541         new_cpu = ENV_GET_CPU(new_env);
5542         new_cpu->opaque = ts;
5543         ts->bprm = parent_ts->bprm;
5544         ts->info = parent_ts->info;
5545         ts->signal_mask = parent_ts->signal_mask;
5546 
5547         if (flags & CLONE_CHILD_CLEARTID) {
5548             ts->child_tidptr = child_tidptr;
5549         }
5550 
5551         if (flags & CLONE_SETTLS) {
5552             cpu_set_tls (new_env, newtls);
5553         }
5554 
5555         memset(&info, 0, sizeof(info));
5556         pthread_mutex_init(&info.mutex, NULL);
5557         pthread_mutex_lock(&info.mutex);
5558         pthread_cond_init(&info.cond, NULL);
5559         info.env = new_env;
5560         if (flags & CLONE_CHILD_SETTID) {
5561             info.child_tidptr = child_tidptr;
5562         }
5563         if (flags & CLONE_PARENT_SETTID) {
5564             info.parent_tidptr = parent_tidptr;
5565         }
5566 
5567         ret = pthread_attr_init(&attr);
5568         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5569         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5570         /* It is not safe to deliver signals until the child has finished
5571            initializing, so temporarily block all signals.  */
5572         sigfillset(&sigmask);
5573         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5574         cpu->random_seed = qemu_guest_random_seed_thread_part1();
5575 
5576         /* If this is our first additional thread, we need to ensure we
5577          * generate code for parallel execution and flush old translations.
5578          */
5579         if (!parallel_cpus) {
5580             parallel_cpus = true;
5581             tb_flush(cpu);
5582         }
5583 
5584         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5585         /* TODO: Free new CPU state if thread creation failed.  */
5586 
5587         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5588         pthread_attr_destroy(&attr);
5589         if (ret == 0) {
5590             /* Wait for the child to initialize.  */
5591             pthread_cond_wait(&info.cond, &info.mutex);
5592             ret = info.tid;
5593         } else {
5594             ret = -1;
5595         }
5596         pthread_mutex_unlock(&info.mutex);
5597         pthread_cond_destroy(&info.cond);
5598         pthread_mutex_destroy(&info.mutex);
5599         pthread_mutex_unlock(&clone_lock);
5600     } else {
5601         /* if CLONE_VM is not set, we consider it a fork */
5602         if (flags & CLONE_INVALID_FORK_FLAGS) {
5603             return -TARGET_EINVAL;
5604         }
5605 
5606         /* We can't support custom termination signals */
5607         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5608             return -TARGET_EINVAL;
5609         }
5610 
5611         if (block_signals()) {
5612             return -TARGET_ERESTARTSYS;
5613         }
5614 
5615         fork_start();
5616         ret = fork();
5617         if (ret == 0) {
5618             /* Child Process.  */
5619             cpu_clone_regs(env, newsp);
5620             fork_end(1);
5621             /* There is a race condition here.  The parent process could
5622                theoretically read the TID in the child process before the child
5623                tid is set.  This would require using either ptrace
5624                (not implemented) or having *_tidptr point at a shared memory
5625                mapping.  We can't repeat the spinlock hack used above because
5626                the child process gets its own copy of the lock.  */
5627             if (flags & CLONE_CHILD_SETTID)
5628                 put_user_u32(sys_gettid(), child_tidptr);
5629             if (flags & CLONE_PARENT_SETTID)
5630                 put_user_u32(sys_gettid(), parent_tidptr);
5631             ts = (TaskState *)cpu->opaque;
5632             if (flags & CLONE_SETTLS)
5633                 cpu_set_tls (env, newtls);
5634             if (flags & CLONE_CHILD_CLEARTID)
5635                 ts->child_tidptr = child_tidptr;
5636         } else {
5637             fork_end(0);
5638         }
5639     }
5640     return ret;
5641 }
5642 
5643 /* warning: doesn't handle Linux-specific flags... */
5644 static int target_to_host_fcntl_cmd(int cmd)
5645 {
5646     int ret;
5647 
5648     switch(cmd) {
5649     case TARGET_F_DUPFD:
5650     case TARGET_F_GETFD:
5651     case TARGET_F_SETFD:
5652     case TARGET_F_GETFL:
5653     case TARGET_F_SETFL:
5654         ret = cmd;
5655         break;
5656     case TARGET_F_GETLK:
5657         ret = F_GETLK64;
5658         break;
5659     case TARGET_F_SETLK:
5660         ret = F_SETLK64;
5661         break;
5662     case TARGET_F_SETLKW:
5663         ret = F_SETLKW64;
5664         break;
5665     case TARGET_F_GETOWN:
5666         ret = F_GETOWN;
5667         break;
5668     case TARGET_F_SETOWN:
5669         ret = F_SETOWN;
5670         break;
5671     case TARGET_F_GETSIG:
5672         ret = F_GETSIG;
5673         break;
5674     case TARGET_F_SETSIG:
5675         ret = F_SETSIG;
5676         break;
5677 #if TARGET_ABI_BITS == 32
5678     case TARGET_F_GETLK64:
5679         ret = F_GETLK64;
5680         break;
5681     case TARGET_F_SETLK64:
5682         ret = F_SETLK64;
5683         break;
5684     case TARGET_F_SETLKW64:
5685         ret = F_SETLKW64;
5686         break;
5687 #endif
5688     case TARGET_F_SETLEASE:
5689         ret = F_SETLEASE;
5690         break;
5691     case TARGET_F_GETLEASE:
5692         ret = F_GETLEASE;
5693         break;
5694 #ifdef F_DUPFD_CLOEXEC
5695     case TARGET_F_DUPFD_CLOEXEC:
5696         ret = F_DUPFD_CLOEXEC;
5697         break;
5698 #endif
5699     case TARGET_F_NOTIFY:
5700         ret = F_NOTIFY;
5701         break;
5702 #ifdef F_GETOWN_EX
5703     case TARGET_F_GETOWN_EX:
5704         ret = F_GETOWN_EX;
5705         break;
5706 #endif
5707 #ifdef F_SETOWN_EX
5708     case TARGET_F_SETOWN_EX:
5709         ret = F_SETOWN_EX;
5710         break;
5711 #endif
5712 #ifdef F_SETPIPE_SZ
5713     case TARGET_F_SETPIPE_SZ:
5714         ret = F_SETPIPE_SZ;
5715         break;
5716     case TARGET_F_GETPIPE_SZ:
5717         ret = F_GETPIPE_SZ;
5718         break;
5719 #endif
5720     default:
5721         ret = -TARGET_EINVAL;
5722         break;
5723     }
5724 
5725 #if defined(__powerpc64__)
5726     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which
5727      * are not supported by the kernel. The glibc fcntl wrapper adjusts
5728      * them to 5, 6 and 7 before making the syscall(). Since we make the
5729      * syscall directly, adjust to what the kernel supports.
5730      */
5731     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5732         ret -= F_GETLK64 - 5;
5733     }
5734 #endif
5735 
5736     return ret;
5737 }
5738 
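/* FLOCK_TRANSTBL expands to a switch over the l_type values; redefining
 * TRANSTBL_CONVERT before each expansion lets the same list generate both
 * the target-to-host and host-to-target conversions below.
 */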
5739 #define FLOCK_TRANSTBL \
5740     switch (type) { \
5741     TRANSTBL_CONVERT(F_RDLCK); \
5742     TRANSTBL_CONVERT(F_WRLCK); \
5743     TRANSTBL_CONVERT(F_UNLCK); \
5744     TRANSTBL_CONVERT(F_EXLCK); \
5745     TRANSTBL_CONVERT(F_SHLCK); \
5746     }
5747 
5748 static int target_to_host_flock(int type)
5749 {
5750 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5751     FLOCK_TRANSTBL
5752 #undef  TRANSTBL_CONVERT
5753     return -TARGET_EINVAL;
5754 }
5755 
5756 static int host_to_target_flock(int type)
5757 {
5758 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5759     FLOCK_TRANSTBL
5760 #undef  TRANSTBL_CONVERT
5761     /* if we don't know how to convert the value coming
5762      * from the host, we copy it to the target field as-is
5763      */
5764     return type;
5765 }
5766 
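/* Copy helpers between the guest flock layouts and the host struct flock64
 * used for the actual fcntl() call.  Separate variants handle the guest's
 * flock, its flock64, and (for 32-bit ARM) the old OABI flock64 layout.
 */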
5767 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5768                                             abi_ulong target_flock_addr)
5769 {
5770     struct target_flock *target_fl;
5771     int l_type;
5772 
5773     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5774         return -TARGET_EFAULT;
5775     }
5776 
5777     __get_user(l_type, &target_fl->l_type);
5778     l_type = target_to_host_flock(l_type);
5779     if (l_type < 0) {
5780         return l_type;
5781     }
5782     fl->l_type = l_type;
5783     __get_user(fl->l_whence, &target_fl->l_whence);
5784     __get_user(fl->l_start, &target_fl->l_start);
5785     __get_user(fl->l_len, &target_fl->l_len);
5786     __get_user(fl->l_pid, &target_fl->l_pid);
5787     unlock_user_struct(target_fl, target_flock_addr, 0);
5788     return 0;
5789 }
5790 
5791 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5792                                           const struct flock64 *fl)
5793 {
5794     struct target_flock *target_fl;
5795     short l_type;
5796 
5797     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5798         return -TARGET_EFAULT;
5799     }
5800 
5801     l_type = host_to_target_flock(fl->l_type);
5802     __put_user(l_type, &target_fl->l_type);
5803     __put_user(fl->l_whence, &target_fl->l_whence);
5804     __put_user(fl->l_start, &target_fl->l_start);
5805     __put_user(fl->l_len, &target_fl->l_len);
5806     __put_user(fl->l_pid, &target_fl->l_pid);
5807     unlock_user_struct(target_fl, target_flock_addr, 1);
5808     return 0;
5809 }
5810 
5811 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5812 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5813 
5814 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5815 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5816                                                    abi_ulong target_flock_addr)
5817 {
5818     struct target_oabi_flock64 *target_fl;
5819     int l_type;
5820 
5821     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5822         return -TARGET_EFAULT;
5823     }
5824 
5825     __get_user(l_type, &target_fl->l_type);
5826     l_type = target_to_host_flock(l_type);
5827     if (l_type < 0) {
5828         return l_type;
5829     }
5830     fl->l_type = l_type;
5831     __get_user(fl->l_whence, &target_fl->l_whence);
5832     __get_user(fl->l_start, &target_fl->l_start);
5833     __get_user(fl->l_len, &target_fl->l_len);
5834     __get_user(fl->l_pid, &target_fl->l_pid);
5835     unlock_user_struct(target_fl, target_flock_addr, 0);
5836     return 0;
5837 }
5838 
5839 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
5840                                                  const struct flock64 *fl)
5841 {
5842     struct target_oabi_flock64 *target_fl;
5843     short l_type;
5844 
5845     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5846         return -TARGET_EFAULT;
5847     }
5848 
5849     l_type = host_to_target_flock(fl->l_type);
5850     __put_user(l_type, &target_fl->l_type);
5851     __put_user(fl->l_whence, &target_fl->l_whence);
5852     __put_user(fl->l_start, &target_fl->l_start);
5853     __put_user(fl->l_len, &target_fl->l_len);
5854     __put_user(fl->l_pid, &target_fl->l_pid);
5855     unlock_user_struct(target_fl, target_flock_addr, 1);
5856     return 0;
5857 }
5858 #endif
5859 
5860 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
5861                                               abi_ulong target_flock_addr)
5862 {
5863     struct target_flock64 *target_fl;
5864     int l_type;
5865 
5866     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5867         return -TARGET_EFAULT;
5868     }
5869 
5870     __get_user(l_type, &target_fl->l_type);
5871     l_type = target_to_host_flock(l_type);
5872     if (l_type < 0) {
5873         return l_type;
5874     }
5875     fl->l_type = l_type;
5876     __get_user(fl->l_whence, &target_fl->l_whence);
5877     __get_user(fl->l_start, &target_fl->l_start);
5878     __get_user(fl->l_len, &target_fl->l_len);
5879     __get_user(fl->l_pid, &target_fl->l_pid);
5880     unlock_user_struct(target_fl, target_flock_addr, 0);
5881     return 0;
5882 }
5883 
5884 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
5885                                             const struct flock64 *fl)
5886 {
5887     struct target_flock64 *target_fl;
5888     short l_type;
5889 
5890     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5891         return -TARGET_EFAULT;
5892     }
5893 
5894     l_type = host_to_target_flock(fl->l_type);
5895     __put_user(l_type, &target_fl->l_type);
5896     __put_user(fl->l_whence, &target_fl->l_whence);
5897     __put_user(fl->l_start, &target_fl->l_start);
5898     __put_user(fl->l_len, &target_fl->l_len);
5899     __put_user(fl->l_pid, &target_fl->l_pid);
5900     unlock_user_struct(target_fl, target_flock_addr, 1);
5901     return 0;
5902 }
5903 
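/* fcntl() emulation: the command is mapped by target_to_host_fcntl_cmd(),
 * lock commands go through the flock copy helpers above, F_GETFL/F_SETFL
 * translate their flag bits via fcntl_flags_tbl, and the remaining
 * commands pass their argument through unchanged.
 */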
5904 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
5905 {
5906     struct flock64 fl64;
5907 #ifdef F_GETOWN_EX
5908     struct f_owner_ex fox;
5909     struct target_f_owner_ex *target_fox;
5910 #endif
5911     abi_long ret;
5912     int host_cmd = target_to_host_fcntl_cmd(cmd);
5913 
5914     if (host_cmd == -TARGET_EINVAL)
5915 	    return host_cmd;
5916 
5917     switch(cmd) {
5918     case TARGET_F_GETLK:
5919         ret = copy_from_user_flock(&fl64, arg);
5920         if (ret) {
5921             return ret;
5922         }
5923         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5924         if (ret == 0) {
5925             ret = copy_to_user_flock(arg, &fl64);
5926         }
5927         break;
5928 
5929     case TARGET_F_SETLK:
5930     case TARGET_F_SETLKW:
5931         ret = copy_from_user_flock(&fl64, arg);
5932         if (ret) {
5933             return ret;
5934         }
5935         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5936         break;
5937 
5938     case TARGET_F_GETLK64:
5939         ret = copy_from_user_flock64(&fl64, arg);
5940         if (ret) {
5941             return ret;
5942         }
5943         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5944         if (ret == 0) {
5945             ret = copy_to_user_flock64(arg, &fl64);
5946         }
5947         break;
5948     case TARGET_F_SETLK64:
5949     case TARGET_F_SETLKW64:
5950         ret = copy_from_user_flock64(&fl64, arg);
5951         if (ret) {
5952             return ret;
5953         }
5954         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5955         break;
5956 
5957     case TARGET_F_GETFL:
5958         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5959         if (ret >= 0) {
5960             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
5961         }
5962         break;
5963 
5964     case TARGET_F_SETFL:
5965         ret = get_errno(safe_fcntl(fd, host_cmd,
5966                                    target_to_host_bitmask(arg,
5967                                                           fcntl_flags_tbl)));
5968         break;
5969 
5970 #ifdef F_GETOWN_EX
5971     case TARGET_F_GETOWN_EX:
5972         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5973         if (ret >= 0) {
5974             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
5975                 return -TARGET_EFAULT;
5976             target_fox->type = tswap32(fox.type);
5977             target_fox->pid = tswap32(fox.pid);
5978             unlock_user_struct(target_fox, arg, 1);
5979         }
5980         break;
5981 #endif
5982 
5983 #ifdef F_SETOWN_EX
5984     case TARGET_F_SETOWN_EX:
5985         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
5986             return -TARGET_EFAULT;
5987         fox.type = tswap32(target_fox->type);
5988         fox.pid = tswap32(target_fox->pid);
5989         unlock_user_struct(target_fox, arg, 0);
5990         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5991         break;
5992 #endif
5993 
5994     case TARGET_F_SETOWN:
5995     case TARGET_F_GETOWN:
5996     case TARGET_F_SETSIG:
5997     case TARGET_F_GETSIG:
5998     case TARGET_F_SETLEASE:
5999     case TARGET_F_GETLEASE:
6000     case TARGET_F_SETPIPE_SZ:
6001     case TARGET_F_GETPIPE_SZ:
6002         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6003         break;
6004 
6005     default:
6006         ret = get_errno(safe_fcntl(fd, cmd, arg));
6007         break;
6008     }
6009     return ret;
6010 }
6011 
6012 #ifdef USE_UID16
6013 
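/* For targets with 16-bit uid_t/gid_t: ids above 65535 are clamped to the
 * overflow id 65534, and a 16-bit -1 (meaning "do not change") is widened
 * so that it keeps its special meaning for the host syscalls.
 */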
6014 static inline int high2lowuid(int uid)
6015 {
6016     if (uid > 65535)
6017         return 65534;
6018     else
6019         return uid;
6020 }
6021 
6022 static inline int high2lowgid(int gid)
6023 {
6024     if (gid > 65535)
6025         return 65534;
6026     else
6027         return gid;
6028 }
6029 
6030 static inline int low2highuid(int uid)
6031 {
6032     if ((int16_t)uid == -1)
6033         return -1;
6034     else
6035         return uid;
6036 }
6037 
6038 static inline int low2highgid(int gid)
6039 {
6040     if ((int16_t)gid == -1)
6041         return -1;
6042     else
6043         return gid;
6044 }
6045 static inline int tswapid(int id)
6046 {
6047     return tswap16(id);
6048 }
6049 
6050 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6051 
6052 #else /* !USE_UID16 */
6053 static inline int high2lowuid(int uid)
6054 {
6055     return uid;
6056 }
6057 static inline int high2lowgid(int gid)
6058 {
6059     return gid;
6060 }
6061 static inline int low2highuid(int uid)
6062 {
6063     return uid;
6064 }
6065 static inline int low2highgid(int gid)
6066 {
6067     return gid;
6068 }
6069 static inline int tswapid(int id)
6070 {
6071     return tswap32(id);
6072 }
6073 
6074 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6075 
6076 #endif /* USE_UID16 */
6077 
6078 /* We must do direct syscalls for setting UID/GID, because we want to
6079  * implement the Linux system call semantics of "change only for this thread",
6080  * not the libc/POSIX semantics of "change for all threads in process".
6081  * (See http://ewontfix.com/17/ for more details.)
6082  * We use the 32-bit version of the syscalls if present; if it is not
6083  * then either the host architecture supports 32-bit UIDs natively with
6084  * the standard syscall, or the 16-bit UID is the best we can do.
6085  */
6086 #ifdef __NR_setuid32
6087 #define __NR_sys_setuid __NR_setuid32
6088 #else
6089 #define __NR_sys_setuid __NR_setuid
6090 #endif
6091 #ifdef __NR_setgid32
6092 #define __NR_sys_setgid __NR_setgid32
6093 #else
6094 #define __NR_sys_setgid __NR_setgid
6095 #endif
6096 #ifdef __NR_setresuid32
6097 #define __NR_sys_setresuid __NR_setresuid32
6098 #else
6099 #define __NR_sys_setresuid __NR_setresuid
6100 #endif
6101 #ifdef __NR_setresgid32
6102 #define __NR_sys_setresgid __NR_setresgid32
6103 #else
6104 #define __NR_sys_setresgid __NR_setresgid
6105 #endif
6106 
6107 _syscall1(int, sys_setuid, uid_t, uid)
6108 _syscall1(int, sys_setgid, gid_t, gid)
6109 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6110 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6111 
6112 void syscall_init(void)
6113 {
6114     IOCTLEntry *ie;
6115     const argtype *arg_type;
6116     int size;
6117     int i;
6118 
6119     thunk_init(STRUCT_MAX);
6120 
6121 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6122 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6123 #include "syscall_types.h"
6124 #undef STRUCT
6125 #undef STRUCT_SPECIAL
6126 
6127     /* Build target_to_host_errno_table[] from
6128      * host_to_target_errno_table[]. */
6129     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6130         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6131     }
6132 
6133     /* Patch the ioctl size if necessary. We rely on the fact that
6134        no ioctl has all bits set to '1' in the size field */
6135     ie = ioctl_entries;
6136     while (ie->target_cmd != 0) {
6137         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6138             TARGET_IOC_SIZEMASK) {
6139             arg_type = ie->arg_type;
6140             if (arg_type[0] != TYPE_PTR) {
6141                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6142                         ie->target_cmd);
6143                 exit(1);
6144             }
6145             arg_type++;
6146             size = thunk_type_size(arg_type, 0);
6147             ie->target_cmd = (ie->target_cmd &
6148                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6149                 (size << TARGET_IOC_SIZESHIFT);
6150         }
6151 
6152         /* automatic consistency check when host and target arch match */
6153 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6154     (defined(__x86_64__) && defined(TARGET_X86_64))
6155         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6156             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6157                     ie->name, ie->target_cmd, ie->host_cmd);
6158         }
6159 #endif
6160         ie++;
6161     }
6162 }
6163 
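/* 32-bit guests pass 64-bit file offsets as a pair of registers whose word
 * order depends on guest endianness; target_offset64() reassembles the
 * pair, and regpairs_aligned() (used by the callers below) shifts the pair
 * along by one argument on ABIs that require it to be aligned.
 */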
6164 #if TARGET_ABI_BITS == 32
6165 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6166 {
6167 #ifdef TARGET_WORDS_BIGENDIAN
6168     return ((uint64_t)word0 << 32) | word1;
6169 #else
6170     return ((uint64_t)word1 << 32) | word0;
6171 #endif
6172 }
6173 #else /* TARGET_ABI_BITS == 32 */
6174 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6175 {
6176     return word0;
6177 }
6178 #endif /* TARGET_ABI_BITS != 32 */
6179 
6180 #ifdef TARGET_NR_truncate64
6181 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6182                                          abi_long arg2,
6183                                          abi_long arg3,
6184                                          abi_long arg4)
6185 {
6186     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6187         arg2 = arg3;
6188         arg3 = arg4;
6189     }
6190     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6191 }
6192 #endif
6193 
6194 #ifdef TARGET_NR_ftruncate64
6195 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6196                                           abi_long arg2,
6197                                           abi_long arg3,
6198                                           abi_long arg4)
6199 {
6200     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6201         arg2 = arg3;
6202         arg3 = arg4;
6203     }
6204     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6205 }
6206 #endif
6207 
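/*
 * Copy a struct timespec between guest memory and the host representation,
 * byte-swapping the fields as needed.  Both helpers return 0 on success or
 * -TARGET_EFAULT if the guest address cannot be accessed.
 */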
6208 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6209                                                abi_ulong target_addr)
6210 {
6211     struct target_timespec *target_ts;
6212 
6213     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6214         return -TARGET_EFAULT;
6215     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6216     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6217     unlock_user_struct(target_ts, target_addr, 0);
6218     return 0;
6219 }
6220 
6221 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6222                                                struct timespec *host_ts)
6223 {
6224     struct target_timespec *target_ts;
6225 
6226     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6227         return -TARGET_EFAULT;
6228     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6229     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6230     unlock_user_struct(target_ts, target_addr, 1);
6231     return 0;
6232 }
6233 
6234 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6235                                                  abi_ulong target_addr)
6236 {
6237     struct target_itimerspec *target_itspec;
6238 
6239     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6240         return -TARGET_EFAULT;
6241     }
6242 
6243     host_itspec->it_interval.tv_sec =
6244                             tswapal(target_itspec->it_interval.tv_sec);
6245     host_itspec->it_interval.tv_nsec =
6246                             tswapal(target_itspec->it_interval.tv_nsec);
6247     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6248     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6249 
6250     unlock_user_struct(target_itspec, target_addr, 1);
6251     return 0;
6252 }
6253 
6254 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6255                                                struct itimerspec *host_its)
6256 {
6257     struct target_itimerspec *target_itspec;
6258 
6259     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6260         return -TARGET_EFAULT;
6261     }
6262 
6263     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6264     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6265 
6266     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6267     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6268 
6269     unlock_user_struct(target_itspec, target_addr, 0);
6270     return 0;
6271 }
6272 
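/*
 * Convert a struct timex (as used by adjtimex) between the guest and host
 * layouts, field by field.
 */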
6273 static inline abi_long target_to_host_timex(struct timex *host_tx,
6274                                             abi_long target_addr)
6275 {
6276     struct target_timex *target_tx;
6277 
6278     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6279         return -TARGET_EFAULT;
6280     }
6281 
6282     __get_user(host_tx->modes, &target_tx->modes);
6283     __get_user(host_tx->offset, &target_tx->offset);
6284     __get_user(host_tx->freq, &target_tx->freq);
6285     __get_user(host_tx->maxerror, &target_tx->maxerror);
6286     __get_user(host_tx->esterror, &target_tx->esterror);
6287     __get_user(host_tx->status, &target_tx->status);
6288     __get_user(host_tx->constant, &target_tx->constant);
6289     __get_user(host_tx->precision, &target_tx->precision);
6290     __get_user(host_tx->tolerance, &target_tx->tolerance);
6291     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6292     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6293     __get_user(host_tx->tick, &target_tx->tick);
6294     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6295     __get_user(host_tx->jitter, &target_tx->jitter);
6296     __get_user(host_tx->shift, &target_tx->shift);
6297     __get_user(host_tx->stabil, &target_tx->stabil);
6298     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6299     __get_user(host_tx->calcnt, &target_tx->calcnt);
6300     __get_user(host_tx->errcnt, &target_tx->errcnt);
6301     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6302     __get_user(host_tx->tai, &target_tx->tai);
6303 
6304     unlock_user_struct(target_tx, target_addr, 0);
6305     return 0;
6306 }
6307 
6308 static inline abi_long host_to_target_timex(abi_long target_addr,
6309                                             struct timex *host_tx)
6310 {
6311     struct target_timex *target_tx;
6312 
6313     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6314         return -TARGET_EFAULT;
6315     }
6316 
6317     __put_user(host_tx->modes, &target_tx->modes);
6318     __put_user(host_tx->offset, &target_tx->offset);
6319     __put_user(host_tx->freq, &target_tx->freq);
6320     __put_user(host_tx->maxerror, &target_tx->maxerror);
6321     __put_user(host_tx->esterror, &target_tx->esterror);
6322     __put_user(host_tx->status, &target_tx->status);
6323     __put_user(host_tx->constant, &target_tx->constant);
6324     __put_user(host_tx->precision, &target_tx->precision);
6325     __put_user(host_tx->tolerance, &target_tx->tolerance);
6326     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6327     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6328     __put_user(host_tx->tick, &target_tx->tick);
6329     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6330     __put_user(host_tx->jitter, &target_tx->jitter);
6331     __put_user(host_tx->shift, &target_tx->shift);
6332     __put_user(host_tx->stabil, &target_tx->stabil);
6333     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6334     __put_user(host_tx->calcnt, &target_tx->calcnt);
6335     __put_user(host_tx->errcnt, &target_tx->errcnt);
6336     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6337     __put_user(host_tx->tai, &target_tx->tai);
6338 
6339     unlock_user_struct(target_tx, target_addr, 1);
6340     return 0;
6341 }
6342 
6343 
6344 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6345                                                abi_ulong target_addr)
6346 {
6347     struct target_sigevent *target_sevp;
6348 
6349     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6350         return -TARGET_EFAULT;
6351     }
6352 
6353     /* This union is awkward on 64-bit systems because it has a 32-bit
6354      * integer and a pointer in it; we follow the conversion approach
6355      * used for handling sigval types in signal.c so the guest should get
6356      * the correct value back even if we did a 64-bit byteswap and it's
6357      * using the 32-bit integer.
6358      */
6359     host_sevp->sigev_value.sival_ptr =
6360         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6361     host_sevp->sigev_signo =
6362         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6363     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6364     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6365 
6366     unlock_user_struct(target_sevp, target_addr, 1);
6367     return 0;
6368 }
6369 
6370 #if defined(TARGET_NR_mlockall)
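/* Translate the guest's mlockall() flags to host MCL_* values. */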
6371 static inline int target_to_host_mlockall_arg(int arg)
6372 {
6373     int result = 0;
6374 
6375     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6376         result |= MCL_CURRENT;
6377     }
6378     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6379         result |= MCL_FUTURE;
6380     }
6381     return result;
6382 }
6383 #endif
6384 
6385 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6386      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6387      defined(TARGET_NR_newfstatat))
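/*
 * Fill the guest's stat64 (or stat) structure from a host struct stat.
 * 32-bit ARM EABI guests use a different layout and are handled first.
 */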
6388 static inline abi_long host_to_target_stat64(void *cpu_env,
6389                                              abi_ulong target_addr,
6390                                              struct stat *host_st)
6391 {
6392 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6393     if (((CPUARMState *)cpu_env)->eabi) {
6394         struct target_eabi_stat64 *target_st;
6395 
6396         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6397             return -TARGET_EFAULT;
6398         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6399         __put_user(host_st->st_dev, &target_st->st_dev);
6400         __put_user(host_st->st_ino, &target_st->st_ino);
6401 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6402         __put_user(host_st->st_ino, &target_st->__st_ino);
6403 #endif
6404         __put_user(host_st->st_mode, &target_st->st_mode);
6405         __put_user(host_st->st_nlink, &target_st->st_nlink);
6406         __put_user(host_st->st_uid, &target_st->st_uid);
6407         __put_user(host_st->st_gid, &target_st->st_gid);
6408         __put_user(host_st->st_rdev, &target_st->st_rdev);
6409         __put_user(host_st->st_size, &target_st->st_size);
6410         __put_user(host_st->st_blksize, &target_st->st_blksize);
6411         __put_user(host_st->st_blocks, &target_st->st_blocks);
6412         __put_user(host_st->st_atime, &target_st->target_st_atime);
6413         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6414         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6415         unlock_user_struct(target_st, target_addr, 1);
6416     } else
6417 #endif
6418     {
6419 #if defined(TARGET_HAS_STRUCT_STAT64)
6420         struct target_stat64 *target_st;
6421 #else
6422         struct target_stat *target_st;
6423 #endif
6424 
6425         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6426             return -TARGET_EFAULT;
6427         memset(target_st, 0, sizeof(*target_st));
6428         __put_user(host_st->st_dev, &target_st->st_dev);
6429         __put_user(host_st->st_ino, &target_st->st_ino);
6430 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6431         __put_user(host_st->st_ino, &target_st->__st_ino);
6432 #endif
6433         __put_user(host_st->st_mode, &target_st->st_mode);
6434         __put_user(host_st->st_nlink, &target_st->st_nlink);
6435         __put_user(host_st->st_uid, &target_st->st_uid);
6436         __put_user(host_st->st_gid, &target_st->st_gid);
6437         __put_user(host_st->st_rdev, &target_st->st_rdev);
6438         /* XXX: better use of kernel struct */
6439         __put_user(host_st->st_size, &target_st->st_size);
6440         __put_user(host_st->st_blksize, &target_st->st_blksize);
6441         __put_user(host_st->st_blocks, &target_st->st_blocks);
6442         __put_user(host_st->st_atime, &target_st->target_st_atime);
6443         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6444         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6445         unlock_user_struct(target_st, target_addr, 1);
6446     }
6447 
6448     return 0;
6449 }
6450 #endif
6451 
6452 /* ??? Using host futex calls even when target atomic operations
6453    are not really atomic probably breaks things.  However, implementing
6454    futexes locally would make futexes shared between multiple processes
6455    tricky.  In any case they're probably useless because guest atomic
6456    operations won't work either.  */
6457 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6458                     target_ulong uaddr2, int val3)
6459 {
6460     struct timespec ts, *pts;
6461     int base_op;
6462 
6463     /* ??? We assume FUTEX_* constants are the same on both host
6464        and target.  */
6465 #ifdef FUTEX_CMD_MASK
6466     base_op = op & FUTEX_CMD_MASK;
6467 #else
6468     base_op = op;
6469 #endif
6470     switch (base_op) {
6471     case FUTEX_WAIT:
6472     case FUTEX_WAIT_BITSET:
6473         if (timeout) {
6474             pts = &ts;
6475             target_to_host_timespec(pts, timeout);
6476         } else {
6477             pts = NULL;
6478         }
6479         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6480                          pts, NULL, val3));
6481     case FUTEX_WAKE:
6482         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6483     case FUTEX_FD:
6484         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6485     case FUTEX_REQUEUE:
6486     case FUTEX_CMP_REQUEUE:
6487     case FUTEX_WAKE_OP:
6488         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6489            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6490            But the prototype takes a `struct timespec *'; insert casts
6491            to satisfy the compiler.  We do not need to tswap TIMEOUT
6492            since it's not compared to guest memory.  */
6493         pts = (struct timespec *)(uintptr_t) timeout;
6494         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6495                                     g2h(uaddr2),
6496                                     (base_op == FUTEX_CMP_REQUEUE
6497                                      ? tswap32(val3)
6498                                      : val3)));
6499     default:
6500         return -TARGET_ENOSYS;
6501     }
6502 }
6503 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
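/*
 * name_to_handle_at(2): handle_bytes and handle_type need byte-swapping,
 * while the opaque handle payload is copied through unchanged.
 */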
6504 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6505                                      abi_long handle, abi_long mount_id,
6506                                      abi_long flags)
6507 {
6508     struct file_handle *target_fh;
6509     struct file_handle *fh;
6510     int mid = 0;
6511     abi_long ret;
6512     char *name;
6513     unsigned int size, total_size;
6514 
6515     if (get_user_s32(size, handle)) {
6516         return -TARGET_EFAULT;
6517     }
6518 
6519     name = lock_user_string(pathname);
6520     if (!name) {
6521         return -TARGET_EFAULT;
6522     }
6523 
6524     total_size = sizeof(struct file_handle) + size;
6525     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6526     if (!target_fh) {
6527         unlock_user(name, pathname, 0);
6528         return -TARGET_EFAULT;
6529     }
6530 
6531     fh = g_malloc0(total_size);
6532     fh->handle_bytes = size;
6533 
6534     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6535     unlock_user(name, pathname, 0);
6536 
6537     /* man name_to_handle_at(2):
6538      * Other than the use of the handle_bytes field, the caller should treat
6539      * the file_handle structure as an opaque data type
6540      */
6541 
6542     memcpy(target_fh, fh, total_size);
6543     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6544     target_fh->handle_type = tswap32(fh->handle_type);
6545     g_free(fh);
6546     unlock_user(target_fh, handle, total_size);
6547 
6548     if (put_user_s32(mid, mount_id)) {
6549         return -TARGET_EFAULT;
6550     }
6551 
6552     return ret;
6553 
6554 }
6555 #endif
6556 
6557 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
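/*
 * open_by_handle_at(2): rebuild a host file_handle from the guest copy and
 * translate the open flags before calling the host syscall.
 */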
6558 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6559                                      abi_long flags)
6560 {
6561     struct file_handle *target_fh;
6562     struct file_handle *fh;
6563     unsigned int size, total_size;
6564     abi_long ret;
6565 
6566     if (get_user_s32(size, handle)) {
6567         return -TARGET_EFAULT;
6568     }
6569 
6570     total_size = sizeof(struct file_handle) + size;
6571     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6572     if (!target_fh) {
6573         return -TARGET_EFAULT;
6574     }
6575 
6576     fh = g_memdup(target_fh, total_size);
6577     fh->handle_bytes = size;
6578     fh->handle_type = tswap32(target_fh->handle_type);
6579 
6580     ret = get_errno(open_by_handle_at(mount_fd, fh,
6581                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6582 
6583     g_free(fh);
6584 
6585     unlock_user(target_fh, handle, total_size);
6586 
6587     return ret;
6588 }
6589 #endif
6590 
6591 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6592 
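/*
 * Common helper for signalfd() and signalfd4(): convert the guest signal
 * mask and O_NONBLOCK/O_CLOEXEC flags, then register an fd translator for
 * data read back from the new descriptor.
 */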
6593 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6594 {
6595     int host_flags;
6596     target_sigset_t *target_mask;
6597     sigset_t host_mask;
6598     abi_long ret;
6599 
6600     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6601         return -TARGET_EINVAL;
6602     }
6603     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6604         return -TARGET_EFAULT;
6605     }
6606 
6607     target_to_host_sigset(&host_mask, target_mask);
6608 
6609     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6610 
6611     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6612     if (ret >= 0) {
6613         fd_trans_register(ret, &target_signalfd_trans);
6614     }
6615 
6616     unlock_user_struct(target_mask, mask, 0);
6617 
6618     return ret;
6619 }
6620 #endif
6621 
6622 /* Map host to target signal numbers for the wait family of syscalls.
6623    Assume all other status bits are the same.  */
6624 int host_to_target_waitstatus(int status)
6625 {
6626     if (WIFSIGNALED(status)) {
6627         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6628     }
6629     if (WIFSTOPPED(status)) {
6630         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6631                | (status & 0xff);
6632     }
6633     return status;
6634 }
6635 
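/*
 * Emulated /proc/self/cmdline: write out the argv strings recorded at exec
 * time, including their terminating NUL bytes.
 */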
6636 static int open_self_cmdline(void *cpu_env, int fd)
6637 {
6638     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6639     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6640     int i;
6641 
6642     for (i = 0; i < bprm->argc; i++) {
6643         size_t len = strlen(bprm->argv[i]) + 1;
6644 
6645         if (write(fd, bprm->argv[i], len) != len) {
6646             return -1;
6647         }
6648     }
6649 
6650     return 0;
6651 }
6652 
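/*
 * Emulated /proc/self/maps: parse the host's maps file, keep only ranges
 * that correspond to valid guest addresses, and print them back using
 * guest virtual addresses.
 */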
6653 static int open_self_maps(void *cpu_env, int fd)
6654 {
6655     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6656     TaskState *ts = cpu->opaque;
6657     FILE *fp;
6658     char *line = NULL;
6659     size_t len = 0;
6660     ssize_t read;
6661 
6662     fp = fopen("/proc/self/maps", "r");
6663     if (fp == NULL) {
6664         return -1;
6665     }
6666 
6667     while ((read = getline(&line, &len, fp)) != -1) {
6668         int fields, dev_maj, dev_min, inode;
6669         uint64_t min, max, offset;
6670         char flag_r, flag_w, flag_x, flag_p;
6671         char path[512] = "";
6672         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6673                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6674                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6675 
6676         if ((fields < 10) || (fields > 11)) {
6677             continue;
6678         }
6679         if (h2g_valid(min)) {
6680             int flags = page_get_flags(h2g(min));
6681             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6682             if (page_check_range(h2g(min), max - min, flags) == -1) {
6683                 continue;
6684             }
6685             if (h2g(min) == ts->info->stack_limit) {
6686                 pstrcpy(path, sizeof(path), "      [stack]");
6687             }
6688             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6689                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6690                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6691                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6692                     path[0] ? "         " : "", path);
6693         }
6694     }
6695 
6696     free(line);
6697     fclose(fp);
6698 
6699     return 0;
6700 }
6701 
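/*
 * Emulated /proc/self/stat: only the pid, command name and start-of-stack
 * fields carry real values; the remaining fields all read as 0.
 */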
6702 static int open_self_stat(void *cpu_env, int fd)
6703 {
6704     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6705     TaskState *ts = cpu->opaque;
6706     abi_ulong start_stack = ts->info->start_stack;
6707     int i;
6708 
6709     for (i = 0; i < 44; i++) {
6710       char buf[128];
6711       int len;
6712       uint64_t val = 0;
6713 
6714       if (i == 0) {
6715         /* pid */
6716         val = getpid();
6717         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6718       } else if (i == 1) {
6719         /* app name */
6720         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6721       } else if (i == 27) {
6722         /* stack bottom */
6723         val = start_stack;
6724         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6725       } else {
6726         /* all other fields are reported as zero */
6727         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6728       }
6729 
6730       len = strlen(buf);
6731       if (write(fd, buf, len) != len) {
6732           return -1;
6733       }
6734     }
6735 
6736     return 0;
6737 }
6738 
6739 static int open_self_auxv(void *cpu_env, int fd)
6740 {
6741     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6742     TaskState *ts = cpu->opaque;
6743     abi_ulong auxv = ts->info->saved_auxv;
6744     abi_ulong len = ts->info->auxv_len;
6745     char *ptr;
6746 
6747     /*
6748      * The auxiliary vector is stored on the target process's stack;
6749      * read the whole auxv vector and copy it to the file.
6750      */
6751     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6752     if (ptr != NULL) {
6753         while (len > 0) {
6754             ssize_t r;
6755             r = write(fd, ptr, len);
6756             if (r <= 0) {
6757                 break;
6758             }
6759             len -= r;
6760             ptr += r;
6761         }
6762         lseek(fd, 0, SEEK_SET);
6763         unlock_user(ptr, auxv, len);
6764     }
6765 
6766     return 0;
6767 }
6768 
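/*
 * Return 1 if filename names the given /proc entry for the current process,
 * i.e. "/proc/self/<entry>" or "/proc/<pid>/<entry>" with our own pid.
 */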
6769 static int is_proc_myself(const char *filename, const char *entry)
6770 {
6771     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6772         filename += strlen("/proc/");
6773         if (!strncmp(filename, "self/", strlen("self/"))) {
6774             filename += strlen("self/");
6775         } else if (*filename >= '1' && *filename <= '9') {
6776             char myself[80];
6777             snprintf(myself, sizeof(myself), "%d/", getpid());
6778             if (!strncmp(filename, myself, strlen(myself))) {
6779                 filename += strlen(myself);
6780             } else {
6781                 return 0;
6782             }
6783         } else {
6784             return 0;
6785         }
6786         if (!strcmp(filename, entry)) {
6787             return 1;
6788         }
6789     }
6790     return 0;
6791 }
6792 
6793 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6794 static int is_proc(const char *filename, const char *entry)
6795 {
6796     return strcmp(filename, entry) == 0;
6797 }
6798 
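/*
 * When host and guest endianness differ, /proc/net/route is re-emitted with
 * its address fields byte-swapped so the guest sees them in its own order.
 */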
6799 static int open_net_route(void *cpu_env, int fd)
6800 {
6801     FILE *fp;
6802     char *line = NULL;
6803     size_t len = 0;
6804     ssize_t read;
6805 
6806     fp = fopen("/proc/net/route", "r");
6807     if (fp == NULL) {
6808         return -1;
6809     }
6810 
6811     /* read header */
6812 
6813     read = getline(&line, &len, fp);
6814     dprintf(fd, "%s", line);
6815 
6816     /* read routes */
6817 
6818     while ((read = getline(&line, &len, fp)) != -1) {
6819         char iface[16];
6820         uint32_t dest, gw, mask;
6821         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6822         int fields;
6823 
6824         fields = sscanf(line,
6825                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6826                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6827                         &mask, &mtu, &window, &irtt);
6828         if (fields != 11) {
6829             continue;
6830         }
6831         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6832                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6833                 metric, tswap32(mask), mtu, window, irtt);
6834     }
6835 
6836     free(line);
6837     fclose(fp);
6838 
6839     return 0;
6840 }
6841 #endif
6842 
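/*
 * openat() handler: a few /proc paths are emulated by generating their
 * contents into an unlinked temporary file; everything else is passed
 * through to the host via safe_openat().
 */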
6843 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
6844 {
6845     struct fake_open {
6846         const char *filename;
6847         int (*fill)(void *cpu_env, int fd);
6848         int (*cmp)(const char *s1, const char *s2);
6849     };
6850     const struct fake_open *fake_open;
6851     static const struct fake_open fakes[] = {
6852         { "maps", open_self_maps, is_proc_myself },
6853         { "stat", open_self_stat, is_proc_myself },
6854         { "auxv", open_self_auxv, is_proc_myself },
6855         { "cmdline", open_self_cmdline, is_proc_myself },
6856 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6857         { "/proc/net/route", open_net_route, is_proc },
6858 #endif
6859         { NULL, NULL, NULL }
6860     };
6861 
6862     if (is_proc_myself(pathname, "exe")) {
6863         int execfd = qemu_getauxval(AT_EXECFD);
6864         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
6865     }
6866 
6867     for (fake_open = fakes; fake_open->filename; fake_open++) {
6868         if (fake_open->cmp(pathname, fake_open->filename)) {
6869             break;
6870         }
6871     }
6872 
6873     if (fake_open->filename) {
6874         const char *tmpdir;
6875         char filename[PATH_MAX];
6876         int fd, r;
6877 
6878         /* create a temporary file to hold the generated contents */
6879         tmpdir = getenv("TMPDIR");
6880         if (!tmpdir)
6881             tmpdir = "/tmp";
6882         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
6883         fd = mkstemp(filename);
6884         if (fd < 0) {
6885             return fd;
6886         }
6887         unlink(filename);
6888 
6889         if ((r = fake_open->fill(cpu_env, fd))) {
6890             int e = errno;
6891             close(fd);
6892             errno = e;
6893             return r;
6894         }
6895         lseek(fd, 0, SEEK_SET);
6896 
6897         return fd;
6898     }
6899 
6900     return safe_openat(dirfd, path(pathname), flags, mode);
6901 }
6902 
6903 #define TIMER_MAGIC 0x0caf0000
6904 #define TIMER_MAGIC_MASK 0xffff0000
6905 
6906 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
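/*
 * For example, the guest-visible ID (TIMER_MAGIC | 3) maps back to slot 3
 * of g_posix_timers; any value without the magic in its upper 16 bits is
 * rejected with -TARGET_EINVAL.
 */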
6907 static target_timer_t get_timer_id(abi_long arg)
6908 {
6909     target_timer_t timerid = arg;
6910 
6911     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6912         return -TARGET_EINVAL;
6913     }
6914 
6915     timerid &= 0xffff;
6916 
6917     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6918         return -TARGET_EINVAL;
6919     }
6920 
6921     return timerid;
6922 }
6923 
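/*
 * Convert a CPU-affinity bitmap from the guest layout (abi_ulong words in
 * target byte order) to the host's unsigned long layout, zero-filling any
 * remainder of the host mask.
 */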
6924 static int target_to_host_cpu_mask(unsigned long *host_mask,
6925                                    size_t host_size,
6926                                    abi_ulong target_addr,
6927                                    size_t target_size)
6928 {
6929     unsigned target_bits = sizeof(abi_ulong) * 8;
6930     unsigned host_bits = sizeof(*host_mask) * 8;
6931     abi_ulong *target_mask;
6932     unsigned i, j;
6933 
6934     assert(host_size >= target_size);
6935 
6936     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
6937     if (!target_mask) {
6938         return -TARGET_EFAULT;
6939     }
6940     memset(host_mask, 0, host_size);
6941 
6942     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6943         unsigned bit = i * target_bits;
6944         abi_ulong val;
6945 
6946         __get_user(val, &target_mask[i]);
6947         for (j = 0; j < target_bits; j++, bit++) {
6948             if (val & (1UL << j)) {
6949                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
6950             }
6951         }
6952     }
6953 
6954     unlock_user(target_mask, target_addr, 0);
6955     return 0;
6956 }
6957 
6958 static int host_to_target_cpu_mask(const unsigned long *host_mask,
6959                                    size_t host_size,
6960                                    abi_ulong target_addr,
6961                                    size_t target_size)
6962 {
6963     unsigned target_bits = sizeof(abi_ulong) * 8;
6964     unsigned host_bits = sizeof(*host_mask) * 8;
6965     abi_ulong *target_mask;
6966     unsigned i, j;
6967 
6968     assert(host_size >= target_size);
6969 
6970     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
6971     if (!target_mask) {
6972         return -TARGET_EFAULT;
6973     }
6974 
6975     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6976         unsigned bit = i * target_bits;
6977         abi_ulong val = 0;
6978 
6979         for (j = 0; j < target_bits; j++, bit++) {
6980             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
6981                 val |= 1UL << j;
6982             }
6983         }
6984         __put_user(val, &target_mask[i]);
6985     }
6986 
6987     unlock_user(target_mask, target_addr, target_size);
6988     return 0;
6989 }
6990 
6991 /* This is an internal helper for do_syscall so that it is easier
6992  * to have a single return point, allowing actions such as logging
6993  * of syscall results to be performed.
6994  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
6995  */
6996 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
6997                             abi_long arg2, abi_long arg3, abi_long arg4,
6998                             abi_long arg5, abi_long arg6, abi_long arg7,
6999                             abi_long arg8)
7000 {
7001     CPUState *cpu = ENV_GET_CPU(cpu_env);
7002     abi_long ret;
7003 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7004     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7005     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
7006     struct stat st;
7007 #endif
7008 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7009     || defined(TARGET_NR_fstatfs)
7010     struct statfs stfs;
7011 #endif
7012     void *p;
7013 
7014     switch(num) {
7015     case TARGET_NR_exit:
7016         /* In old applications this may be used to implement _exit(2).
7017            However in threaded applications it is used for thread termination,
7018            and _exit_group is used for application termination.
7019            Do thread termination if we have more than one thread.  */
7020 
7021         if (block_signals()) {
7022             return -TARGET_ERESTARTSYS;
7023         }
7024 
7025         cpu_list_lock();
7026 
7027         if (CPU_NEXT(first_cpu)) {
7028             TaskState *ts;
7029 
7030             /* Remove the CPU from the list.  */
7031             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7032 
7033             cpu_list_unlock();
7034 
7035             ts = cpu->opaque;
7036             if (ts->child_tidptr) {
7037                 put_user_u32(0, ts->child_tidptr);
7038                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7039                           NULL, NULL, 0);
7040             }
7041             thread_cpu = NULL;
7042             object_unref(OBJECT(cpu));
7043             g_free(ts);
7044             rcu_unregister_thread();
7045             pthread_exit(NULL);
7046         }
7047 
7048         cpu_list_unlock();
7049         preexit_cleanup(cpu_env, arg1);
7050         _exit(arg1);
7051         return 0; /* avoid warning */
7052     case TARGET_NR_read:
7053         if (arg2 == 0 && arg3 == 0) {
7054             return get_errno(safe_read(arg1, 0, 0));
7055         } else {
7056             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7057                 return -TARGET_EFAULT;
7058             ret = get_errno(safe_read(arg1, p, arg3));
7059             if (ret >= 0 &&
7060                 fd_trans_host_to_target_data(arg1)) {
7061                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7062             }
7063             unlock_user(p, arg2, ret);
7064         }
7065         return ret;
7066     case TARGET_NR_write:
7067         if (arg2 == 0 && arg3 == 0) {
7068             return get_errno(safe_write(arg1, 0, 0));
7069         }
7070         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7071             return -TARGET_EFAULT;
7072         if (fd_trans_target_to_host_data(arg1)) {
7073             void *copy = g_malloc(arg3);
7074             memcpy(copy, p, arg3);
7075             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7076             if (ret >= 0) {
7077                 ret = get_errno(safe_write(arg1, copy, ret));
7078             }
7079             g_free(copy);
7080         } else {
7081             ret = get_errno(safe_write(arg1, p, arg3));
7082         }
7083         unlock_user(p, arg2, 0);
7084         return ret;
7085 
7086 #ifdef TARGET_NR_open
7087     case TARGET_NR_open:
7088         if (!(p = lock_user_string(arg1)))
7089             return -TARGET_EFAULT;
7090         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7091                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7092                                   arg3));
7093         fd_trans_unregister(ret);
7094         unlock_user(p, arg1, 0);
7095         return ret;
7096 #endif
7097     case TARGET_NR_openat:
7098         if (!(p = lock_user_string(arg2)))
7099             return -TARGET_EFAULT;
7100         ret = get_errno(do_openat(cpu_env, arg1, p,
7101                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7102                                   arg4));
7103         fd_trans_unregister(ret);
7104         unlock_user(p, arg2, 0);
7105         return ret;
7106 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7107     case TARGET_NR_name_to_handle_at:
7108         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7109         return ret;
7110 #endif
7111 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7112     case TARGET_NR_open_by_handle_at:
7113         ret = do_open_by_handle_at(arg1, arg2, arg3);
7114         fd_trans_unregister(ret);
7115         return ret;
7116 #endif
7117     case TARGET_NR_close:
7118         fd_trans_unregister(arg1);
7119         return get_errno(close(arg1));
7120 
7121     case TARGET_NR_brk:
7122         return do_brk(arg1);
7123 #ifdef TARGET_NR_fork
7124     case TARGET_NR_fork:
7125         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7126 #endif
7127 #ifdef TARGET_NR_waitpid
7128     case TARGET_NR_waitpid:
7129         {
7130             int status;
7131             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7132             if (!is_error(ret) && arg2 && ret
7133                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7134                 return -TARGET_EFAULT;
7135         }
7136         return ret;
7137 #endif
7138 #ifdef TARGET_NR_waitid
7139     case TARGET_NR_waitid:
7140         {
7141             siginfo_t info;
7142             info.si_pid = 0;
7143             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7144             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7145                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7146                     return -TARGET_EFAULT;
7147                 host_to_target_siginfo(p, &info);
7148                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7149             }
7150         }
7151         return ret;
7152 #endif
7153 #ifdef TARGET_NR_creat /* not on alpha */
7154     case TARGET_NR_creat:
7155         if (!(p = lock_user_string(arg1)))
7156             return -TARGET_EFAULT;
7157         ret = get_errno(creat(p, arg2));
7158         fd_trans_unregister(ret);
7159         unlock_user(p, arg1, 0);
7160         return ret;
7161 #endif
7162 #ifdef TARGET_NR_link
7163     case TARGET_NR_link:
7164         {
7165             void * p2;
7166             p = lock_user_string(arg1);
7167             p2 = lock_user_string(arg2);
7168             if (!p || !p2)
7169                 ret = -TARGET_EFAULT;
7170             else
7171                 ret = get_errno(link(p, p2));
7172             unlock_user(p2, arg2, 0);
7173             unlock_user(p, arg1, 0);
7174         }
7175         return ret;
7176 #endif
7177 #if defined(TARGET_NR_linkat)
7178     case TARGET_NR_linkat:
7179         {
7180             void * p2 = NULL;
7181             if (!arg2 || !arg4)
7182                 return -TARGET_EFAULT;
7183             p  = lock_user_string(arg2);
7184             p2 = lock_user_string(arg4);
7185             if (!p || !p2)
7186                 ret = -TARGET_EFAULT;
7187             else
7188                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7189             unlock_user(p, arg2, 0);
7190             unlock_user(p2, arg4, 0);
7191         }
7192         return ret;
7193 #endif
7194 #ifdef TARGET_NR_unlink
7195     case TARGET_NR_unlink:
7196         if (!(p = lock_user_string(arg1)))
7197             return -TARGET_EFAULT;
7198         ret = get_errno(unlink(p));
7199         unlock_user(p, arg1, 0);
7200         return ret;
7201 #endif
7202 #if defined(TARGET_NR_unlinkat)
7203     case TARGET_NR_unlinkat:
7204         if (!(p = lock_user_string(arg2)))
7205             return -TARGET_EFAULT;
7206         ret = get_errno(unlinkat(arg1, p, arg3));
7207         unlock_user(p, arg2, 0);
7208         return ret;
7209 #endif
7210     case TARGET_NR_execve:
7211         {
7212             char **argp, **envp;
7213             int argc, envc;
7214             abi_ulong gp;
7215             abi_ulong guest_argp;
7216             abi_ulong guest_envp;
7217             abi_ulong addr;
7218             char **q;
7219             int total_size = 0;
7220 
7221             argc = 0;
7222             guest_argp = arg2;
7223             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7224                 if (get_user_ual(addr, gp))
7225                     return -TARGET_EFAULT;
7226                 if (!addr)
7227                     break;
7228                 argc++;
7229             }
7230             envc = 0;
7231             guest_envp = arg3;
7232             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7233                 if (get_user_ual(addr, gp))
7234                     return -TARGET_EFAULT;
7235                 if (!addr)
7236                     break;
7237                 envc++;
7238             }
7239 
7240             argp = g_new0(char *, argc + 1);
7241             envp = g_new0(char *, envc + 1);
7242 
7243             for (gp = guest_argp, q = argp; gp;
7244                   gp += sizeof(abi_ulong), q++) {
7245                 if (get_user_ual(addr, gp))
7246                     goto execve_efault;
7247                 if (!addr)
7248                     break;
7249                 if (!(*q = lock_user_string(addr)))
7250                     goto execve_efault;
7251                 total_size += strlen(*q) + 1;
7252             }
7253             *q = NULL;
7254 
7255             for (gp = guest_envp, q = envp; gp;
7256                   gp += sizeof(abi_ulong), q++) {
7257                 if (get_user_ual(addr, gp))
7258                     goto execve_efault;
7259                 if (!addr)
7260                     break;
7261                 if (!(*q = lock_user_string(addr)))
7262                     goto execve_efault;
7263                 total_size += strlen(*q) + 1;
7264             }
7265             *q = NULL;
7266 
7267             if (!(p = lock_user_string(arg1)))
7268                 goto execve_efault;
7269             /* Although execve() is not an interruptible syscall it is
7270              * a special case where we must use the safe_syscall wrapper:
7271              * if we allow a signal to happen before we make the host
7272              * syscall then we will 'lose' it, because at the point of
7273              * execve the process leaves QEMU's control. So we use the
7274              * safe syscall wrapper to ensure that we either take the
7275              * signal as a guest signal, or else it does not happen
7276              * before the execve completes and makes it the other
7277              * program's problem.
7278              */
7279             ret = get_errno(safe_execve(p, argp, envp));
7280             unlock_user(p, arg1, 0);
7281 
7282             goto execve_end;
7283 
7284         execve_efault:
7285             ret = -TARGET_EFAULT;
7286 
7287         execve_end:
7288             for (gp = guest_argp, q = argp; *q;
7289                   gp += sizeof(abi_ulong), q++) {
7290                 if (get_user_ual(addr, gp)
7291                     || !addr)
7292                     break;
7293                 unlock_user(*q, addr, 0);
7294             }
7295             for (gp = guest_envp, q = envp; *q;
7296                   gp += sizeof(abi_ulong), q++) {
7297                 if (get_user_ual(addr, gp)
7298                     || !addr)
7299                     break;
7300                 unlock_user(*q, addr, 0);
7301             }
7302 
7303             g_free(argp);
7304             g_free(envp);
7305         }
7306         return ret;
7307     case TARGET_NR_chdir:
7308         if (!(p = lock_user_string(arg1)))
7309             return -TARGET_EFAULT;
7310         ret = get_errno(chdir(p));
7311         unlock_user(p, arg1, 0);
7312         return ret;
7313 #ifdef TARGET_NR_time
7314     case TARGET_NR_time:
7315         {
7316             time_t host_time;
7317             ret = get_errno(time(&host_time));
7318             if (!is_error(ret)
7319                 && arg1
7320                 && put_user_sal(host_time, arg1))
7321                 return -TARGET_EFAULT;
7322         }
7323         return ret;
7324 #endif
7325 #ifdef TARGET_NR_mknod
7326     case TARGET_NR_mknod:
7327         if (!(p = lock_user_string(arg1)))
7328             return -TARGET_EFAULT;
7329         ret = get_errno(mknod(p, arg2, arg3));
7330         unlock_user(p, arg1, 0);
7331         return ret;
7332 #endif
7333 #if defined(TARGET_NR_mknodat)
7334     case TARGET_NR_mknodat:
7335         if (!(p = lock_user_string(arg2)))
7336             return -TARGET_EFAULT;
7337         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7338         unlock_user(p, arg2, 0);
7339         return ret;
7340 #endif
7341 #ifdef TARGET_NR_chmod
7342     case TARGET_NR_chmod:
7343         if (!(p = lock_user_string(arg1)))
7344             return -TARGET_EFAULT;
7345         ret = get_errno(chmod(p, arg2));
7346         unlock_user(p, arg1, 0);
7347         return ret;
7348 #endif
7349 #ifdef TARGET_NR_lseek
7350     case TARGET_NR_lseek:
7351         return get_errno(lseek(arg1, arg2, arg3));
7352 #endif
7353 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7354     /* Alpha specific */
7355     case TARGET_NR_getxpid:
7356         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7357         return get_errno(getpid());
7358 #endif
7359 #ifdef TARGET_NR_getpid
7360     case TARGET_NR_getpid:
7361         return get_errno(getpid());
7362 #endif
7363     case TARGET_NR_mount:
7364         {
7365             /* need to look at the data field */
7366             void *p2, *p3;
7367 
7368             if (arg1) {
7369                 p = lock_user_string(arg1);
7370                 if (!p) {
7371                     return -TARGET_EFAULT;
7372                 }
7373             } else {
7374                 p = NULL;
7375             }
7376 
7377             p2 = lock_user_string(arg2);
7378             if (!p2) {
7379                 if (arg1) {
7380                     unlock_user(p, arg1, 0);
7381                 }
7382                 return -TARGET_EFAULT;
7383             }
7384 
7385             if (arg3) {
7386                 p3 = lock_user_string(arg3);
7387                 if (!p3) {
7388                     if (arg1) {
7389                         unlock_user(p, arg1, 0);
7390                     }
7391                     unlock_user(p2, arg2, 0);
7392                     return -TARGET_EFAULT;
7393                 }
7394             } else {
7395                 p3 = NULL;
7396             }
7397 
7398             /* FIXME - arg5 should be locked, but it isn't clear how to
7399              * do that since it's not guaranteed to be a NULL-terminated
7400              * string.
7401              */
7402             if (!arg5) {
7403                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7404             } else {
7405                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7406             }
7407             ret = get_errno(ret);
7408 
7409             if (arg1) {
7410                 unlock_user(p, arg1, 0);
7411             }
7412             unlock_user(p2, arg2, 0);
7413             if (arg3) {
7414                 unlock_user(p3, arg3, 0);
7415             }
7416         }
7417         return ret;
7418 #ifdef TARGET_NR_umount
7419     case TARGET_NR_umount:
7420         if (!(p = lock_user_string(arg1)))
7421             return -TARGET_EFAULT;
7422         ret = get_errno(umount(p));
7423         unlock_user(p, arg1, 0);
7424         return ret;
7425 #endif
7426 #ifdef TARGET_NR_stime /* not on alpha */
7427     case TARGET_NR_stime:
7428         {
7429             time_t host_time;
7430             if (get_user_sal(host_time, arg1))
7431                 return -TARGET_EFAULT;
7432             return get_errno(stime(&host_time));
7433         }
7434 #endif
7435 #ifdef TARGET_NR_alarm /* not on alpha */
7436     case TARGET_NR_alarm:
7437         return alarm(arg1);
7438 #endif
7439 #ifdef TARGET_NR_pause /* not on alpha */
7440     case TARGET_NR_pause:
7441         if (!block_signals()) {
7442             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7443         }
7444         return -TARGET_EINTR;
7445 #endif
7446 #ifdef TARGET_NR_utime
7447     case TARGET_NR_utime:
7448         {
7449             struct utimbuf tbuf, *host_tbuf;
7450             struct target_utimbuf *target_tbuf;
7451             if (arg2) {
7452                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7453                     return -TARGET_EFAULT;
7454                 tbuf.actime = tswapal(target_tbuf->actime);
7455                 tbuf.modtime = tswapal(target_tbuf->modtime);
7456                 unlock_user_struct(target_tbuf, arg2, 0);
7457                 host_tbuf = &tbuf;
7458             } else {
7459                 host_tbuf = NULL;
7460             }
7461             if (!(p = lock_user_string(arg1)))
7462                 return -TARGET_EFAULT;
7463             ret = get_errno(utime(p, host_tbuf));
7464             unlock_user(p, arg1, 0);
7465         }
7466         return ret;
7467 #endif
7468 #ifdef TARGET_NR_utimes
7469     case TARGET_NR_utimes:
7470         {
7471             struct timeval *tvp, tv[2];
7472             if (arg2) {
7473                 if (copy_from_user_timeval(&tv[0], arg2)
7474                     || copy_from_user_timeval(&tv[1],
7475                                               arg2 + sizeof(struct target_timeval)))
7476                     return -TARGET_EFAULT;
7477                 tvp = tv;
7478             } else {
7479                 tvp = NULL;
7480             }
7481             if (!(p = lock_user_string(arg1)))
7482                 return -TARGET_EFAULT;
7483             ret = get_errno(utimes(p, tvp));
7484             unlock_user(p, arg1, 0);
7485         }
7486         return ret;
7487 #endif
7488 #if defined(TARGET_NR_futimesat)
7489     case TARGET_NR_futimesat:
7490         {
7491             struct timeval *tvp, tv[2];
7492             if (arg3) {
7493                 if (copy_from_user_timeval(&tv[0], arg3)
7494                     || copy_from_user_timeval(&tv[1],
7495                                               arg3 + sizeof(struct target_timeval)))
7496                     return -TARGET_EFAULT;
7497                 tvp = tv;
7498             } else {
7499                 tvp = NULL;
7500             }
7501             if (!(p = lock_user_string(arg2))) {
7502                 return -TARGET_EFAULT;
7503             }
7504             ret = get_errno(futimesat(arg1, path(p), tvp));
7505             unlock_user(p, arg2, 0);
7506         }
7507         return ret;
7508 #endif
7509 #ifdef TARGET_NR_access
7510     case TARGET_NR_access:
7511         if (!(p = lock_user_string(arg1))) {
7512             return -TARGET_EFAULT;
7513         }
7514         ret = get_errno(access(path(p), arg2));
7515         unlock_user(p, arg1, 0);
7516         return ret;
7517 #endif
7518 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7519     case TARGET_NR_faccessat:
7520         if (!(p = lock_user_string(arg2))) {
7521             return -TARGET_EFAULT;
7522         }
7523         ret = get_errno(faccessat(arg1, p, arg3, 0));
7524         unlock_user(p, arg2, 0);
7525         return ret;
7526 #endif
7527 #ifdef TARGET_NR_nice /* not on alpha */
7528     case TARGET_NR_nice:
7529         return get_errno(nice(arg1));
7530 #endif
7531     case TARGET_NR_sync:
7532         sync();
7533         return 0;
7534 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7535     case TARGET_NR_syncfs:
7536         return get_errno(syncfs(arg1));
7537 #endif
7538     case TARGET_NR_kill:
7539         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7540 #ifdef TARGET_NR_rename
7541     case TARGET_NR_rename:
7542         {
7543             void *p2;
7544             p = lock_user_string(arg1);
7545             p2 = lock_user_string(arg2);
7546             if (!p || !p2)
7547                 ret = -TARGET_EFAULT;
7548             else
7549                 ret = get_errno(rename(p, p2));
7550             unlock_user(p2, arg2, 0);
7551             unlock_user(p, arg1, 0);
7552         }
7553         return ret;
7554 #endif
7555 #if defined(TARGET_NR_renameat)
7556     case TARGET_NR_renameat:
7557         {
7558             void *p2;
7559             p  = lock_user_string(arg2);
7560             p2 = lock_user_string(arg4);
7561             if (!p || !p2)
7562                 ret = -TARGET_EFAULT;
7563             else
7564                 ret = get_errno(renameat(arg1, p, arg3, p2));
7565             unlock_user(p2, arg4, 0);
7566             unlock_user(p, arg2, 0);
7567         }
7568         return ret;
7569 #endif
7570 #if defined(TARGET_NR_renameat2)
7571     case TARGET_NR_renameat2:
7572         {
7573             void *p2;
7574             p  = lock_user_string(arg2);
7575             p2 = lock_user_string(arg4);
7576             if (!p || !p2) {
7577                 ret = -TARGET_EFAULT;
7578             } else {
7579                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7580             }
7581             unlock_user(p2, arg4, 0);
7582             unlock_user(p, arg2, 0);
7583         }
7584         return ret;
7585 #endif
7586 #ifdef TARGET_NR_mkdir
7587     case TARGET_NR_mkdir:
7588         if (!(p = lock_user_string(arg1)))
7589             return -TARGET_EFAULT;
7590         ret = get_errno(mkdir(p, arg2));
7591         unlock_user(p, arg1, 0);
7592         return ret;
7593 #endif
7594 #if defined(TARGET_NR_mkdirat)
7595     case TARGET_NR_mkdirat:
7596         if (!(p = lock_user_string(arg2)))
7597             return -TARGET_EFAULT;
7598         ret = get_errno(mkdirat(arg1, p, arg3));
7599         unlock_user(p, arg2, 0);
7600         return ret;
7601 #endif
7602 #ifdef TARGET_NR_rmdir
7603     case TARGET_NR_rmdir:
7604         if (!(p = lock_user_string(arg1)))
7605             return -TARGET_EFAULT;
7606         ret = get_errno(rmdir(p));
7607         unlock_user(p, arg1, 0);
7608         return ret;
7609 #endif
7610     case TARGET_NR_dup:
7611         ret = get_errno(dup(arg1));
7612         if (ret >= 0) {
7613             fd_trans_dup(arg1, ret);
7614         }
7615         return ret;
7616 #ifdef TARGET_NR_pipe
7617     case TARGET_NR_pipe:
7618         return do_pipe(cpu_env, arg1, 0, 0);
7619 #endif
7620 #ifdef TARGET_NR_pipe2
7621     case TARGET_NR_pipe2:
7622         return do_pipe(cpu_env, arg1,
7623                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7624 #endif
7625     case TARGET_NR_times:
7626         {
7627             struct target_tms *tmsp;
7628             struct tms tms;
7629             ret = get_errno(times(&tms));
7630             if (arg1) {
7631                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7632                 if (!tmsp)
7633                     return -TARGET_EFAULT;
7634                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7635                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7636                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7637                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7638             }
7639             if (!is_error(ret))
7640                 ret = host_to_target_clock_t(ret);
7641         }
7642         return ret;
7643     case TARGET_NR_acct:
7644         if (arg1 == 0) {
7645             ret = get_errno(acct(NULL));
7646         } else {
7647             if (!(p = lock_user_string(arg1))) {
7648                 return -TARGET_EFAULT;
7649             }
7650             ret = get_errno(acct(path(p)));
7651             unlock_user(p, arg1, 0);
7652         }
7653         return ret;
7654 #ifdef TARGET_NR_umount2
7655     case TARGET_NR_umount2:
7656         if (!(p = lock_user_string(arg1)))
7657             return -TARGET_EFAULT;
7658         ret = get_errno(umount2(p, arg2));
7659         unlock_user(p, arg1, 0);
7660         return ret;
7661 #endif
7662     case TARGET_NR_ioctl:
7663         return do_ioctl(arg1, arg2, arg3);
7664 #ifdef TARGET_NR_fcntl
7665     case TARGET_NR_fcntl:
7666         return do_fcntl(arg1, arg2, arg3);
7667 #endif
7668     case TARGET_NR_setpgid:
7669         return get_errno(setpgid(arg1, arg2));
7670     case TARGET_NR_umask:
7671         return get_errno(umask(arg1));
7672     case TARGET_NR_chroot:
7673         if (!(p = lock_user_string(arg1)))
7674             return -TARGET_EFAULT;
7675         ret = get_errno(chroot(p));
7676         unlock_user(p, arg1, 0);
7677         return ret;
7678 #ifdef TARGET_NR_dup2
7679     case TARGET_NR_dup2:
7680         ret = get_errno(dup2(arg1, arg2));
7681         if (ret >= 0) {
7682             fd_trans_dup(arg1, arg2);
7683         }
7684         return ret;
7685 #endif
7686 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7687     case TARGET_NR_dup3:
7688     {
7689         int host_flags;
7690 
7691         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7692             return -EINVAL;
7693         }
7694         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7695         ret = get_errno(dup3(arg1, arg2, host_flags));
7696         if (ret >= 0) {
7697             fd_trans_dup(arg1, arg2);
7698         }
7699         return ret;
7700     }
7701 #endif
7702 #ifdef TARGET_NR_getppid /* not on alpha */
7703     case TARGET_NR_getppid:
7704         return get_errno(getppid());
7705 #endif
7706 #ifdef TARGET_NR_getpgrp
7707     case TARGET_NR_getpgrp:
7708         return get_errno(getpgrp());
7709 #endif
7710     case TARGET_NR_setsid:
7711         return get_errno(setsid());
7712 #ifdef TARGET_NR_sigaction
7713     case TARGET_NR_sigaction:
7714         {
7715 #if defined(TARGET_ALPHA)
7716             struct target_sigaction act, oact, *pact = 0;
7717             struct target_old_sigaction *old_act;
7718             if (arg2) {
7719                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7720                     return -TARGET_EFAULT;
7721                 act._sa_handler = old_act->_sa_handler;
7722                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7723                 act.sa_flags = old_act->sa_flags;
7724                 act.sa_restorer = 0;
7725                 unlock_user_struct(old_act, arg2, 0);
7726                 pact = &act;
7727             }
7728             ret = get_errno(do_sigaction(arg1, pact, &oact));
7729             if (!is_error(ret) && arg3) {
7730                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7731                     return -TARGET_EFAULT;
7732                 old_act->_sa_handler = oact._sa_handler;
7733                 old_act->sa_mask = oact.sa_mask.sig[0];
7734                 old_act->sa_flags = oact.sa_flags;
7735                 unlock_user_struct(old_act, arg3, 1);
7736             }
7737 #elif defined(TARGET_MIPS)
7738             struct target_sigaction act, oact, *pact, *old_act;
7739 
7740             if (arg2) {
7741                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7742                     return -TARGET_EFAULT;
7743                 act._sa_handler = old_act->_sa_handler;
7744                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7745                 act.sa_flags = old_act->sa_flags;
7746                 unlock_user_struct(old_act, arg2, 0);
7747                 pact = &act;
7748             } else {
7749                 pact = NULL;
7750             }
7751 
7752             ret = get_errno(do_sigaction(arg1, pact, &oact));
7753 
7754             if (!is_error(ret) && arg3) {
7755                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7756                     return -TARGET_EFAULT;
7757                 old_act->_sa_handler = oact._sa_handler;
7758                 old_act->sa_flags = oact.sa_flags;
7759                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7760                 old_act->sa_mask.sig[1] = 0;
7761                 old_act->sa_mask.sig[2] = 0;
7762                 old_act->sa_mask.sig[3] = 0;
7763                 unlock_user_struct(old_act, arg3, 1);
7764             }
7765 #else
7766             struct target_old_sigaction *old_act;
7767             struct target_sigaction act, oact, *pact;
7768             if (arg2) {
7769                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7770                     return -TARGET_EFAULT;
7771                 act._sa_handler = old_act->_sa_handler;
7772                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7773                 act.sa_flags = old_act->sa_flags;
7774                 act.sa_restorer = old_act->sa_restorer;
7775 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7776                 act.ka_restorer = 0;
7777 #endif
7778                 unlock_user_struct(old_act, arg2, 0);
7779                 pact = &act;
7780             } else {
7781                 pact = NULL;
7782             }
7783             ret = get_errno(do_sigaction(arg1, pact, &oact));
7784             if (!is_error(ret) && arg3) {
7785                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7786                     return -TARGET_EFAULT;
7787                 old_act->_sa_handler = oact._sa_handler;
7788                 old_act->sa_mask = oact.sa_mask.sig[0];
7789                 old_act->sa_flags = oact.sa_flags;
7790                 old_act->sa_restorer = oact.sa_restorer;
7791                 unlock_user_struct(old_act, arg3, 1);
7792             }
7793 #endif
7794         }
7795         return ret;
7796 #endif
7797     case TARGET_NR_rt_sigaction:
7798         {
7799 #if defined(TARGET_ALPHA)
7800             /* For Alpha and SPARC this is a 5 argument syscall, with
7801              * a 'restorer' parameter which must be copied into the
7802              * sa_restorer field of the sigaction struct.
7803              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7804              * and arg5 is the sigsetsize.
7805              * Alpha also has a separate rt_sigaction struct that it uses
7806              * here; SPARC uses the usual sigaction struct.
7807              */
7808             struct target_rt_sigaction *rt_act;
7809             struct target_sigaction act, oact, *pact = 0;
7810 
7811             if (arg4 != sizeof(target_sigset_t)) {
7812                 return -TARGET_EINVAL;
7813             }
7814             if (arg2) {
7815                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7816                     return -TARGET_EFAULT;
7817                 act._sa_handler = rt_act->_sa_handler;
7818                 act.sa_mask = rt_act->sa_mask;
7819                 act.sa_flags = rt_act->sa_flags;
7820                 act.sa_restorer = arg5;
7821                 unlock_user_struct(rt_act, arg2, 0);
7822                 pact = &act;
7823             }
7824             ret = get_errno(do_sigaction(arg1, pact, &oact));
7825             if (!is_error(ret) && arg3) {
7826                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7827                     return -TARGET_EFAULT;
7828                 rt_act->_sa_handler = oact._sa_handler;
7829                 rt_act->sa_mask = oact.sa_mask;
7830                 rt_act->sa_flags = oact.sa_flags;
7831                 unlock_user_struct(rt_act, arg3, 1);
7832             }
7833 #else
7834 #ifdef TARGET_SPARC
7835             target_ulong restorer = arg4;
7836             target_ulong sigsetsize = arg5;
7837 #else
7838             target_ulong sigsetsize = arg4;
7839 #endif
7840             struct target_sigaction *act;
7841             struct target_sigaction *oact;
7842 
7843             if (sigsetsize != sizeof(target_sigset_t)) {
7844                 return -TARGET_EINVAL;
7845             }
7846             if (arg2) {
7847                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
7848                     return -TARGET_EFAULT;
7849                 }
7850 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7851                 act->ka_restorer = restorer;
7852 #endif
7853             } else {
7854                 act = NULL;
7855             }
7856             if (arg3) {
7857                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7858                     ret = -TARGET_EFAULT;
7859                     goto rt_sigaction_fail;
7860                 }
7861             } else
7862                 oact = NULL;
7863             ret = get_errno(do_sigaction(arg1, act, oact));
7864         rt_sigaction_fail:
7865             if (act)
7866                 unlock_user_struct(act, arg2, 0);
7867             if (oact)
7868                 unlock_user_struct(oact, arg3, 1);
7869 #endif
7870         }
7871         return ret;
7872 #ifdef TARGET_NR_sgetmask /* not on alpha */
7873     case TARGET_NR_sgetmask:
7874         {
7875             sigset_t cur_set;
7876             abi_ulong target_set;
7877             ret = do_sigprocmask(0, NULL, &cur_set);
7878             if (!ret) {
7879                 host_to_target_old_sigset(&target_set, &cur_set);
7880                 ret = target_set;
7881             }
7882         }
7883         return ret;
7884 #endif
7885 #ifdef TARGET_NR_ssetmask /* not on alpha */
7886     case TARGET_NR_ssetmask:
7887         {
7888             sigset_t set, oset;
7889             abi_ulong target_set = arg1;
7890             target_to_host_old_sigset(&set, &target_set);
7891             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7892             if (!ret) {
7893                 host_to_target_old_sigset(&target_set, &oset);
7894                 ret = target_set;
7895             }
7896         }
7897         return ret;
7898 #endif
7899 #ifdef TARGET_NR_sigprocmask
7900     case TARGET_NR_sigprocmask:
7901         {
7902 #if defined(TARGET_ALPHA)
7903             sigset_t set, oldset;
7904             abi_ulong mask;
7905             int how;
7906 
7907             switch (arg1) {
7908             case TARGET_SIG_BLOCK:
7909                 how = SIG_BLOCK;
7910                 break;
7911             case TARGET_SIG_UNBLOCK:
7912                 how = SIG_UNBLOCK;
7913                 break;
7914             case TARGET_SIG_SETMASK:
7915                 how = SIG_SETMASK;
7916                 break;
7917             default:
7918                 return -TARGET_EINVAL;
7919             }
7920             mask = arg2;
7921             target_to_host_old_sigset(&set, &mask);
7922 
7923             ret = do_sigprocmask(how, &set, &oldset);
7924             if (!is_error(ret)) {
7925                 host_to_target_old_sigset(&mask, &oldset);
7926                 ret = mask;
7927                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7928             }
7929 #else
7930             sigset_t set, oldset, *set_ptr;
7931             int how;
7932 
7933             if (arg2) {
7934                 switch (arg1) {
7935                 case TARGET_SIG_BLOCK:
7936                     how = SIG_BLOCK;
7937                     break;
7938                 case TARGET_SIG_UNBLOCK:
7939                     how = SIG_UNBLOCK;
7940                     break;
7941                 case TARGET_SIG_SETMASK:
7942                     how = SIG_SETMASK;
7943                     break;
7944                 default:
7945                     return -TARGET_EINVAL;
7946                 }
7947                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7948                     return -TARGET_EFAULT;
7949                 target_to_host_old_sigset(&set, p);
7950                 unlock_user(p, arg2, 0);
7951                 set_ptr = &set;
7952             } else {
7953                 how = 0;
7954                 set_ptr = NULL;
7955             }
7956             ret = do_sigprocmask(how, set_ptr, &oldset);
7957             if (!is_error(ret) && arg3) {
7958                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7959                     return -TARGET_EFAULT;
7960                 host_to_target_old_sigset(p, &oldset);
7961                 unlock_user(p, arg3, sizeof(target_sigset_t));
7962             }
7963 #endif
7964         }
7965         return ret;
7966 #endif
7967     case TARGET_NR_rt_sigprocmask:
7968         {
7969             int how = arg1;
7970             sigset_t set, oldset, *set_ptr;
7971 
7972             if (arg4 != sizeof(target_sigset_t)) {
7973                 return -TARGET_EINVAL;
7974             }
7975 
7976             if (arg2) {
7977                 switch(how) {
7978                 case TARGET_SIG_BLOCK:
7979                     how = SIG_BLOCK;
7980                     break;
7981                 case TARGET_SIG_UNBLOCK:
7982                     how = SIG_UNBLOCK;
7983                     break;
7984                 case TARGET_SIG_SETMASK:
7985                     how = SIG_SETMASK;
7986                     break;
7987                 default:
7988                     return -TARGET_EINVAL;
7989                 }
7990                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7991                     return -TARGET_EFAULT;
7992                 target_to_host_sigset(&set, p);
7993                 unlock_user(p, arg2, 0);
7994                 set_ptr = &set;
7995             } else {
7996                 how = 0;
7997                 set_ptr = NULL;
7998             }
7999             ret = do_sigprocmask(how, set_ptr, &oldset);
8000             if (!is_error(ret) && arg3) {
8001                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8002                     return -TARGET_EFAULT;
8003                 host_to_target_sigset(p, &oldset);
8004                 unlock_user(p, arg3, sizeof(target_sigset_t));
8005             }
8006         }
8007         return ret;
8008 #ifdef TARGET_NR_sigpending
8009     case TARGET_NR_sigpending:
8010         {
8011             sigset_t set;
8012             ret = get_errno(sigpending(&set));
8013             if (!is_error(ret)) {
8014                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8015                     return -TARGET_EFAULT;
8016                 host_to_target_old_sigset(p, &set);
8017                 unlock_user(p, arg1, sizeof(target_sigset_t));
8018             }
8019         }
8020         return ret;
8021 #endif
8022     case TARGET_NR_rt_sigpending:
8023         {
8024             sigset_t set;
8025 
8026             /* Yes, this check is >, not != like most. We follow the kernel's
8027              * logic and it does it like this because it implements
8028              * NR_sigpending through the same code path, and in that case
8029              * the old_sigset_t is smaller in size.
8030              */
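                 /*
                  * In practice this means a size smaller than target_sigset_t
                  * (such as the old_sigset_t size) is accepted; only an
                  * over-large size is rejected.
                  */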
8031             if (arg2 > sizeof(target_sigset_t)) {
8032                 return -TARGET_EINVAL;
8033             }
8034 
8035             ret = get_errno(sigpending(&set));
8036             if (!is_error(ret)) {
8037                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8038                     return -TARGET_EFAULT;
8039                 host_to_target_sigset(p, &set);
8040                 unlock_user(p, arg1, sizeof(target_sigset_t));
8041             }
8042         }
8043         return ret;
8044 #ifdef TARGET_NR_sigsuspend
8045     case TARGET_NR_sigsuspend:
8046         {
8047             TaskState *ts = cpu->opaque;
8048 #if defined(TARGET_ALPHA)
8049             abi_ulong mask = arg1;
8050             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8051 #else
8052             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8053                 return -TARGET_EFAULT;
8054             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8055             unlock_user(p, arg1, 0);
8056 #endif
8057             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8058                                                SIGSET_T_SIZE));
8059             if (ret != -TARGET_ERESTARTSYS) {
8060                 ts->in_sigsuspend = 1;
8061             }
8062         }
8063         return ret;
8064 #endif
8065     case TARGET_NR_rt_sigsuspend:
8066         {
8067             TaskState *ts = cpu->opaque;
8068 
8069             if (arg2 != sizeof(target_sigset_t)) {
8070                 return -TARGET_EINVAL;
8071             }
8072             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8073                 return -TARGET_EFAULT;
8074             target_to_host_sigset(&ts->sigsuspend_mask, p);
8075             unlock_user(p, arg1, 0);
8076             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8077                                                SIGSET_T_SIZE));
8078             if (ret != -TARGET_ERESTARTSYS) {
8079                 ts->in_sigsuspend = 1;
8080             }
8081         }
8082         return ret;
8083     case TARGET_NR_rt_sigtimedwait:
8084         {
8085             sigset_t set;
8086             struct timespec uts, *puts;
8087             siginfo_t uinfo;
8088 
8089             if (arg4 != sizeof(target_sigset_t)) {
8090                 return -TARGET_EINVAL;
8091             }
8092 
8093             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8094                 return -TARGET_EFAULT;
8095             target_to_host_sigset(&set, p);
8096             unlock_user(p, arg1, 0);
8097             if (arg3) {
8098                 puts = &uts;
8099                 if (target_to_host_timespec(puts, arg3)) {
                         return -TARGET_EFAULT;
                     }
8100             } else {
8101                 puts = NULL;
8102             }
8103             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8104                                                  SIGSET_T_SIZE));
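                 /*
                  * On success the host call returns the number of the signal
                  * that was consumed; it is converted to the guest's signal
                  * numbering below (the two can differ, e.g. for realtime
                  * signals).
                  */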
8105             if (!is_error(ret)) {
8106                 if (arg2) {
8107                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8108                                   0);
8109                     if (!p) {
8110                         return -TARGET_EFAULT;
8111                     }
8112                     host_to_target_siginfo(p, &uinfo);
8113                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8114                 }
8115                 ret = host_to_target_signal(ret);
8116             }
8117         }
8118         return ret;
8119     case TARGET_NR_rt_sigqueueinfo:
8120         {
8121             siginfo_t uinfo;
8122 
8123             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8124             if (!p) {
8125                 return -TARGET_EFAULT;
8126             }
8127             target_to_host_siginfo(&uinfo, p);
8128             unlock_user(p, arg3, 0);
8129             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8130         }
8131         return ret;
8132     case TARGET_NR_rt_tgsigqueueinfo:
8133         {
8134             siginfo_t uinfo;
8135 
8136             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8137             if (!p) {
8138                 return -TARGET_EFAULT;
8139             }
8140             target_to_host_siginfo(&uinfo, p);
8141             unlock_user(p, arg4, 0);
8142             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8143         }
8144         return ret;
8145 #ifdef TARGET_NR_sigreturn
8146     case TARGET_NR_sigreturn:
8147         if (block_signals()) {
8148             return -TARGET_ERESTARTSYS;
8149         }
8150         return do_sigreturn(cpu_env);
8151 #endif
8152     case TARGET_NR_rt_sigreturn:
8153         if (block_signals()) {
8154             return -TARGET_ERESTARTSYS;
8155         }
8156         return do_rt_sigreturn(cpu_env);
8157     case TARGET_NR_sethostname:
8158         if (!(p = lock_user_string(arg1)))
8159             return -TARGET_EFAULT;
8160         ret = get_errno(sethostname(p, arg2));
8161         unlock_user(p, arg1, 0);
8162         return ret;
8163 #ifdef TARGET_NR_setrlimit
8164     case TARGET_NR_setrlimit:
8165         {
8166             int resource = target_to_host_resource(arg1);
8167             struct target_rlimit *target_rlim;
8168             struct rlimit rlim;
8169             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8170                 return -TARGET_EFAULT;
8171             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8172             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8173             unlock_user_struct(target_rlim, arg2, 0);
8174             /*
8175              * If we just passed through resource limit settings for memory then
8176              * they would also apply to QEMU's own allocations, and QEMU will
8177              * crash or hang or die if its allocations fail. Ideally we would
8178              * track the guest allocations in QEMU and apply the limits ourselves.
8179              * For now, just tell the guest the call succeeded but don't actually
8180              * limit anything.
8181              */
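                 /*
                  * For example, a guest "ulimit -v" (RLIMIT_AS) will appear
                  * to succeed but is not enforced, while a limit such as
                  * RLIMIT_NOFILE is still passed through to the host.
                  */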
8182             if (resource != RLIMIT_AS &&
8183                 resource != RLIMIT_DATA &&
8184                 resource != RLIMIT_STACK) {
8185                 return get_errno(setrlimit(resource, &rlim));
8186             } else {
8187                 return 0;
8188             }
8189         }
8190 #endif
8191 #ifdef TARGET_NR_getrlimit
8192     case TARGET_NR_getrlimit:
8193         {
8194             int resource = target_to_host_resource(arg1);
8195             struct target_rlimit *target_rlim;
8196             struct rlimit rlim;
8197 
8198             ret = get_errno(getrlimit(resource, &rlim));
8199             if (!is_error(ret)) {
8200                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8201                     return -TARGET_EFAULT;
8202                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8203                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8204                 unlock_user_struct(target_rlim, arg2, 1);
8205             }
8206         }
8207         return ret;
8208 #endif
8209     case TARGET_NR_getrusage:
8210         {
8211             struct rusage rusage;
8212             ret = get_errno(getrusage(arg1, &rusage));
8213             if (!is_error(ret)) {
8214                 ret = host_to_target_rusage(arg2, &rusage);
8215             }
8216         }
8217         return ret;
8218     case TARGET_NR_gettimeofday:
8219         {
8220             struct timeval tv;
8221             ret = get_errno(gettimeofday(&tv, NULL));
8222             if (!is_error(ret)) {
8223                 if (copy_to_user_timeval(arg1, &tv))
8224                     return -TARGET_EFAULT;
8225             }
8226         }
8227         return ret;
8228     case TARGET_NR_settimeofday:
8229         {
8230             struct timeval tv, *ptv = NULL;
8231             struct timezone tz, *ptz = NULL;
8232 
8233             if (arg1) {
8234                 if (copy_from_user_timeval(&tv, arg1)) {
8235                     return -TARGET_EFAULT;
8236                 }
8237                 ptv = &tv;
8238             }
8239 
8240             if (arg2) {
8241                 if (copy_from_user_timezone(&tz, arg2)) {
8242                     return -TARGET_EFAULT;
8243                 }
8244                 ptz = &tz;
8245             }
8246 
8247             return get_errno(settimeofday(ptv, ptz));
8248         }
8249 #if defined(TARGET_NR_select)
8250     case TARGET_NR_select:
8251 #if defined(TARGET_WANT_NI_OLD_SELECT)
8252         /* Some architectures used to have old_select here,
8253          * but now return ENOSYS for it.
8254          */
8255         ret = -TARGET_ENOSYS;
8256 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8257         ret = do_old_select(arg1);
8258 #else
8259         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8260 #endif
8261         return ret;
8262 #endif
8263 #ifdef TARGET_NR_pselect6
8264     case TARGET_NR_pselect6:
8265         {
8266             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8267             fd_set rfds, wfds, efds;
8268             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8269             struct timespec ts, *ts_ptr;
8270 
8271             /*
8272              * The 6th arg is actually two args smashed together,
8273              * so we cannot use the C library.
8274              */
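                 /*
                  * arg6 points at two target words: the guest address of the
                  * sigset followed by its size; they are unpacked by hand
                  * further down.
                  */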
8275             sigset_t set;
8276             struct {
8277                 sigset_t *set;
8278                 size_t size;
8279             } sig, *sig_ptr;
8280 
8281             abi_ulong arg_sigset, arg_sigsize, *arg7;
8282             target_sigset_t *target_sigset;
8283 
8284             n = arg1;
8285             rfd_addr = arg2;
8286             wfd_addr = arg3;
8287             efd_addr = arg4;
8288             ts_addr = arg5;
8289 
8290             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8291             if (ret) {
8292                 return ret;
8293             }
8294             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8295             if (ret) {
8296                 return ret;
8297             }
8298             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8299             if (ret) {
8300                 return ret;
8301             }
8302 
8303             /*
8304              * This takes a timespec, and not a timeval, so we cannot
8305              * use the do_select() helper ...
8306              */
8307             if (ts_addr) {
8308                 if (target_to_host_timespec(&ts, ts_addr)) {
8309                     return -TARGET_EFAULT;
8310                 }
8311                 ts_ptr = &ts;
8312             } else {
8313                 ts_ptr = NULL;
8314             }
8315 
8316             /* Extract the two packed args for the sigset */
8317             if (arg6) {
8318                 sig_ptr = &sig;
8319                 sig.size = SIGSET_T_SIZE;
8320 
8321                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8322                 if (!arg7) {
8323                     return -TARGET_EFAULT;
8324                 }
8325                 arg_sigset = tswapal(arg7[0]);
8326                 arg_sigsize = tswapal(arg7[1]);
8327                 unlock_user(arg7, arg6, 0);
8328 
8329                 if (arg_sigset) {
8330                     sig.set = &set;
8331                     if (arg_sigsize != sizeof(*target_sigset)) {
8332                         /* Like the kernel, we enforce correct size sigsets */
8333                         return -TARGET_EINVAL;
8334                     }
8335                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8336                                               sizeof(*target_sigset), 1);
8337                     if (!target_sigset) {
8338                         return -TARGET_EFAULT;
8339                     }
8340                     target_to_host_sigset(&set, target_sigset);
8341                     unlock_user(target_sigset, arg_sigset, 0);
8342                 } else {
8343                     sig.set = NULL;
8344                 }
8345             } else {
8346                 sig_ptr = NULL;
8347             }
8348 
8349             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8350                                           ts_ptr, sig_ptr));
8351 
8352             if (!is_error(ret)) {
8353                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8354                     return -TARGET_EFAULT;
8355                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8356                     return -TARGET_EFAULT;
8357                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8358                     return -TARGET_EFAULT;
8359 
8360                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8361                     return -TARGET_EFAULT;
8362             }
8363         }
8364         return ret;
8365 #endif
8366 #ifdef TARGET_NR_symlink
8367     case TARGET_NR_symlink:
8368         {
8369             void *p2;
8370             p = lock_user_string(arg1);
8371             p2 = lock_user_string(arg2);
8372             if (!p || !p2)
8373                 ret = -TARGET_EFAULT;
8374             else
8375                 ret = get_errno(symlink(p, p2));
8376             unlock_user(p2, arg2, 0);
8377             unlock_user(p, arg1, 0);
8378         }
8379         return ret;
8380 #endif
8381 #if defined(TARGET_NR_symlinkat)
8382     case TARGET_NR_symlinkat:
8383         {
8384             void *p2;
8385             p  = lock_user_string(arg1);
8386             p2 = lock_user_string(arg3);
8387             if (!p || !p2)
8388                 ret = -TARGET_EFAULT;
8389             else
8390                 ret = get_errno(symlinkat(p, arg2, p2));
8391             unlock_user(p2, arg3, 0);
8392             unlock_user(p, arg1, 0);
8393         }
8394         return ret;
8395 #endif
8396 #ifdef TARGET_NR_readlink
8397     case TARGET_NR_readlink:
8398         {
8399             void *p2;
8400             p = lock_user_string(arg1);
8401             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8402             if (!p || !p2) {
8403                 ret = -TARGET_EFAULT;
8404             } else if (!arg3) {
8405                 /* Short circuit this for the magic exe check. */
8406                 ret = -TARGET_EINVAL;
8407             } else if (is_proc_myself((const char *)p, "exe")) {
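                     /*
                      * Reads of /proc/self/exe are intercepted so the guest
                      * sees the path of the emulated binary (exec_path)
                      * rather than that of the QEMU executable running it.
                      */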
8408                 char real[PATH_MAX], *temp;
8409                 temp = realpath(exec_path, real);
8410                 /* Return value is # of bytes that we wrote to the buffer. */
8411                 if (temp == NULL) {
8412                     ret = get_errno(-1);
8413                 } else {
8414                     /* Don't worry about sign mismatch as earlier mapping
8415                      * logic would have thrown a bad address error. */
8416                     ret = MIN(strlen(real), arg3);
8417                     /* We cannot NUL terminate the string. */
8418                     memcpy(p2, real, ret);
8419                 }
8420             } else {
8421                 ret = get_errno(readlink(path(p), p2, arg3));
8422             }
8423             unlock_user(p2, arg2, ret);
8424             unlock_user(p, arg1, 0);
8425         }
8426         return ret;
8427 #endif
8428 #if defined(TARGET_NR_readlinkat)
8429     case TARGET_NR_readlinkat:
8430         {
8431             void *p2;
8432             p  = lock_user_string(arg2);
8433             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8434             if (!p || !p2) {
8435                 ret = -TARGET_EFAULT;
8436             } else if (is_proc_myself((const char *)p, "exe")) {
8437                 char real[PATH_MAX], *temp;
8438                 temp = realpath(exec_path, real);
8439                 ret = temp == NULL ? get_errno(-1) : strlen(real);
8440                 snprintf((char *)p2, arg4, "%s", real);
8441             } else {
8442                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8443             }
8444             unlock_user(p2, arg3, ret);
8445             unlock_user(p, arg2, 0);
8446         }
8447         return ret;
8448 #endif
8449 #ifdef TARGET_NR_swapon
8450     case TARGET_NR_swapon:
8451         if (!(p = lock_user_string(arg1)))
8452             return -TARGET_EFAULT;
8453         ret = get_errno(swapon(p, arg2));
8454         unlock_user(p, arg1, 0);
8455         return ret;
8456 #endif
8457     case TARGET_NR_reboot:
8458         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8459            /* arg4 is only used here; it must be ignored in all other cases */
8460            p = lock_user_string(arg4);
8461            if (!p) {
8462                return -TARGET_EFAULT;
8463            }
8464            ret = get_errno(reboot(arg1, arg2, arg3, p));
8465            unlock_user(p, arg4, 0);
8466         } else {
8467            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8468         }
8469         return ret;
8470 #ifdef TARGET_NR_mmap
8471     case TARGET_NR_mmap:
8472 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8473     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8474     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8475     || defined(TARGET_S390X)
8476         {
8477             abi_ulong *v;
8478             abi_ulong v1, v2, v3, v4, v5, v6;
8479             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8480                 return -TARGET_EFAULT;
8481             v1 = tswapal(v[0]);
8482             v2 = tswapal(v[1]);
8483             v3 = tswapal(v[2]);
8484             v4 = tswapal(v[3]);
8485             v5 = tswapal(v[4]);
8486             v6 = tswapal(v[5]);
8487             unlock_user(v, arg1, 0);
8488             ret = get_errno(target_mmap(v1, v2, v3,
8489                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8490                                         v5, v6));
8491         }
8492 #else
8493         ret = get_errno(target_mmap(arg1, arg2, arg3,
8494                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8495                                     arg5,
8496                                     arg6));
8497 #endif
8498         return ret;
8499 #endif
8500 #ifdef TARGET_NR_mmap2
8501     case TARGET_NR_mmap2:
8502 #ifndef MMAP_SHIFT
8503 #define MMAP_SHIFT 12
8504 #endif
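             /*
              * mmap2 takes its file offset in units of 1 << MMAP_SHIFT bytes
              * (4096 with the default shift of 12), so e.g. a guest offset
              * argument of 0x10 maps file offset 0x10000.
              */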
8505         ret = target_mmap(arg1, arg2, arg3,
8506                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8507                           arg5, arg6 << MMAP_SHIFT);
8508         return get_errno(ret);
8509 #endif
8510     case TARGET_NR_munmap:
8511         return get_errno(target_munmap(arg1, arg2));
8512     case TARGET_NR_mprotect:
8513         {
8514             TaskState *ts = cpu->opaque;
8515             /* Special hack to detect libc making the stack executable.  */
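                 /*
                  * PROT_GROWSDOWN asks the kernel to apply the change all the
                  * way down to the bottom of a grows-down (stack) mapping.
                  * The guest stack is an ordinary mapping here, so this is
                  * emulated by widening the range down to the guest's
                  * stack_limit and dropping the flag.
                  */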
8516             if ((arg3 & PROT_GROWSDOWN)
8517                 && arg1 >= ts->info->stack_limit
8518                 && arg1 <= ts->info->start_stack) {
8519                 arg3 &= ~PROT_GROWSDOWN;
8520                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8521                 arg1 = ts->info->stack_limit;
8522             }
8523         }
8524         return get_errno(target_mprotect(arg1, arg2, arg3));
8525 #ifdef TARGET_NR_mremap
8526     case TARGET_NR_mremap:
8527         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8528 #endif
8529         /* ??? msync/mlock/munlock are broken for softmmu.  */
8530 #ifdef TARGET_NR_msync
8531     case TARGET_NR_msync:
8532         return get_errno(msync(g2h(arg1), arg2, arg3));
8533 #endif
8534 #ifdef TARGET_NR_mlock
8535     case TARGET_NR_mlock:
8536         return get_errno(mlock(g2h(arg1), arg2));
8537 #endif
8538 #ifdef TARGET_NR_munlock
8539     case TARGET_NR_munlock:
8540         return get_errno(munlock(g2h(arg1), arg2));
8541 #endif
8542 #ifdef TARGET_NR_mlockall
8543     case TARGET_NR_mlockall:
8544         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8545 #endif
8546 #ifdef TARGET_NR_munlockall
8547     case TARGET_NR_munlockall:
8548         return get_errno(munlockall());
8549 #endif
8550 #ifdef TARGET_NR_truncate
8551     case TARGET_NR_truncate:
8552         if (!(p = lock_user_string(arg1)))
8553             return -TARGET_EFAULT;
8554         ret = get_errno(truncate(p, arg2));
8555         unlock_user(p, arg1, 0);
8556         return ret;
8557 #endif
8558 #ifdef TARGET_NR_ftruncate
8559     case TARGET_NR_ftruncate:
8560         return get_errno(ftruncate(arg1, arg2));
8561 #endif
8562     case TARGET_NR_fchmod:
8563         return get_errno(fchmod(arg1, arg2));
8564 #if defined(TARGET_NR_fchmodat)
8565     case TARGET_NR_fchmodat:
8566         if (!(p = lock_user_string(arg2)))
8567             return -TARGET_EFAULT;
8568         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8569         unlock_user(p, arg2, 0);
8570         return ret;
8571 #endif
8572     case TARGET_NR_getpriority:
8573         /* Note that negative values are valid for getpriority, so we must
8574            differentiate based on errno settings.  */
8575         errno = 0;
8576         ret = getpriority(arg1, arg2);
8577         if (ret == -1 && errno != 0) {
8578             return -host_to_target_errno(errno);
8579         }
8580 #ifdef TARGET_ALPHA
8581         /* Return value is the unbiased priority.  Signal no error.  */
8582         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8583 #else
8584         /* Return value is a biased priority to avoid negative numbers.  */
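             /* e.g. a host nice value of -5 is returned to the guest as 25,
              * matching the raw kernel syscall's convention. */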
8585         ret = 20 - ret;
8586 #endif
8587         return ret;
8588     case TARGET_NR_setpriority:
8589         return get_errno(setpriority(arg1, arg2, arg3));
8590 #ifdef TARGET_NR_statfs
8591     case TARGET_NR_statfs:
8592         if (!(p = lock_user_string(arg1))) {
8593             return -TARGET_EFAULT;
8594         }
8595         ret = get_errno(statfs(path(p), &stfs));
8596         unlock_user(p, arg1, 0);
8597     convert_statfs:
8598         if (!is_error(ret)) {
8599             struct target_statfs *target_stfs;
8600 
8601             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8602                 return -TARGET_EFAULT;
8603             __put_user(stfs.f_type, &target_stfs->f_type);
8604             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8605             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8606             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8607             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8608             __put_user(stfs.f_files, &target_stfs->f_files);
8609             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8610             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8611             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8612             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8613             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8614 #ifdef _STATFS_F_FLAGS
8615             __put_user(stfs.f_flags, &target_stfs->f_flags);
8616 #else
8617             __put_user(0, &target_stfs->f_flags);
8618 #endif
8619             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8620             unlock_user_struct(target_stfs, arg2, 1);
8621         }
8622         return ret;
8623 #endif
8624 #ifdef TARGET_NR_fstatfs
8625     case TARGET_NR_fstatfs:
8626         ret = get_errno(fstatfs(arg1, &stfs));
8627         goto convert_statfs;
8628 #endif
8629 #ifdef TARGET_NR_statfs64
8630     case TARGET_NR_statfs64:
8631         if (!(p = lock_user_string(arg1))) {
8632             return -TARGET_EFAULT;
8633         }
8634         ret = get_errno(statfs(path(p), &stfs));
8635         unlock_user(p, arg1, 0);
8636     convert_statfs64:
8637         if (!is_error(ret)) {
8638             struct target_statfs64 *target_stfs;
8639 
8640             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8641                 return -TARGET_EFAULT;
8642             __put_user(stfs.f_type, &target_stfs->f_type);
8643             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8644             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8645             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8646             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8647             __put_user(stfs.f_files, &target_stfs->f_files);
8648             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8649             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8650             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8651             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8652             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8653             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8654             unlock_user_struct(target_stfs, arg3, 1);
8655         }
8656         return ret;
8657     case TARGET_NR_fstatfs64:
8658         ret = get_errno(fstatfs(arg1, &stfs));
8659         goto convert_statfs64;
8660 #endif
8661 #ifdef TARGET_NR_socketcall
8662     case TARGET_NR_socketcall:
8663         return do_socketcall(arg1, arg2);
8664 #endif
8665 #ifdef TARGET_NR_accept
8666     case TARGET_NR_accept:
8667         return do_accept4(arg1, arg2, arg3, 0);
8668 #endif
8669 #ifdef TARGET_NR_accept4
8670     case TARGET_NR_accept4:
8671         return do_accept4(arg1, arg2, arg3, arg4);
8672 #endif
8673 #ifdef TARGET_NR_bind
8674     case TARGET_NR_bind:
8675         return do_bind(arg1, arg2, arg3);
8676 #endif
8677 #ifdef TARGET_NR_connect
8678     case TARGET_NR_connect:
8679         return do_connect(arg1, arg2, arg3);
8680 #endif
8681 #ifdef TARGET_NR_getpeername
8682     case TARGET_NR_getpeername:
8683         return do_getpeername(arg1, arg2, arg3);
8684 #endif
8685 #ifdef TARGET_NR_getsockname
8686     case TARGET_NR_getsockname:
8687         return do_getsockname(arg1, arg2, arg3);
8688 #endif
8689 #ifdef TARGET_NR_getsockopt
8690     case TARGET_NR_getsockopt:
8691         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8692 #endif
8693 #ifdef TARGET_NR_listen
8694     case TARGET_NR_listen:
8695         return get_errno(listen(arg1, arg2));
8696 #endif
8697 #ifdef TARGET_NR_recv
8698     case TARGET_NR_recv:
8699         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8700 #endif
8701 #ifdef TARGET_NR_recvfrom
8702     case TARGET_NR_recvfrom:
8703         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8704 #endif
8705 #ifdef TARGET_NR_recvmsg
8706     case TARGET_NR_recvmsg:
8707         return do_sendrecvmsg(arg1, arg2, arg3, 0);
8708 #endif
8709 #ifdef TARGET_NR_send
8710     case TARGET_NR_send:
8711         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8712 #endif
8713 #ifdef TARGET_NR_sendmsg
8714     case TARGET_NR_sendmsg:
8715         return do_sendrecvmsg(arg1, arg2, arg3, 1);
8716 #endif
8717 #ifdef TARGET_NR_sendmmsg
8718     case TARGET_NR_sendmmsg:
8719         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8720     case TARGET_NR_recvmmsg:
8721         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8722 #endif
8723 #ifdef TARGET_NR_sendto
8724     case TARGET_NR_sendto:
8725         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8726 #endif
8727 #ifdef TARGET_NR_shutdown
8728     case TARGET_NR_shutdown:
8729         return get_errno(shutdown(arg1, arg2));
8730 #endif
8731 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8732     case TARGET_NR_getrandom:
8733         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8734         if (!p) {
8735             return -TARGET_EFAULT;
8736         }
8737         ret = get_errno(getrandom(p, arg2, arg3));
8738         unlock_user(p, arg1, ret);
8739         return ret;
8740 #endif
8741 #ifdef TARGET_NR_socket
8742     case TARGET_NR_socket:
8743         return do_socket(arg1, arg2, arg3);
8744 #endif
8745 #ifdef TARGET_NR_socketpair
8746     case TARGET_NR_socketpair:
8747         return do_socketpair(arg1, arg2, arg3, arg4);
8748 #endif
8749 #ifdef TARGET_NR_setsockopt
8750     case TARGET_NR_setsockopt:
8751         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8752 #endif
8753 #if defined(TARGET_NR_syslog)
8754     case TARGET_NR_syslog:
8755         {
8756             int len = arg3;
8757 
8758             switch (arg1) {
8759             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
8760             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
8761             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
8762             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
8763             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
8764             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8765             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
8766             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
8767                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8768             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
8769             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
8770             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
8771                 {
8772                     if (len < 0) {
8773                         return -TARGET_EINVAL;
8774                     }
8775                     if (len == 0) {
8776                         return 0;
8777                     }
8778                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8779                     if (!p) {
8780                         return -TARGET_EFAULT;
8781                     }
8782                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8783                     unlock_user(p, arg2, arg3);
8784                 }
8785                 return ret;
8786             default:
8787                 return -TARGET_EINVAL;
8788             }
8789         }
8790         break;
8791 #endif
8792     case TARGET_NR_setitimer:
8793         {
8794             struct itimerval value, ovalue, *pvalue;
8795 
8796             if (arg2) {
8797                 pvalue = &value;
8798                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8799                     || copy_from_user_timeval(&pvalue->it_value,
8800                                               arg2 + sizeof(struct target_timeval)))
8801                     return -TARGET_EFAULT;
8802             } else {
8803                 pvalue = NULL;
8804             }
8805             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8806             if (!is_error(ret) && arg3) {
8807                 if (copy_to_user_timeval(arg3,
8808                                          &ovalue.it_interval)
8809                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8810                                             &ovalue.it_value))
8811                     return -TARGET_EFAULT;
8812             }
8813         }
8814         return ret;
8815     case TARGET_NR_getitimer:
8816         {
8817             struct itimerval value;
8818 
8819             ret = get_errno(getitimer(arg1, &value));
8820             if (!is_error(ret) && arg2) {
8821                 if (copy_to_user_timeval(arg2,
8822                                          &value.it_interval)
8823                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8824                                             &value.it_value))
8825                     return -TARGET_EFAULT;
8826             }
8827         }
8828         return ret;
8829 #ifdef TARGET_NR_stat
8830     case TARGET_NR_stat:
8831         if (!(p = lock_user_string(arg1))) {
8832             return -TARGET_EFAULT;
8833         }
8834         ret = get_errno(stat(path(p), &st));
8835         unlock_user(p, arg1, 0);
8836         goto do_stat;
8837 #endif
8838 #ifdef TARGET_NR_lstat
8839     case TARGET_NR_lstat:
8840         if (!(p = lock_user_string(arg1))) {
8841             return -TARGET_EFAULT;
8842         }
8843         ret = get_errno(lstat(path(p), &st));
8844         unlock_user(p, arg1, 0);
8845         goto do_stat;
8846 #endif
8847 #ifdef TARGET_NR_fstat
8848     case TARGET_NR_fstat:
8849         {
8850             ret = get_errno(fstat(arg1, &st));
8851 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8852         do_stat:
8853 #endif
8854             if (!is_error(ret)) {
8855                 struct target_stat *target_st;
8856 
8857                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8858                     return -TARGET_EFAULT;
8859                 memset(target_st, 0, sizeof(*target_st));
8860                 __put_user(st.st_dev, &target_st->st_dev);
8861                 __put_user(st.st_ino, &target_st->st_ino);
8862                 __put_user(st.st_mode, &target_st->st_mode);
8863                 __put_user(st.st_uid, &target_st->st_uid);
8864                 __put_user(st.st_gid, &target_st->st_gid);
8865                 __put_user(st.st_nlink, &target_st->st_nlink);
8866                 __put_user(st.st_rdev, &target_st->st_rdev);
8867                 __put_user(st.st_size, &target_st->st_size);
8868                 __put_user(st.st_blksize, &target_st->st_blksize);
8869                 __put_user(st.st_blocks, &target_st->st_blocks);
8870                 __put_user(st.st_atime, &target_st->target_st_atime);
8871                 __put_user(st.st_mtime, &target_st->target_st_mtime);
8872                 __put_user(st.st_ctime, &target_st->target_st_ctime);
8873                 unlock_user_struct(target_st, arg2, 1);
8874             }
8875         }
8876         return ret;
8877 #endif
8878     case TARGET_NR_vhangup:
8879         return get_errno(vhangup());
8880 #ifdef TARGET_NR_syscall
8881     case TARGET_NR_syscall:
8882         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8883                           arg6, arg7, arg8, 0);
8884 #endif
8885     case TARGET_NR_wait4:
8886         {
8887             int status;
8888             abi_long status_ptr = arg2;
8889             struct rusage rusage, *rusage_ptr;
8890             abi_ulong target_rusage = arg4;
8891             abi_long rusage_err;
8892             if (target_rusage)
8893                 rusage_ptr = &rusage;
8894             else
8895                 rusage_ptr = NULL;
8896             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8897             if (!is_error(ret)) {
8898                 if (status_ptr && ret) {
8899                     status = host_to_target_waitstatus(status);
8900                     if (put_user_s32(status, status_ptr))
8901                         return -TARGET_EFAULT;
8902                 }
8903                 if (target_rusage) {
8904                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
8905                     if (rusage_err) {
8906                         ret = rusage_err;
8907                     }
8908                 }
8909             }
8910         }
8911         return ret;
8912 #ifdef TARGET_NR_swapoff
8913     case TARGET_NR_swapoff:
8914         if (!(p = lock_user_string(arg1)))
8915             return -TARGET_EFAULT;
8916         ret = get_errno(swapoff(p));
8917         unlock_user(p, arg1, 0);
8918         return ret;
8919 #endif
8920     case TARGET_NR_sysinfo:
8921         {
8922             struct target_sysinfo *target_value;
8923             struct sysinfo value;
8924             ret = get_errno(sysinfo(&value));
8925             if (!is_error(ret) && arg1)
8926             if (!is_error(ret) && arg1) {
8928                     return -TARGET_EFAULT;
8929                 __put_user(value.uptime, &target_value->uptime);
8930                 __put_user(value.loads[0], &target_value->loads[0]);
8931                 __put_user(value.loads[1], &target_value->loads[1]);
8932                 __put_user(value.loads[2], &target_value->loads[2]);
8933                 __put_user(value.totalram, &target_value->totalram);
8934                 __put_user(value.freeram, &target_value->freeram);
8935                 __put_user(value.sharedram, &target_value->sharedram);
8936                 __put_user(value.bufferram, &target_value->bufferram);
8937                 __put_user(value.totalswap, &target_value->totalswap);
8938                 __put_user(value.freeswap, &target_value->freeswap);
8939                 __put_user(value.procs, &target_value->procs);
8940                 __put_user(value.totalhigh, &target_value->totalhigh);
8941                 __put_user(value.freehigh, &target_value->freehigh);
8942                 __put_user(value.mem_unit, &target_value->mem_unit);
8943                 unlock_user_struct(target_value, arg1, 1);
8944             }
8945         }
8946         return ret;
8947 #ifdef TARGET_NR_ipc
8948     case TARGET_NR_ipc:
8949         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
8950 #endif
8951 #ifdef TARGET_NR_semget
8952     case TARGET_NR_semget:
8953         return get_errno(semget(arg1, arg2, arg3));
8954 #endif
8955 #ifdef TARGET_NR_semop
8956     case TARGET_NR_semop:
8957         return do_semop(arg1, arg2, arg3);
8958 #endif
8959 #ifdef TARGET_NR_semctl
8960     case TARGET_NR_semctl:
8961         return do_semctl(arg1, arg2, arg3, arg4);
8962 #endif
8963 #ifdef TARGET_NR_msgctl
8964     case TARGET_NR_msgctl:
8965         return do_msgctl(arg1, arg2, arg3);
8966 #endif
8967 #ifdef TARGET_NR_msgget
8968     case TARGET_NR_msgget:
8969         return get_errno(msgget(arg1, arg2));
8970 #endif
8971 #ifdef TARGET_NR_msgrcv
8972     case TARGET_NR_msgrcv:
8973         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8974 #endif
8975 #ifdef TARGET_NR_msgsnd
8976     case TARGET_NR_msgsnd:
8977         return do_msgsnd(arg1, arg2, arg3, arg4);
8978 #endif
8979 #ifdef TARGET_NR_shmget
8980     case TARGET_NR_shmget:
8981         return get_errno(shmget(arg1, arg2, arg3));
8982 #endif
8983 #ifdef TARGET_NR_shmctl
8984     case TARGET_NR_shmctl:
8985         return do_shmctl(arg1, arg2, arg3);
8986 #endif
8987 #ifdef TARGET_NR_shmat
8988     case TARGET_NR_shmat:
8989         return do_shmat(cpu_env, arg1, arg2, arg3);
8990 #endif
8991 #ifdef TARGET_NR_shmdt
8992     case TARGET_NR_shmdt:
8993         return do_shmdt(arg1);
8994 #endif
8995     case TARGET_NR_fsync:
8996         return get_errno(fsync(arg1));
8997     case TARGET_NR_clone:
8998         /* Linux manages to have three different orderings for its
8999          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9000          * match the kernel's CONFIG_CLONE_* settings.
9001          * Microblaze is further special in that it uses a sixth
9002          * implicit argument to clone for the TLS pointer.
9003          */
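             /*
              * For reference, the kernel's argument orders are:
              *   default:          clone(flags, newsp, parent_tidptr, child_tidptr, tls)
              *   CLONE_BACKWARDS:  clone(flags, newsp, parent_tidptr, tls, child_tidptr)
              *   CLONE_BACKWARDS2: clone(newsp, flags, parent_tidptr, child_tidptr, tls)
              * and the #if ladder below picks guest registers accordingly.
              */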
9004 #if defined(TARGET_MICROBLAZE)
9005         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9006 #elif defined(TARGET_CLONE_BACKWARDS)
9007         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9008 #elif defined(TARGET_CLONE_BACKWARDS2)
9009         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9010 #else
9011         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9012 #endif
9013         return ret;
9014 #ifdef __NR_exit_group
9015         /* new thread calls */
9016     case TARGET_NR_exit_group:
9017         preexit_cleanup(cpu_env, arg1);
9018         return get_errno(exit_group(arg1));
9019 #endif
9020     case TARGET_NR_setdomainname:
9021         if (!(p = lock_user_string(arg1)))
9022             return -TARGET_EFAULT;
9023         ret = get_errno(setdomainname(p, arg2));
9024         unlock_user(p, arg1, 0);
9025         return ret;
9026     case TARGET_NR_uname:
9027         /* no need to transcode because we use the linux syscall */
9028         {
9029             struct new_utsname * buf;
9030 
9031             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9032                 return -TARGET_EFAULT;
9033             ret = get_errno(sys_uname(buf));
9034             if (!is_error(ret)) {
9035                 /* Overwrite the native machine name with whatever is being
9036                    emulated. */
9037                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9038                           sizeof(buf->machine));
9039                 /* Allow the user to override the reported release.  */
9040                 if (qemu_uname_release && *qemu_uname_release) {
9041                     g_strlcpy(buf->release, qemu_uname_release,
9042                               sizeof(buf->release));
9043                 }
9044             }
9045             unlock_user_struct(buf, arg1, 1);
9046         }
9047         return ret;
9048 #ifdef TARGET_I386
9049     case TARGET_NR_modify_ldt:
9050         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9051 #if !defined(TARGET_X86_64)
9052     case TARGET_NR_vm86:
9053         return do_vm86(cpu_env, arg1, arg2);
9054 #endif
9055 #endif
9056     case TARGET_NR_adjtimex:
9057         {
9058             struct timex host_buf;
9059 
9060             if (target_to_host_timex(&host_buf, arg1) != 0) {
9061                 return -TARGET_EFAULT;
9062             }
9063             ret = get_errno(adjtimex(&host_buf));
9064             if (!is_error(ret)) {
9065                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9066                     return -TARGET_EFAULT;
9067                 }
9068             }
9069         }
9070         return ret;
9071 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9072     case TARGET_NR_clock_adjtime:
9073         {
9074             struct timex htx, *phtx = &htx;
9075 
9076             if (target_to_host_timex(phtx, arg2) != 0) {
9077                 return -TARGET_EFAULT;
9078             }
9079             ret = get_errno(clock_adjtime(arg1, phtx));
9080             if (!is_error(ret) && phtx) {
9081                 if (host_to_target_timex(arg2, phtx) != 0) {
9082                     return -TARGET_EFAULT;
9083                 }
9084             }
9085         }
9086         return ret;
9087 #endif
9088     case TARGET_NR_getpgid:
9089         return get_errno(getpgid(arg1));
9090     case TARGET_NR_fchdir:
9091         return get_errno(fchdir(arg1));
9092     case TARGET_NR_personality:
9093         return get_errno(personality(arg1));
9094 #ifdef TARGET_NR__llseek /* Not on alpha */
9095     case TARGET_NR__llseek:
9096         {
9097             int64_t res;
9098 #if !defined(__NR_llseek)
9099             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9100             if (res == -1) {
9101                 ret = get_errno(res);
9102             } else {
9103                 ret = 0;
9104             }
9105 #else
9106             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9107 #endif
9108             if ((ret == 0) && put_user_s64(res, arg4)) {
9109                 return -TARGET_EFAULT;
9110             }
9111         }
9112         return ret;
9113 #endif
9114 #ifdef TARGET_NR_getdents
9115     case TARGET_NR_getdents:
9116 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9117 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
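             /*
              * Here the host's struct linux_dirent has 64-bit d_ino/d_off,
              * so read into a scratch buffer and repack each record into the
              * guest's 32-bit target_dirent layout.
              */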
9118         {
9119             struct target_dirent *target_dirp;
9120             struct linux_dirent *dirp;
9121             abi_long count = arg3;
9122 
9123             dirp = g_try_malloc(count);
9124             if (!dirp) {
9125                 return -TARGET_ENOMEM;
9126             }
9127 
9128             ret = get_errno(sys_getdents(arg1, dirp, count));
9129             if (!is_error(ret)) {
9130                 struct linux_dirent *de;
9131                 struct target_dirent *tde;
9132                 int len = ret;
9133                 int reclen, treclen;
9134                 int count1, tnamelen;
9135 
9136                 count1 = 0;
9137                 de = dirp;
9138                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9139                     return -TARGET_EFAULT;
9140                 tde = target_dirp;
9141                 while (len > 0) {
9142                     reclen = de->d_reclen;
9143                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9144                     assert(tnamelen >= 0);
9145                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9146                     assert(count1 + treclen <= count);
9147                     tde->d_reclen = tswap16(treclen);
9148                     tde->d_ino = tswapal(de->d_ino);
9149                     tde->d_off = tswapal(de->d_off);
9150                     memcpy(tde->d_name, de->d_name, tnamelen);
9151                     de = (struct linux_dirent *)((char *)de + reclen);
9152                     len -= reclen;
9153                     tde = (struct target_dirent *)((char *)tde + treclen);
9154                     count1 += treclen;
9155                 }
9156                 ret = count1;
9157                 unlock_user(target_dirp, arg2, ret);
9158             }
9159             g_free(dirp);
9160         }
9161 #else
9162         {
9163             struct linux_dirent *dirp;
9164             abi_long count = arg3;
9165 
9166             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9167                 return -TARGET_EFAULT;
9168             ret = get_errno(sys_getdents(arg1, dirp, count));
9169             if (!is_error(ret)) {
9170                 struct linux_dirent *de;
9171                 int len = ret;
9172                 int reclen;
9173                 de = dirp;
9174                 while (len > 0) {
9175                     reclen = de->d_reclen;
9176                     if (reclen > len)
9177                         break;
9178                     de->d_reclen = tswap16(reclen);
9179                     tswapls(&de->d_ino);
9180                     tswapls(&de->d_off);
9181                     de = (struct linux_dirent *)((char *)de + reclen);
9182                     len -= reclen;
9183                 }
9184             }
9185             unlock_user(dirp, arg2, ret);
9186         }
9187 #endif
9188 #else
9189         /* Implement getdents in terms of getdents64 */
9190         {
9191             struct linux_dirent64 *dirp;
9192             abi_long count = arg3;
9193 
9194             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9195             if (!dirp) {
9196                 return -TARGET_EFAULT;
9197             }
9198             ret = get_errno(sys_getdents64(arg1, dirp, count));
9199             if (!is_error(ret)) {
9200                 /* Convert the dirent64 structs to target dirent.  We do this
9201                  * in-place, since we can guarantee that a target_dirent is no
9202                  * larger than a dirent64; however this means we have to be
9203                  * careful to read everything before writing in the new format.
9204                  */
9205                 struct linux_dirent64 *de;
9206                 struct target_dirent *tde;
9207                 int len = ret;
9208                 int tlen = 0;
9209 
9210                 de = dirp;
9211                 tde = (struct target_dirent *)dirp;
9212                 while (len > 0) {
9213                     int namelen, treclen;
9214                     int reclen = de->d_reclen;
9215                     uint64_t ino = de->d_ino;
9216                     int64_t off = de->d_off;
9217                     uint8_t type = de->d_type;
9218 
9219                     namelen = strlen(de->d_name);
9220                     treclen = offsetof(struct target_dirent, d_name)
9221                         + namelen + 2;
9222                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9223 
9224                     memmove(tde->d_name, de->d_name, namelen + 1);
9225                     tde->d_ino = tswapal(ino);
9226                     tde->d_off = tswapal(off);
9227                     tde->d_reclen = tswap16(treclen);
9228                     /* The target_dirent type is in what was formerly a padding
9229                      * byte at the end of the structure:
9230                      */
9231                     *(((char *)tde) + treclen - 1) = type;
9232 
9233                     de = (struct linux_dirent64 *)((char *)de + reclen);
9234                     tde = (struct target_dirent *)((char *)tde + treclen);
9235                     len -= reclen;
9236                     tlen += treclen;
9237                 }
9238                 ret = tlen;
9239             }
9240             unlock_user(dirp, arg2, ret);
9241         }
9242 #endif
9243         return ret;
9244 #endif /* TARGET_NR_getdents */
9245 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9246     case TARGET_NR_getdents64:
9247         {
9248             struct linux_dirent64 *dirp;
9249             abi_long count = arg3;
9250             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9251                 return -TARGET_EFAULT;
9252             ret = get_errno(sys_getdents64(arg1, dirp, count));
9253             if (!is_error(ret)) {
9254                 struct linux_dirent64 *de;
9255                 int len = ret;
9256                 int reclen;
9257                 de = dirp;
9258                 while (len > 0) {
9259                     reclen = de->d_reclen;
9260                     if (reclen > len)
9261                         break;
9262                     de->d_reclen = tswap16(reclen);
9263                     tswap64s((uint64_t *)&de->d_ino);
9264                     tswap64s((uint64_t *)&de->d_off);
9265                     de = (struct linux_dirent64 *)((char *)de + reclen);
9266                     len -= reclen;
9267                 }
9268             }
9269             unlock_user(dirp, arg2, ret);
9270         }
9271         return ret;
9272 #endif /* TARGET_NR_getdents64 */
9273 #if defined(TARGET_NR__newselect)
9274     case TARGET_NR__newselect:
9275         return do_select(arg1, arg2, arg3, arg4, arg5);
9276 #endif
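    /*
     * poll and ppoll share one implementation: the guest pollfd array is
     * converted into a host array on the stack, both variants are funnelled
     * through safe_ppoll() (plain poll converts its millisecond timeout into
     * a timespec first), and the revents fields are copied back to the guest
     * on success.
     */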
9277 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9278 # ifdef TARGET_NR_poll
9279     case TARGET_NR_poll:
9280 # endif
9281 # ifdef TARGET_NR_ppoll
9282     case TARGET_NR_ppoll:
9283 # endif
9284         {
9285             struct target_pollfd *target_pfd;
9286             unsigned int nfds = arg2;
9287             struct pollfd *pfd;
9288             unsigned int i;
9289 
9290             pfd = NULL;
9291             target_pfd = NULL;
9292             if (nfds) {
9293                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9294                     return -TARGET_EINVAL;
9295                 }
9296 
9297                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9298                                        sizeof(struct target_pollfd) * nfds, 1);
9299                 if (!target_pfd) {
9300                     return -TARGET_EFAULT;
9301                 }
9302 
9303                 pfd = alloca(sizeof(struct pollfd) * nfds);
9304                 for (i = 0; i < nfds; i++) {
9305                     pfd[i].fd = tswap32(target_pfd[i].fd);
9306                     pfd[i].events = tswap16(target_pfd[i].events);
9307                 }
9308             }
9309 
9310             switch (num) {
9311 # ifdef TARGET_NR_ppoll
9312             case TARGET_NR_ppoll:
9313             {
9314                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9315                 target_sigset_t *target_set;
9316                 sigset_t _set, *set = &_set;
9317 
9318                 if (arg3) {
9319                     if (target_to_host_timespec(timeout_ts, arg3)) {
9320                         unlock_user(target_pfd, arg1, 0);
9321                         return -TARGET_EFAULT;
9322                     }
9323                 } else {
9324                     timeout_ts = NULL;
9325                 }
9326 
9327                 if (arg4) {
9328                     if (arg5 != sizeof(target_sigset_t)) {
9329                         unlock_user(target_pfd, arg1, 0);
9330                         return -TARGET_EINVAL;
9331                     }
9332 
9333                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9334                     if (!target_set) {
9335                         unlock_user(target_pfd, arg1, 0);
9336                         return -TARGET_EFAULT;
9337                     }
9338                     target_to_host_sigset(set, target_set);
9339                 } else {
9340                     set = NULL;
9341                 }
9342 
9343                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9344                                            set, SIGSET_T_SIZE));
9345 
9346                 if (!is_error(ret) && arg3) {
9347                     host_to_target_timespec(arg3, timeout_ts);
9348                 }
9349                 if (arg4) {
9350                     unlock_user(target_set, arg4, 0);
9351                 }
9352                 break;
9353             }
9354 # endif
9355 # ifdef TARGET_NR_poll
9356             case TARGET_NR_poll:
9357             {
9358                 struct timespec ts, *pts;
9359 
9360                 if (arg3 >= 0) {
9361                     /* Convert ms to secs, ns */
9362                     ts.tv_sec = arg3 / 1000;
9363                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9364                     pts = &ts;
9365                 } else {
9366                     /* A negative poll() timeout means "infinite" */
9367                     pts = NULL;
9368                 }
9369                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9370                 break;
9371             }
9372 # endif
9373             default:
9374                 g_assert_not_reached();
9375             }
9376 
9377             if (!is_error(ret)) {
9378                 for(i = 0; i < nfds; i++) {
9379                     target_pfd[i].revents = tswap16(pfd[i].revents);
9380                 }
9381             }
9382             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9383         }
9384         return ret;
9385 #endif
9386     case TARGET_NR_flock:
9387         /* NOTE: the flock constants seem to be the same on every
9388            Linux platform */
9389         return get_errno(safe_flock(arg1, arg2));
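    /*
     * The vectored I/O calls rely on lock_iovec() to build a host iovec
     * array from the guest's, locking each guest buffer into host memory;
     * unlock_iovec() releases those buffers and, for reads, copies the data
     * back out to the guest.
     */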
9390     case TARGET_NR_readv:
9391         {
9392             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9393             if (vec != NULL) {
9394                 ret = get_errno(safe_readv(arg1, vec, arg3));
9395                 unlock_iovec(vec, arg2, arg3, 1);
9396             } else {
9397                 ret = -host_to_target_errno(errno);
9398             }
9399         }
9400         return ret;
9401     case TARGET_NR_writev:
9402         {
9403             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9404             if (vec != NULL) {
9405                 ret = get_errno(safe_writev(arg1, vec, arg3));
9406                 unlock_iovec(vec, arg2, arg3, 0);
9407             } else {
9408                 ret = -host_to_target_errno(errno);
9409             }
9410         }
9411         return ret;
9412 #if defined(TARGET_NR_preadv)
9413     case TARGET_NR_preadv:
9414         {
9415             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9416             if (vec != NULL) {
9417                 unsigned long low, high;
9418 
9419                 target_to_host_low_high(arg4, arg5, &low, &high);
9420                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9421                 unlock_iovec(vec, arg2, arg3, 1);
9422             } else {
9423                 ret = -host_to_target_errno(errno);
9424            }
9425         }
9426         return ret;
9427 #endif
9428 #if defined(TARGET_NR_pwritev)
9429     case TARGET_NR_pwritev:
9430         {
9431             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9432             if (vec != NULL) {
9433                 unsigned long low, high;
9434 
9435                 target_to_host_low_high(arg4, arg5, &low, &high);
9436                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9437                 unlock_iovec(vec, arg2, arg3, 0);
9438             } else {
9439                 ret = -host_to_target_errno(errno);
9440            }
9441         }
9442         return ret;
9443 #endif
9444     case TARGET_NR_getsid:
9445         return get_errno(getsid(arg1));
9446 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9447     case TARGET_NR_fdatasync:
9448         return get_errno(fdatasync(arg1));
9449 #endif
9450 #ifdef TARGET_NR__sysctl
9451     case TARGET_NR__sysctl:
9452         /* We don't implement this, but ENOTDIR is always a safe
9453            return value. */
9454         return -TARGET_ENOTDIR;
9455 #endif
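    /*
     * The CPU-affinity calls operate on bitmasks of host unsigned longs,
     * while the guest supplies a buffer sized in abi_ulongs.  The length is
     * rounded up to a multiple of the host long size before calling the
     * kernel, and the *_cpu_mask() helpers translate the mask layout in
     * each direction.
     */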
9456     case TARGET_NR_sched_getaffinity:
9457         {
9458             unsigned int mask_size;
9459             unsigned long *mask;
9460 
9461             /*
9462              * sched_getaffinity needs multiples of ulong, so we need to take
9463              * care of mismatches between the target and host ulong sizes.
9464              */
9465             if (arg2 & (sizeof(abi_ulong) - 1)) {
9466                 return -TARGET_EINVAL;
9467             }
9468             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9469 
9470             mask = alloca(mask_size);
9471             memset(mask, 0, mask_size);
9472             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9473 
9474             if (!is_error(ret)) {
9475                 if (ret > arg2) {
9476                     /* More data was returned than the caller's buffer can hold.
9477                      * This only happens if sizeof(abi_long) < sizeof(long)
9478                      * and the caller passed us a buffer holding an odd number
9479                      * of abi_longs. If the host kernel is actually using the
9480                      * extra 4 bytes then fail EINVAL; otherwise we can just
9481                      * ignore them and only copy the interesting part.
9482                      */
9483                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9484                     if (numcpus > arg2 * 8) {
9485                         return -TARGET_EINVAL;
9486                     }
9487                     ret = arg2;
9488                 }
9489 
9490                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9491                     return -TARGET_EFAULT;
9492                 }
9493             }
9494         }
9495         return ret;
9496     case TARGET_NR_sched_setaffinity:
9497         {
9498             unsigned int mask_size;
9499             unsigned long *mask;
9500 
9501             /*
9502              * sched_setaffinity needs multiples of ulong, so we need to take
9503              * care of mismatches between the target and host ulong sizes.
9504              */
9505             if (arg2 & (sizeof(abi_ulong) - 1)) {
9506                 return -TARGET_EINVAL;
9507             }
9508             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9509             mask = alloca(mask_size);
9510 
9511             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9512             if (ret) {
9513                 return ret;
9514             }
9515 
9516             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9517         }
9518     case TARGET_NR_getcpu:
9519         {
9520             unsigned cpu, node;
9521             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9522                                        arg2 ? &node : NULL,
9523                                        NULL));
9524             if (is_error(ret)) {
9525                 return ret;
9526             }
9527             if (arg1 && put_user_u32(cpu, arg1)) {
9528                 return -TARGET_EFAULT;
9529             }
9530             if (arg2 && put_user_u32(node, arg2)) {
9531                 return -TARGET_EFAULT;
9532             }
9533         }
9534         return ret;
9535     case TARGET_NR_sched_setparam:
9536         {
9537             struct sched_param *target_schp;
9538             struct sched_param schp;
9539 
9540             if (arg2 == 0) {
9541                 return -TARGET_EINVAL;
9542             }
9543             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9544                 return -TARGET_EFAULT;
9545             schp.sched_priority = tswap32(target_schp->sched_priority);
9546             unlock_user_struct(target_schp, arg2, 0);
9547             return get_errno(sched_setparam(arg1, &schp));
9548         }
9549     case TARGET_NR_sched_getparam:
9550         {
9551             struct sched_param *target_schp;
9552             struct sched_param schp;
9553 
9554             if (arg2 == 0) {
9555                 return -TARGET_EINVAL;
9556             }
9557             ret = get_errno(sched_getparam(arg1, &schp));
9558             if (!is_error(ret)) {
9559                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9560                     return -TARGET_EFAULT;
9561                 target_schp->sched_priority = tswap32(schp.sched_priority);
9562                 unlock_user_struct(target_schp, arg2, 1);
9563             }
9564         }
9565         return ret;
9566     case TARGET_NR_sched_setscheduler:
9567         {
9568             struct sched_param *target_schp;
9569             struct sched_param schp;
9570             if (arg3 == 0) {
9571                 return -TARGET_EINVAL;
9572             }
9573             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9574                 return -TARGET_EFAULT;
9575             schp.sched_priority = tswap32(target_schp->sched_priority);
9576             unlock_user_struct(target_schp, arg3, 0);
9577             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9578         }
9579     case TARGET_NR_sched_getscheduler:
9580         return get_errno(sched_getscheduler(arg1));
9581     case TARGET_NR_sched_yield:
9582         return get_errno(sched_yield());
9583     case TARGET_NR_sched_get_priority_max:
9584         return get_errno(sched_get_priority_max(arg1));
9585     case TARGET_NR_sched_get_priority_min:
9586         return get_errno(sched_get_priority_min(arg1));
9587     case TARGET_NR_sched_rr_get_interval:
9588         {
9589             struct timespec ts;
9590             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9591             if (!is_error(ret)) {
9592                 ret = host_to_target_timespec(arg2, &ts);
9593             }
9594         }
9595         return ret;
9596     case TARGET_NR_nanosleep:
9597         {
9598             struct timespec req, rem;
9599             target_to_host_timespec(&req, arg1);
9600             ret = get_errno(safe_nanosleep(&req, &rem));
9601             if (is_error(ret) && arg2) {
9602                 host_to_target_timespec(arg2, &rem);
9603             }
9604         }
9605         return ret;
9606     case TARGET_NR_prctl:
9607         switch (arg1) {
9608         case PR_GET_PDEATHSIG:
9609         {
9610             int deathsig;
9611             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9612             if (!is_error(ret) && arg2
9613                 && put_user_ual(deathsig, arg2)) {
9614                 return -TARGET_EFAULT;
9615             }
9616             return ret;
9617         }
9618 #ifdef PR_GET_NAME
9619         case PR_GET_NAME:
9620         {
9621             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9622             if (!name) {
9623                 return -TARGET_EFAULT;
9624             }
9625             ret = get_errno(prctl(arg1, (unsigned long)name,
9626                                   arg3, arg4, arg5));
9627             unlock_user(name, arg2, 16);
9628             return ret;
9629         }
9630         case PR_SET_NAME:
9631         {
9632             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9633             if (!name) {
9634                 return -TARGET_EFAULT;
9635             }
9636             ret = get_errno(prctl(arg1, (unsigned long)name,
9637                                   arg3, arg4, arg5));
9638             unlock_user(name, arg2, 0);
9639             return ret;
9640         }
9641 #endif
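        /*
         * The MIPS PR_GET_FP_MODE / PR_SET_FP_MODE options below are not
         * passed through to the host; they are emulated directly on the CPU
         * state, flipping the FR/FRE bits and repacking the even/odd
         * single-precision register halves when the FR mode changes.
         */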
9642 #ifdef TARGET_MIPS
9643         case TARGET_PR_GET_FP_MODE:
9644         {
9645             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9646             ret = 0;
9647             if (env->CP0_Status & (1 << CP0St_FR)) {
9648                 ret |= TARGET_PR_FP_MODE_FR;
9649             }
9650             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9651                 ret |= TARGET_PR_FP_MODE_FRE;
9652             }
9653             return ret;
9654         }
9655         case TARGET_PR_SET_FP_MODE:
9656         {
9657             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9658             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9659             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
9660             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9661             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9662 
9663             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
9664                                             TARGET_PR_FP_MODE_FRE;
9665 
9666             /* If nothing to change, return right away, successfully.  */
9667             if (old_fr == new_fr && old_fre == new_fre) {
9668                 return 0;
9669             }
9670             /* Check the value is valid */
9671             if (arg2 & ~known_bits) {
9672                 return -TARGET_EOPNOTSUPP;
9673             }
9674             /* Setting FRE without FR is not supported.  */
9675             if (new_fre && !new_fr) {
9676                 return -TARGET_EOPNOTSUPP;
9677             }
9678             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9679                 /* FR1 is not supported */
9680                 return -TARGET_EOPNOTSUPP;
9681             }
9682             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9683                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9684                 /* cannot set FR=0 */
9685                 return -TARGET_EOPNOTSUPP;
9686             }
9687             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9688                 /* Cannot set FRE=1 */
9689                 return -TARGET_EOPNOTSUPP;
9690             }
9691 
9692             int i;
9693             fpr_t *fpr = env->active_fpu.fpr;
9694             for (i = 0; i < 32 ; i += 2) {
9695                 if (!old_fr && new_fr) {
9696                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9697                 } else if (old_fr && !new_fr) {
9698                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
9699                 }
9700             }
9701 
9702             if (new_fr) {
9703                 env->CP0_Status |= (1 << CP0St_FR);
9704                 env->hflags |= MIPS_HFLAG_F64;
9705             } else {
9706                 env->CP0_Status &= ~(1 << CP0St_FR);
9707                 env->hflags &= ~MIPS_HFLAG_F64;
9708             }
9709             if (new_fre) {
9710                 env->CP0_Config5 |= (1 << CP0C5_FRE);
9711                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
9712                     env->hflags |= MIPS_HFLAG_FRE;
9713                 }
9714             } else {
9715                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
9716                 env->hflags &= ~MIPS_HFLAG_FRE;
9717             }
9718 
9719             return 0;
9720         }
9721 #endif /* MIPS */
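        /*
         * The AArch64 SVE prctls are likewise emulated on CPU state:
         * PR_SVE_SET_VL clamps the requested vector length to the CPU's
         * maximum, narrows the SVE registers if the length shrinks, and
         * returns the new length in bytes; PR_SVE_GET_VL reads it back
         * from ZCR_EL1.
         */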
9722 #ifdef TARGET_AARCH64
9723         case TARGET_PR_SVE_SET_VL:
9724             /*
9725              * We cannot support either PR_SVE_SET_VL_ONEXEC or
9726              * PR_SVE_VL_INHERIT.  Note the kernel definition
9727              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9728              * even though the current architectural maximum is VQ=16.
9729              */
9730             ret = -TARGET_EINVAL;
9731             if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env))
9732                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9733                 CPUARMState *env = cpu_env;
9734                 ARMCPU *cpu = arm_env_get_cpu(env);
9735                 uint32_t vq, old_vq;
9736 
9737                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9738                 vq = MAX(arg2 / 16, 1);
9739                 vq = MIN(vq, cpu->sve_max_vq);
9740 
9741                 if (vq < old_vq) {
9742                     aarch64_sve_narrow_vq(env, vq);
9743                 }
9744                 env->vfp.zcr_el[1] = vq - 1;
9745                 ret = vq * 16;
9746             }
9747             return ret;
9748         case TARGET_PR_SVE_GET_VL:
9749             ret = -TARGET_EINVAL;
9750             {
9751                 ARMCPU *cpu = arm_env_get_cpu(cpu_env);
9752                 if (cpu_isar_feature(aa64_sve, cpu)) {
9753                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9754                 }
9755             }
9756             return ret;
9757         case TARGET_PR_PAC_RESET_KEYS:
9758             {
9759                 CPUARMState *env = cpu_env;
9760                 ARMCPU *cpu = arm_env_get_cpu(env);
9761 
9762                 if (arg3 || arg4 || arg5) {
9763                     return -TARGET_EINVAL;
9764                 }
9765                 if (cpu_isar_feature(aa64_pauth, cpu)) {
9766                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
9767                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
9768                                TARGET_PR_PAC_APGAKEY);
9769                     int ret = 0;
9770                     Error *err = NULL;
9771 
9772                     if (arg2 == 0) {
9773                         arg2 = all;
9774                     } else if (arg2 & ~all) {
9775                         return -TARGET_EINVAL;
9776                     }
9777                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
9778                         ret |= qemu_guest_getrandom(&env->keys.apia,
9779                                                     sizeof(ARMPACKey), &err);
9780                     }
9781                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
9782                         ret |= qemu_guest_getrandom(&env->keys.apib,
9783                                                     sizeof(ARMPACKey), &err);
9784                     }
9785                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
9786                         ret |= qemu_guest_getrandom(&env->keys.apda,
9787                                                     sizeof(ARMPACKey), &err);
9788                     }
9789                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
9790                         ret |= qemu_guest_getrandom(&env->keys.apdb,
9791                                                     sizeof(ARMPACKey), &err);
9792                     }
9793                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
9794                         ret |= qemu_guest_getrandom(&env->keys.apga,
9795                                                     sizeof(ARMPACKey), &err);
9796                     }
9797                     if (ret != 0) {
9798                         /*
9799                          * Some unknown failure in the crypto.  The best
9800                          * we can do is log it and fail the syscall.
9801                          * The real syscall cannot fail this way.
9802                          */
9803                         qemu_log_mask(LOG_UNIMP,
9804                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
9805                                       error_get_pretty(err));
9806                         error_free(err);
9807                         return -TARGET_EIO;
9808                     }
9809                     return 0;
9810                 }
9811             }
9812             return -TARGET_EINVAL;
9813 #endif /* AARCH64 */
9814         case PR_GET_SECCOMP:
9815         case PR_SET_SECCOMP:
9816             /* Disable seccomp to prevent the target from disabling syscalls
9817              * that we need. */
9818             return -TARGET_EINVAL;
9819         default:
9820             /* Most prctl options have no pointer arguments */
9821             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9822         }
9823         break;
9824 #ifdef TARGET_NR_arch_prctl
9825     case TARGET_NR_arch_prctl:
9826 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9827         return do_arch_prctl(cpu_env, arg1, arg2);
9828 #else
9829 #error unreachable
9830 #endif
9831 #endif
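    /*
     * pread64/pwrite64 take a 64-bit file offset which, on 32-bit targets,
     * arrives split across two syscall arguments.  Some ABIs require such
     * pairs to start on an even-numbered register, so regpairs_aligned() may
     * shift the arguments by one before target_offset64() reassembles the
     * offset.
     */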
9832 #ifdef TARGET_NR_pread64
9833     case TARGET_NR_pread64:
9834         if (regpairs_aligned(cpu_env, num)) {
9835             arg4 = arg5;
9836             arg5 = arg6;
9837         }
9838         if (arg2 == 0 && arg3 == 0) {
9839             /* Special-case NULL buffer and zero length, which should succeed */
9840             p = 0;
9841         } else {
9842             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9843             if (!p) {
9844                 return -TARGET_EFAULT;
9845             }
9846         }
9847         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9848         unlock_user(p, arg2, ret);
9849         return ret;
9850     case TARGET_NR_pwrite64:
9851         if (regpairs_aligned(cpu_env, num)) {
9852             arg4 = arg5;
9853             arg5 = arg6;
9854         }
9855         if (arg2 == 0 && arg3 == 0) {
9856             /* Special-case NULL buffer and zero length, which should succeed */
9857             p = 0;
9858         } else {
9859             p = lock_user(VERIFY_READ, arg2, arg3, 1);
9860             if (!p) {
9861                 return -TARGET_EFAULT;
9862             }
9863         }
9864         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9865         unlock_user(p, arg2, 0);
9866         return ret;
9867 #endif
9868     case TARGET_NR_getcwd:
9869         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9870             return -TARGET_EFAULT;
9871         ret = get_errno(sys_getcwd1(p, arg2));
9872         unlock_user(p, arg1, ret);
9873         return ret;
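    /*
     * capget and capset share one implementation.  The cap header is copied
     * in both directions (its version field is written back unconditionally,
     * matching the kernel), while the cap data is one struct for the legacy
     * _LINUX_CAPABILITY_VERSION and two structs for later versions.
     */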
9874     case TARGET_NR_capget:
9875     case TARGET_NR_capset:
9876     {
9877         struct target_user_cap_header *target_header;
9878         struct target_user_cap_data *target_data = NULL;
9879         struct __user_cap_header_struct header;
9880         struct __user_cap_data_struct data[2];
9881         struct __user_cap_data_struct *dataptr = NULL;
9882         int i, target_datalen;
9883         int data_items = 1;
9884 
9885         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9886             return -TARGET_EFAULT;
9887         }
9888         header.version = tswap32(target_header->version);
9889         header.pid = tswap32(target_header->pid);
9890 
9891         if (header.version != _LINUX_CAPABILITY_VERSION) {
9892             /* Versions 2 and up take a pointer to two user_data structs */
9893             data_items = 2;
9894         }
9895 
9896         target_datalen = sizeof(*target_data) * data_items;
9897 
9898         if (arg2) {
9899             if (num == TARGET_NR_capget) {
9900                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9901             } else {
9902                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9903             }
9904             if (!target_data) {
9905                 unlock_user_struct(target_header, arg1, 0);
9906                 return -TARGET_EFAULT;
9907             }
9908 
9909             if (num == TARGET_NR_capset) {
9910                 for (i = 0; i < data_items; i++) {
9911                     data[i].effective = tswap32(target_data[i].effective);
9912                     data[i].permitted = tswap32(target_data[i].permitted);
9913                     data[i].inheritable = tswap32(target_data[i].inheritable);
9914                 }
9915             }
9916 
9917             dataptr = data;
9918         }
9919 
9920         if (num == TARGET_NR_capget) {
9921             ret = get_errno(capget(&header, dataptr));
9922         } else {
9923             ret = get_errno(capset(&header, dataptr));
9924         }
9925 
9926         /* The kernel always updates version for both capget and capset */
9927         target_header->version = tswap32(header.version);
9928         unlock_user_struct(target_header, arg1, 1);
9929 
9930         if (arg2) {
9931             if (num == TARGET_NR_capget) {
9932                 for (i = 0; i < data_items; i++) {
9933                     target_data[i].effective = tswap32(data[i].effective);
9934                     target_data[i].permitted = tswap32(data[i].permitted);
9935                     target_data[i].inheritable = tswap32(data[i].inheritable);
9936                 }
9937                 unlock_user(target_data, arg2, target_datalen);
9938             } else {
9939                 unlock_user(target_data, arg2, 0);
9940             }
9941         }
9942         return ret;
9943     }
9944     case TARGET_NR_sigaltstack:
9945         return do_sigaltstack(arg1, arg2,
9946                               get_sp_from_cpustate((CPUArchState *)cpu_env));
9947 
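    /*
     * For sendfile/sendfile64 the optional offset argument is a pointer into
     * guest memory: the value is read into a host off_t, passed by pointer
     * to the host sendfile(), and written back on success so the guest sees
     * the updated offset.  sendfile64 differs only in using a 64-bit guest
     * offset.
     */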
9948 #ifdef CONFIG_SENDFILE
9949 #ifdef TARGET_NR_sendfile
9950     case TARGET_NR_sendfile:
9951     {
9952         off_t *offp = NULL;
9953         off_t off;
9954         if (arg3) {
9955             ret = get_user_sal(off, arg3);
9956             if (is_error(ret)) {
9957                 return ret;
9958             }
9959             offp = &off;
9960         }
9961         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9962         if (!is_error(ret) && arg3) {
9963             abi_long ret2 = put_user_sal(off, arg3);
9964             if (is_error(ret2)) {
9965                 ret = ret2;
9966             }
9967         }
9968         return ret;
9969     }
9970 #endif
9971 #ifdef TARGET_NR_sendfile64
9972     case TARGET_NR_sendfile64:
9973     {
9974         off_t *offp = NULL;
9975         off_t off;
9976         if (arg3) {
9977             ret = get_user_s64(off, arg3);
9978             if (is_error(ret)) {
9979                 return ret;
9980             }
9981             offp = &off;
9982         }
9983         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9984         if (!is_error(ret) && arg3) {
9985             abi_long ret2 = put_user_s64(off, arg3);
9986             if (is_error(ret2)) {
9987                 ret = ret2;
9988             }
9989         }
9990         return ret;
9991     }
9992 #endif
9993 #endif
9994 #ifdef TARGET_NR_vfork
9995     case TARGET_NR_vfork:
9996         return get_errno(do_fork(cpu_env,
9997                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
9998                          0, 0, 0, 0));
9999 #endif
10000 #ifdef TARGET_NR_ugetrlimit
10001     case TARGET_NR_ugetrlimit:
10002     {
10003         struct rlimit rlim;
10004         int resource = target_to_host_resource(arg1);
10005         ret = get_errno(getrlimit(resource, &rlim));
10006         if (!is_error(ret)) {
10007             struct target_rlimit *target_rlim;
10008             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10009                 return -TARGET_EFAULT;
10010             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10011             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10012             unlock_user_struct(target_rlim, arg2, 1);
10013         }
10014         return ret;
10015     }
10016 #endif
10017 #ifdef TARGET_NR_truncate64
10018     case TARGET_NR_truncate64:
10019         if (!(p = lock_user_string(arg1)))
10020             return -TARGET_EFAULT;
10021         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10022         unlock_user(p, arg1, 0);
10023         return ret;
10024 #endif
10025 #ifdef TARGET_NR_ftruncate64
10026     case TARGET_NR_ftruncate64:
10027         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10028 #endif
10029 #ifdef TARGET_NR_stat64
10030     case TARGET_NR_stat64:
10031         if (!(p = lock_user_string(arg1))) {
10032             return -TARGET_EFAULT;
10033         }
10034         ret = get_errno(stat(path(p), &st));
10035         unlock_user(p, arg1, 0);
10036         if (!is_error(ret))
10037             ret = host_to_target_stat64(cpu_env, arg2, &st);
10038         return ret;
10039 #endif
10040 #ifdef TARGET_NR_lstat64
10041     case TARGET_NR_lstat64:
10042         if (!(p = lock_user_string(arg1))) {
10043             return -TARGET_EFAULT;
10044         }
10045         ret = get_errno(lstat(path(p), &st));
10046         unlock_user(p, arg1, 0);
10047         if (!is_error(ret))
10048             ret = host_to_target_stat64(cpu_env, arg2, &st);
10049         return ret;
10050 #endif
10051 #ifdef TARGET_NR_fstat64
10052     case TARGET_NR_fstat64:
10053         ret = get_errno(fstat(arg1, &st));
10054         if (!is_error(ret))
10055             ret = host_to_target_stat64(cpu_env, arg2, &st);
10056         return ret;
10057 #endif
10058 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10059 #ifdef TARGET_NR_fstatat64
10060     case TARGET_NR_fstatat64:
10061 #endif
10062 #ifdef TARGET_NR_newfstatat
10063     case TARGET_NR_newfstatat:
10064 #endif
10065         if (!(p = lock_user_string(arg2))) {
10066             return -TARGET_EFAULT;
10067         }
10068         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10069         unlock_user(p, arg2, 0);
10070         if (!is_error(ret))
10071             ret = host_to_target_stat64(cpu_env, arg3, &st);
10072         return ret;
10073 #endif
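    /*
     * The unsuffixed UID/GID syscalls below are the old variants which use
     * 16-bit IDs on some targets, so values pass through the high2low/
     * low2high helpers in each direction; the *32 variants further down use
     * the values unchanged.
     */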
10074 #ifdef TARGET_NR_lchown
10075     case TARGET_NR_lchown:
10076         if (!(p = lock_user_string(arg1)))
10077             return -TARGET_EFAULT;
10078         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10079         unlock_user(p, arg1, 0);
10080         return ret;
10081 #endif
10082 #ifdef TARGET_NR_getuid
10083     case TARGET_NR_getuid:
10084         return get_errno(high2lowuid(getuid()));
10085 #endif
10086 #ifdef TARGET_NR_getgid
10087     case TARGET_NR_getgid:
10088         return get_errno(high2lowgid(getgid()));
10089 #endif
10090 #ifdef TARGET_NR_geteuid
10091     case TARGET_NR_geteuid:
10092         return get_errno(high2lowuid(geteuid()));
10093 #endif
10094 #ifdef TARGET_NR_getegid
10095     case TARGET_NR_getegid:
10096         return get_errno(high2lowgid(getegid()));
10097 #endif
10098     case TARGET_NR_setreuid:
10099         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10100     case TARGET_NR_setregid:
10101         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10102     case TARGET_NR_getgroups:
10103         {
10104             int gidsetsize = arg1;
10105             target_id *target_grouplist;
10106             gid_t *grouplist;
10107             int i;
10108 
10109             grouplist = alloca(gidsetsize * sizeof(gid_t));
10110             ret = get_errno(getgroups(gidsetsize, grouplist));
10111             if (gidsetsize == 0)
10112                 return ret;
10113             if (!is_error(ret)) {
10114                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10115                 if (!target_grouplist)
10116                     return -TARGET_EFAULT;
10117                 for (i = 0; i < ret; i++)
10118                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10119                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10120             }
10121         }
10122         return ret;
10123     case TARGET_NR_setgroups:
10124         {
10125             int gidsetsize = arg1;
10126             target_id *target_grouplist;
10127             gid_t *grouplist = NULL;
10128             int i;
10129             if (gidsetsize) {
10130                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10131                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10132                 if (!target_grouplist) {
10133                     return -TARGET_EFAULT;
10134                 }
10135                 for (i = 0; i < gidsetsize; i++) {
10136                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10137                 }
10138                 unlock_user(target_grouplist, arg2, 0);
10139             }
10140             return get_errno(setgroups(gidsetsize, grouplist));
10141         }
10142     case TARGET_NR_fchown:
10143         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10144 #if defined(TARGET_NR_fchownat)
10145     case TARGET_NR_fchownat:
10146         if (!(p = lock_user_string(arg2)))
10147             return -TARGET_EFAULT;
10148         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10149                                  low2highgid(arg4), arg5));
10150         unlock_user(p, arg2, 0);
10151         return ret;
10152 #endif
10153 #ifdef TARGET_NR_setresuid
10154     case TARGET_NR_setresuid:
10155         return get_errno(sys_setresuid(low2highuid(arg1),
10156                                        low2highuid(arg2),
10157                                        low2highuid(arg3)));
10158 #endif
10159 #ifdef TARGET_NR_getresuid
10160     case TARGET_NR_getresuid:
10161         {
10162             uid_t ruid, euid, suid;
10163             ret = get_errno(getresuid(&ruid, &euid, &suid));
10164             if (!is_error(ret)) {
10165                 if (put_user_id(high2lowuid(ruid), arg1)
10166                     || put_user_id(high2lowuid(euid), arg2)
10167                     || put_user_id(high2lowuid(suid), arg3))
10168                     return -TARGET_EFAULT;
10169             }
10170         }
10171         return ret;
10172 #endif
10173 #ifdef TARGET_NR_getresgid
10174     case TARGET_NR_setresgid:
10175         return get_errno(sys_setresgid(low2highgid(arg1),
10176                                        low2highgid(arg2),
10177                                        low2highgid(arg3)));
10178 #endif
10179 #ifdef TARGET_NR_getresgid
10180     case TARGET_NR_getresgid:
10181         {
10182             gid_t rgid, egid, sgid;
10183             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10184             if (!is_error(ret)) {
10185                 if (put_user_id(high2lowgid(rgid), arg1)
10186                     || put_user_id(high2lowgid(egid), arg2)
10187                     || put_user_id(high2lowgid(sgid), arg3))
10188                     return -TARGET_EFAULT;
10189             }
10190         }
10191         return ret;
10192 #endif
10193 #ifdef TARGET_NR_chown
10194     case TARGET_NR_chown:
10195         if (!(p = lock_user_string(arg1)))
10196             return -TARGET_EFAULT;
10197         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10198         unlock_user(p, arg1, 0);
10199         return ret;
10200 #endif
10201     case TARGET_NR_setuid:
10202         return get_errno(sys_setuid(low2highuid(arg1)));
10203     case TARGET_NR_setgid:
10204         return get_errno(sys_setgid(low2highgid(arg1)));
10205     case TARGET_NR_setfsuid:
10206         return get_errno(setfsuid(arg1));
10207     case TARGET_NR_setfsgid:
10208         return get_errno(setfsgid(arg1));
10209 
10210 #ifdef TARGET_NR_lchown32
10211     case TARGET_NR_lchown32:
10212         if (!(p = lock_user_string(arg1)))
10213             return -TARGET_EFAULT;
10214         ret = get_errno(lchown(p, arg2, arg3));
10215         unlock_user(p, arg1, 0);
10216         return ret;
10217 #endif
10218 #ifdef TARGET_NR_getuid32
10219     case TARGET_NR_getuid32:
10220         return get_errno(getuid());
10221 #endif
10222 
10223 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10224     /* Alpha specific */
10225     case TARGET_NR_getxuid:
10226         {
10227             uid_t euid;
10228             euid = geteuid();
10229             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10230         }
10231         return get_errno(getuid());
10232 #endif
10233 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10234     /* Alpha specific */
10235     case TARGET_NR_getxgid:
10236         {
10237             gid_t egid;
10238             egid = getegid();
10239             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10240         }
10241         return get_errno(getgid());
10242 #endif
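    /*
     * The Alpha osf_getsysinfo/osf_setsysinfo cases below emulate the IEEE
     * FP control interface: the guest-visible software completion control
     * word (swcr) lives in CPUAlphaState, with its status bits derived from
     * and folded into the hardware FPCR, mirroring the kernel's
     * swcr_update_status behaviour.
     */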
10243 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10244     /* Alpha specific */
10245     case TARGET_NR_osf_getsysinfo:
10246         ret = -TARGET_EOPNOTSUPP;
10247         switch (arg1) {
10248           case TARGET_GSI_IEEE_FP_CONTROL:
10249             {
10250                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10251                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10252 
10253                 swcr &= ~SWCR_STATUS_MASK;
10254                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10255 
10256                 if (put_user_u64 (swcr, arg2))
10257                         return -TARGET_EFAULT;
10258                 ret = 0;
10259             }
10260             break;
10261 
10262           /* case GSI_IEEE_STATE_AT_SIGNAL:
10263              -- Not implemented in linux kernel.
10264              case GSI_UACPROC:
10265              -- Retrieves current unaligned access state; not much used.
10266              case GSI_PROC_TYPE:
10267              -- Retrieves implver information; surely not used.
10268              case GSI_GET_HWRPB:
10269              -- Grabs a copy of the HWRPB; surely not used.
10270           */
10271         }
10272         return ret;
10273 #endif
10274 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10275     /* Alpha specific */
10276     case TARGET_NR_osf_setsysinfo:
10277         ret = -TARGET_EOPNOTSUPP;
10278         switch (arg1) {
10279           case TARGET_SSI_IEEE_FP_CONTROL:
10280             {
10281                 uint64_t swcr, fpcr;
10282 
10283                 if (get_user_u64 (swcr, arg2)) {
10284                     return -TARGET_EFAULT;
10285                 }
10286 
10287                 /*
10288                  * The kernel calls swcr_update_status to update the
10289                  * status bits from the fpcr at every point that it
10290                  * could be queried.  Therefore, we store the status
10291                  * bits only in FPCR.
10292                  */
10293                 ((CPUAlphaState *)cpu_env)->swcr
10294                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10295 
10296                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10297                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10298                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10299                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10300                 ret = 0;
10301             }
10302             break;
10303 
10304           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10305             {
10306                 uint64_t exc, fpcr, fex;
10307 
10308                 if (get_user_u64(exc, arg2)) {
10309                     return -TARGET_EFAULT;
10310                 }
10311                 exc &= SWCR_STATUS_MASK;
10312                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10313 
10314                 /* Old exceptions are not signaled.  */
10315                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10316                 fex = exc & ~fex;
10317                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10318                 fex &= ((CPUArchState *)cpu_env)->swcr;
10319 
10320                 /* Update the hardware fpcr.  */
10321                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10322                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10323 
10324                 if (fex) {
10325                     int si_code = TARGET_FPE_FLTUNK;
10326                     target_siginfo_t info;
10327 
10328                     if (fex & SWCR_TRAP_ENABLE_DNO) {
10329                         si_code = TARGET_FPE_FLTUND;
10330                     }
10331                     if (fex & SWCR_TRAP_ENABLE_INE) {
10332                         si_code = TARGET_FPE_FLTRES;
10333                     }
10334                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10335                         si_code = TARGET_FPE_FLTUND;
10336                     }
10337                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10338                         si_code = TARGET_FPE_FLTOVF;
10339                     }
10340                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10341                         si_code = TARGET_FPE_FLTDIV;
10342                     }
10343                     if (fex & SWCR_TRAP_ENABLE_INV) {
10344                         si_code = TARGET_FPE_FLTINV;
10345                     }
10346 
10347                     info.si_signo = SIGFPE;
10348                     info.si_errno = 0;
10349                     info.si_code = si_code;
10350                     info._sifields._sigfault._addr
10351                         = ((CPUArchState *)cpu_env)->pc;
10352                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10353                                  QEMU_SI_FAULT, &info);
10354                 }
10355                 ret = 0;
10356             }
10357             break;
10358 
10359           /* case SSI_NVPAIRS:
10360              -- Used with SSIN_UACPROC to enable unaligned accesses.
10361              case SSI_IEEE_STATE_AT_SIGNAL:
10362              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10363              -- Not implemented in linux kernel
10364           */
10365         }
10366         return ret;
10367 #endif
10368 #ifdef TARGET_NR_osf_sigprocmask
10369     /* Alpha specific.  */
10370     case TARGET_NR_osf_sigprocmask:
10371         {
10372             abi_ulong mask;
10373             int how;
10374             sigset_t set, oldset;
10375 
10376             switch (arg1) {
10377             case TARGET_SIG_BLOCK:
10378                 how = SIG_BLOCK;
10379                 break;
10380             case TARGET_SIG_UNBLOCK:
10381                 how = SIG_UNBLOCK;
10382                 break;
10383             case TARGET_SIG_SETMASK:
10384                 how = SIG_SETMASK;
10385                 break;
10386             default:
10387                 return -TARGET_EINVAL;
10388             }
10389             mask = arg2;
10390             target_to_host_old_sigset(&set, &mask);
10391             ret = do_sigprocmask(how, &set, &oldset);
10392             if (!ret) {
10393                 host_to_target_old_sigset(&mask, &oldset);
10394                 ret = mask;
10395             }
10396         }
10397         return ret;
10398 #endif
10399 
10400 #ifdef TARGET_NR_getgid32
10401     case TARGET_NR_getgid32:
10402         return get_errno(getgid());
10403 #endif
10404 #ifdef TARGET_NR_geteuid32
10405     case TARGET_NR_geteuid32:
10406         return get_errno(geteuid());
10407 #endif
10408 #ifdef TARGET_NR_getegid32
10409     case TARGET_NR_getegid32:
10410         return get_errno(getegid());
10411 #endif
10412 #ifdef TARGET_NR_setreuid32
10413     case TARGET_NR_setreuid32:
10414         return get_errno(setreuid(arg1, arg2));
10415 #endif
10416 #ifdef TARGET_NR_setregid32
10417     case TARGET_NR_setregid32:
10418         return get_errno(setregid(arg1, arg2));
10419 #endif
10420 #ifdef TARGET_NR_getgroups32
10421     case TARGET_NR_getgroups32:
10422         {
10423             int gidsetsize = arg1;
10424             uint32_t *target_grouplist;
10425             gid_t *grouplist;
10426             int i;
10427 
10428             grouplist = alloca(gidsetsize * sizeof(gid_t));
10429             ret = get_errno(getgroups(gidsetsize, grouplist));
10430             if (gidsetsize == 0)
10431                 return ret;
10432             if (!is_error(ret)) {
10433                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10434                 if (!target_grouplist) {
10435                     return -TARGET_EFAULT;
10436                 }
10437                 for (i = 0; i < ret; i++)
10438                     target_grouplist[i] = tswap32(grouplist[i]);
10439                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10440             }
10441         }
10442         return ret;
10443 #endif
10444 #ifdef TARGET_NR_setgroups32
10445     case TARGET_NR_setgroups32:
10446         {
10447             int gidsetsize = arg1;
10448             uint32_t *target_grouplist;
10449             gid_t *grouplist;
10450             int i;
10451 
10452             grouplist = alloca(gidsetsize * sizeof(gid_t));
10453             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10454             if (!target_grouplist) {
10455                 return -TARGET_EFAULT;
10456             }
10457             for (i = 0; i < gidsetsize; i++)
10458                 grouplist[i] = tswap32(target_grouplist[i]);
10459             unlock_user(target_grouplist, arg2, 0);
10460             return get_errno(setgroups(gidsetsize, grouplist));
10461         }
10462 #endif
10463 #ifdef TARGET_NR_fchown32
10464     case TARGET_NR_fchown32:
10465         return get_errno(fchown(arg1, arg2, arg3));
10466 #endif
10467 #ifdef TARGET_NR_setresuid32
10468     case TARGET_NR_setresuid32:
10469         return get_errno(sys_setresuid(arg1, arg2, arg3));
10470 #endif
10471 #ifdef TARGET_NR_getresuid32
10472     case TARGET_NR_getresuid32:
10473         {
10474             uid_t ruid, euid, suid;
10475             ret = get_errno(getresuid(&ruid, &euid, &suid));
10476             if (!is_error(ret)) {
10477                 if (put_user_u32(ruid, arg1)
10478                     || put_user_u32(euid, arg2)
10479                     || put_user_u32(suid, arg3))
10480                     return -TARGET_EFAULT;
10481             }
10482         }
10483         return ret;
10484 #endif
10485 #ifdef TARGET_NR_setresgid32
10486     case TARGET_NR_setresgid32:
10487         return get_errno(sys_setresgid(arg1, arg2, arg3));
10488 #endif
10489 #ifdef TARGET_NR_getresgid32
10490     case TARGET_NR_getresgid32:
10491         {
10492             gid_t rgid, egid, sgid;
10493             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10494             if (!is_error(ret)) {
10495                 if (put_user_u32(rgid, arg1)
10496                     || put_user_u32(egid, arg2)
10497                     || put_user_u32(sgid, arg3))
10498                     return -TARGET_EFAULT;
10499             }
10500         }
10501         return ret;
10502 #endif
10503 #ifdef TARGET_NR_chown32
10504     case TARGET_NR_chown32:
10505         if (!(p = lock_user_string(arg1)))
10506             return -TARGET_EFAULT;
10507         ret = get_errno(chown(p, arg2, arg3));
10508         unlock_user(p, arg1, 0);
10509         return ret;
10510 #endif
10511 #ifdef TARGET_NR_setuid32
10512     case TARGET_NR_setuid32:
10513         return get_errno(sys_setuid(arg1));
10514 #endif
10515 #ifdef TARGET_NR_setgid32
10516     case TARGET_NR_setgid32:
10517         return get_errno(sys_setgid(arg1));
10518 #endif
10519 #ifdef TARGET_NR_setfsuid32
10520     case TARGET_NR_setfsuid32:
10521         return get_errno(setfsuid(arg1));
10522 #endif
10523 #ifdef TARGET_NR_setfsgid32
10524     case TARGET_NR_setfsgid32:
10525         return get_errno(setfsgid(arg1));
10526 #endif
10527 #ifdef TARGET_NR_mincore
10528     case TARGET_NR_mincore:
10529         {
10530             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10531             if (!a) {
10532                 return -TARGET_ENOMEM;
10533             }
10534             p = lock_user_string(arg3);
10535             if (!p) {
10536                 ret = -TARGET_EFAULT;
10537             } else {
10538                 ret = get_errno(mincore(a, arg2, p));
10539                 unlock_user(p, arg3, ret);
10540             }
10541             unlock_user(a, arg1, 0);
10542         }
10543         return ret;
10544 #endif
10545 #ifdef TARGET_NR_arm_fadvise64_64
10546     case TARGET_NR_arm_fadvise64_64:
10547         /* arm_fadvise64_64 looks like fadvise64_64 but
10548          * with different argument order: fd, advice, offset, len
10549          * rather than the usual fd, offset, len, advice.
10550          * Note that offset and len are both 64-bit so appear as
10551          * pairs of 32-bit registers.
10552          */
10553         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10554                             target_offset64(arg5, arg6), arg2);
10555         return -host_to_target_errno(ret);
10556 #endif
10557 
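    /*
     * On 32-bit ABIs, fadvise64/fadvise64_64 take 64-bit offset (and len)
     * values split across register pairs, and the argument order differs per
     * target, so the cases below shuffle the arguments before handing the
     * reassembled values to posix_fadvise().
     */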
10558 #if TARGET_ABI_BITS == 32
10559 
10560 #ifdef TARGET_NR_fadvise64_64
10561     case TARGET_NR_fadvise64_64:
10562 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10563         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10564         ret = arg2;
10565         arg2 = arg3;
10566         arg3 = arg4;
10567         arg4 = arg5;
10568         arg5 = arg6;
10569         arg6 = ret;
10570 #else
10571         /* 6 args: fd, offset (high, low), len (high, low), advice */
10572         if (regpairs_aligned(cpu_env, num)) {
10573             /* offset is in (3,4), len in (5,6) and advice in 7 */
10574             arg2 = arg3;
10575             arg3 = arg4;
10576             arg4 = arg5;
10577             arg5 = arg6;
10578             arg6 = arg7;
10579         }
10580 #endif
10581         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10582                             target_offset64(arg4, arg5), arg6);
10583         return -host_to_target_errno(ret);
10584 #endif
10585 
10586 #ifdef TARGET_NR_fadvise64
10587     case TARGET_NR_fadvise64:
10588         /* 5 args: fd, offset (high, low), len, advice */
10589         if (regpairs_aligned(cpu_env, num)) {
10590             /* offset is in (3,4), len in 5 and advice in 6 */
10591             arg2 = arg3;
10592             arg3 = arg4;
10593             arg4 = arg5;
10594             arg5 = arg6;
10595         }
10596         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10597         return -host_to_target_errno(ret);
10598 #endif
10599 
10600 #else /* not a 32-bit ABI */
10601 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10602 #ifdef TARGET_NR_fadvise64_64
10603     case TARGET_NR_fadvise64_64:
10604 #endif
10605 #ifdef TARGET_NR_fadvise64
10606     case TARGET_NR_fadvise64:
10607 #endif
10608 #ifdef TARGET_S390X
10609         switch (arg4) {
10610         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10611         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10612         case 6: arg4 = POSIX_FADV_DONTNEED; break;
10613         case 7: arg4 = POSIX_FADV_NOREUSE; break;
10614         default: break;
10615         }
10616 #endif
10617         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10618 #endif
10619 #endif /* end of 64-bit ABI fadvise handling */
10620 
10621 #ifdef TARGET_NR_madvise
10622     case TARGET_NR_madvise:
10623         /* A straight passthrough may not be safe because QEMU sometimes
10624            turns private file-backed mappings into anonymous mappings,
10625            which would break MADV_DONTNEED.
10626            madvise() is only a hint, so ignoring it and returning success is OK.  */
10627         return 0;
10628 #endif
10629 #if TARGET_ABI_BITS == 32
10630     case TARGET_NR_fcntl64:
10631     {
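              /* On 32-bit ABIs fcntl64 uses struct flock64, so the *LK64
                 commands need an explicit guest<->host conversion here;
                 all other commands are forwarded to do_fcntl(). */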
10632         int cmd;
10633         struct flock64 fl;
10634         from_flock64_fn *copyfrom = copy_from_user_flock64;
10635         to_flock64_fn *copyto = copy_to_user_flock64;
10636 
10637 #ifdef TARGET_ARM
10638         if (!((CPUARMState *)cpu_env)->eabi) {
10639             copyfrom = copy_from_user_oabi_flock64;
10640             copyto = copy_to_user_oabi_flock64;
10641         }
10642 #endif
10643 
10644         cmd = target_to_host_fcntl_cmd(arg2);
10645         if (cmd == -TARGET_EINVAL) {
10646             return cmd;
10647         }
10648 
10649         switch (arg2) {
10650         case TARGET_F_GETLK64:
10651             ret = copyfrom(&fl, arg3);
10652             if (ret) {
10653                 break;
10654             }
10655             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10656             if (ret == 0) {
10657                 ret = copyto(arg3, &fl);
10658             }
10659             break;
10660 
10661         case TARGET_F_SETLK64:
10662         case TARGET_F_SETLKW64:
10663             ret = copyfrom(&fl, arg3);
10664             if (ret) {
10665                 break;
10666             }
10667             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10668             break;
10669         default:
10670             ret = do_fcntl(arg1, arg2, arg3);
10671             break;
10672         }
10673         return ret;
10674     }
10675 #endif
10676 #ifdef TARGET_NR_cacheflush
10677     case TARGET_NR_cacheflush:
10678         /* self-modifying code is handled automatically, so nothing needed */
10679         return 0;
10680 #endif
10681 #ifdef TARGET_NR_getpagesize
10682     case TARGET_NR_getpagesize:
10683         return TARGET_PAGE_SIZE;
10684 #endif
10685     case TARGET_NR_gettid:
10686         return get_errno(sys_gettid());
10687 #ifdef TARGET_NR_readahead
10688     case TARGET_NR_readahead:
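              /* On 32-bit ABIs the 64-bit offset arrives as a register pair,
                 shifted up by one register if the ABI wants aligned pairs. */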
10689 #if TARGET_ABI_BITS == 32
10690         if (regpairs_aligned(cpu_env, num)) {
10691             arg2 = arg3;
10692             arg3 = arg4;
10693             arg4 = arg5;
10694         }
10695         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
10696 #else
10697         ret = get_errno(readahead(arg1, arg2, arg3));
10698 #endif
10699         return ret;
10700 #endif
10701 #ifdef CONFIG_ATTR
10702 #ifdef TARGET_NR_setxattr
10703     case TARGET_NR_listxattr:
10704     case TARGET_NR_llistxattr:
10705     {
10706         void *p, *b = 0;
10707         if (arg2) {
10708             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10709             if (!b) {
10710                 return -TARGET_EFAULT;
10711             }
10712         }
10713         p = lock_user_string(arg1);
10714         if (p) {
10715             if (num == TARGET_NR_listxattr) {
10716                 ret = get_errno(listxattr(p, b, arg3));
10717             } else {
10718                 ret = get_errno(llistxattr(p, b, arg3));
10719             }
10720         } else {
10721             ret = -TARGET_EFAULT;
10722         }
10723         unlock_user(p, arg1, 0);
10724         unlock_user(b, arg2, arg3);
10725         return ret;
10726     }
10727     case TARGET_NR_flistxattr:
10728     {
10729         void *b = 0;
10730         if (arg2) {
10731             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10732             if (!b) {
10733                 return -TARGET_EFAULT;
10734             }
10735         }
10736         ret = get_errno(flistxattr(arg1, b, arg3));
10737         unlock_user(b, arg2, arg3);
10738         return ret;
10739     }
10740     case TARGET_NR_setxattr:
10741     case TARGET_NR_lsetxattr:
10742         {
10743             void *p, *n, *v = 0;
10744             if (arg3) {
10745                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10746                 if (!v) {
10747                     return -TARGET_EFAULT;
10748                 }
10749             }
10750             p = lock_user_string(arg1);
10751             n = lock_user_string(arg2);
10752             if (p && n) {
10753                 if (num == TARGET_NR_setxattr) {
10754                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
10755                 } else {
10756                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10757                 }
10758             } else {
10759                 ret = -TARGET_EFAULT;
10760             }
10761             unlock_user(p, arg1, 0);
10762             unlock_user(n, arg2, 0);
10763             unlock_user(v, arg3, 0);
10764         }
10765         return ret;
10766     case TARGET_NR_fsetxattr:
10767         {
10768             void *n, *v = 0;
10769             if (arg3) {
10770                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10771                 if (!v) {
10772                     return -TARGET_EFAULT;
10773                 }
10774             }
10775             n = lock_user_string(arg2);
10776             if (n) {
10777                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10778             } else {
10779                 ret = -TARGET_EFAULT;
10780             }
10781             unlock_user(n, arg2, 0);
10782             unlock_user(v, arg3, 0);
10783         }
10784         return ret;
10785     case TARGET_NR_getxattr:
10786     case TARGET_NR_lgetxattr:
10787         {
10788             void *p, *n, *v = 0;
10789             if (arg3) {
10790                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10791                 if (!v) {
10792                     return -TARGET_EFAULT;
10793                 }
10794             }
10795             p = lock_user_string(arg1);
10796             n = lock_user_string(arg2);
10797             if (p && n) {
10798                 if (num == TARGET_NR_getxattr) {
10799                     ret = get_errno(getxattr(p, n, v, arg4));
10800                 } else {
10801                     ret = get_errno(lgetxattr(p, n, v, arg4));
10802                 }
10803             } else {
10804                 ret = -TARGET_EFAULT;
10805             }
10806             unlock_user(p, arg1, 0);
10807             unlock_user(n, arg2, 0);
10808             unlock_user(v, arg3, arg4);
10809         }
10810         return ret;
10811     case TARGET_NR_fgetxattr:
10812         {
10813             void *n, *v = 0;
10814             if (arg3) {
10815                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10816                 if (!v) {
10817                     return -TARGET_EFAULT;
10818                 }
10819             }
10820             n = lock_user_string(arg2);
10821             if (n) {
10822                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10823             } else {
10824                 ret = -TARGET_EFAULT;
10825             }
10826             unlock_user(n, arg2, 0);
10827             unlock_user(v, arg3, arg4);
10828         }
10829         return ret;
10830     case TARGET_NR_removexattr:
10831     case TARGET_NR_lremovexattr:
10832         {
10833             void *p, *n;
10834             p = lock_user_string(arg1);
10835             n = lock_user_string(arg2);
10836             if (p && n) {
10837                 if (num == TARGET_NR_removexattr) {
10838                     ret = get_errno(removexattr(p, n));
10839                 } else {
10840                     ret = get_errno(lremovexattr(p, n));
10841                 }
10842             } else {
10843                 ret = -TARGET_EFAULT;
10844             }
10845             unlock_user(p, arg1, 0);
10846             unlock_user(n, arg2, 0);
10847         }
10848         return ret;
10849     case TARGET_NR_fremovexattr:
10850         {
10851             void *n;
10852             n = lock_user_string(arg2);
10853             if (n) {
10854                 ret = get_errno(fremovexattr(arg1, n));
10855             } else {
10856                 ret = -TARGET_EFAULT;
10857             }
10858             unlock_user(n, arg2, 0);
10859         }
10860         return ret;
10861 #endif
10862 #endif /* CONFIG_ATTR */
10863 #ifdef TARGET_NR_set_thread_area
10864     case TARGET_NR_set_thread_area:
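            /* Thread-area handling is architecture specific: MIPS and CRIS
               store the value in a CPU register, i386 updates a TLS descriptor
               and m68k keeps it in the TaskState. */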
10865 #if defined(TARGET_MIPS)
10866       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10867       return 0;
10868 #elif defined(TARGET_CRIS)
10869       if (arg1 & 0xff)
10870           ret = -TARGET_EINVAL;
10871       else {
10872           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10873           ret = 0;
10874       }
10875       return ret;
10876 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10877       return do_set_thread_area(cpu_env, arg1);
10878 #elif defined(TARGET_M68K)
10879       {
10880           TaskState *ts = cpu->opaque;
10881           ts->tp_value = arg1;
10882           return 0;
10883       }
10884 #else
10885       return -TARGET_ENOSYS;
10886 #endif
10887 #endif
10888 #ifdef TARGET_NR_get_thread_area
10889     case TARGET_NR_get_thread_area:
10890 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10891         return do_get_thread_area(cpu_env, arg1);
10892 #elif defined(TARGET_M68K)
10893         {
10894             TaskState *ts = cpu->opaque;
10895             return ts->tp_value;
10896         }
10897 #else
10898         return -TARGET_ENOSYS;
10899 #endif
10900 #endif
10901 #ifdef TARGET_NR_getdomainname
10902     case TARGET_NR_getdomainname:
10903         return -TARGET_ENOSYS;
10904 #endif
10905 
10906 #ifdef TARGET_NR_clock_settime
10907     case TARGET_NR_clock_settime:
10908     {
10909         struct timespec ts;
10910 
10911         ret = target_to_host_timespec(&ts, arg2);
10912         if (!is_error(ret)) {
10913             ret = get_errno(clock_settime(arg1, &ts));
10914         }
10915         return ret;
10916     }
10917 #endif
10918 #ifdef TARGET_NR_clock_gettime
10919     case TARGET_NR_clock_gettime:
10920     {
10921         struct timespec ts;
10922         ret = get_errno(clock_gettime(arg1, &ts));
10923         if (!is_error(ret)) {
10924             ret = host_to_target_timespec(arg2, &ts);
10925         }
10926         return ret;
10927     }
10928 #endif
10929 #ifdef TARGET_NR_clock_getres
10930     case TARGET_NR_clock_getres:
10931     {
10932         struct timespec ts;
10933         ret = get_errno(clock_getres(arg1, &ts));
10934         if (!is_error(ret)) {
10935             host_to_target_timespec(arg2, &ts);
10936         }
10937         return ret;
10938     }
10939 #endif
10940 #ifdef TARGET_NR_clock_nanosleep
10941     case TARGET_NR_clock_nanosleep:
10942     {
10943         struct timespec ts;
10944         if (target_to_host_timespec(&ts, arg3)) {
                  return -TARGET_EFAULT;
              }
10945         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
10946                                              &ts, arg4 ? &ts : NULL));
10947         if (arg4) {
10948             host_to_target_timespec(arg4, &ts);
              }
10949 
10950 #if defined(TARGET_PPC)
10951         /* clock_nanosleep is odd in that it returns positive errno values.
10952          * On PPC, CR0 bit 3 should be set in such a situation. */
10953         if (ret && ret != -TARGET_ERESTARTSYS) {
10954             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
10955         }
10956 #endif
10957         return ret;
10958     }
10959 #endif
10960 
10961 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10962     case TARGET_NR_set_tid_address:
10963         return get_errno(set_tid_address((int *)g2h(arg1)));
10964 #endif
10965 
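          /* Signal numbers differ between guest and host, so translate the
             signal argument before forwarding to the host tkill/tgkill. */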
10966     case TARGET_NR_tkill:
10967         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10968 
10969     case TARGET_NR_tgkill:
10970         return get_errno(safe_tgkill((int)arg1, (int)arg2,
10971                          target_to_host_signal(arg3)));
10972 
10973 #ifdef TARGET_NR_set_robust_list
10974     case TARGET_NR_set_robust_list:
10975     case TARGET_NR_get_robust_list:
10976         /* The ABI for supporting robust futexes has userspace pass
10977          * the kernel a pointer to a linked list which is updated by
10978          * userspace after the syscall; the list is walked by the kernel
10979          * when the thread exits. Since the linked list in QEMU guest
10980          * memory isn't a valid linked list for the host and we have
10981          * no way to reliably intercept the thread-death event, we can't
10982          * support these. Silently return ENOSYS so that guest userspace
10983          * falls back to a non-robust futex implementation (which should
10984          * be OK except in the corner case of the guest crashing while
10985          * holding a mutex that is shared with another process via
10986          * shared memory).
10987          */
10988         return -TARGET_ENOSYS;
10989 #endif
10990 
10991 #if defined(TARGET_NR_utimensat)
10992     case TARGET_NR_utimensat:
10993         {
10994             struct timespec *tsp, ts[2];
10995             if (!arg3) {
10996                 tsp = NULL;
10997             } else {
10998                 target_to_host_timespec(ts, arg3);
10999                 target_to_host_timespec(ts + 1,
                          arg3 + sizeof(struct target_timespec));
11000                 tsp = ts;
11001             }
11002             if (!arg2) {
11003                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11004             } else {
11005                 if (!(p = lock_user_string(arg2))) {
11006                     return -TARGET_EFAULT;
11007                 }
11008                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11009                 unlock_user(p, arg2, 0);
11010             }
11011         }
11012         return ret;
11013 #endif
11014     case TARGET_NR_futex:
11015         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11016 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11017     case TARGET_NR_inotify_init:
11018         ret = get_errno(sys_inotify_init());
11019         if (ret >= 0) {
11020             fd_trans_register(ret, &target_inotify_trans);
11021         }
11022         return ret;
11023 #endif
11024 #ifdef CONFIG_INOTIFY1
11025 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11026     case TARGET_NR_inotify_init1:
11027         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11028                                           fcntl_flags_tbl)));
11029         if (ret >= 0) {
11030             fd_trans_register(ret, &target_inotify_trans);
11031         }
11032         return ret;
11033 #endif
11034 #endif
11035 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11036     case TARGET_NR_inotify_add_watch:
11037         p = lock_user_string(arg2);
              if (!p) {
                  return -TARGET_EFAULT;
              }
11038         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11039         unlock_user(p, arg2, 0);
11040         return ret;
11041 #endif
11042 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11043     case TARGET_NR_inotify_rm_watch:
11044         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11045 #endif
11046 
11047 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11048     case TARGET_NR_mq_open:
11049         {
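                  /* Translate the O_* open flags to host values and, if the
                     caller supplied a struct mq_attr, convert it before
                     calling the host mq_open(). */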
11050             struct mq_attr posix_mq_attr;
11051             struct mq_attr *pposix_mq_attr;
11052             int host_flags;
11053 
11054             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11055             pposix_mq_attr = NULL;
11056             if (arg4) {
11057                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11058                     return -TARGET_EFAULT;
11059                 }
11060                 pposix_mq_attr = &posix_mq_attr;
11061             }
11062             p = lock_user_string(arg1 - 1);
11063             if (!p) {
11064                 return -TARGET_EFAULT;
11065             }
11066             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11067             unlock_user(p, arg1, 0);
11068         }
11069         return ret;
11070 
11071     case TARGET_NR_mq_unlink:
11072         p = lock_user_string(arg1 - 1);
11073         if (!p) {
11074             return -TARGET_EFAULT;
11075         }
11076         ret = get_errno(mq_unlink(p));
11077         unlock_user(p, arg1, 0);
11078         return ret;
11079 
11080     case TARGET_NR_mq_timedsend:
11081         {
11082             struct timespec ts;
11083 
11084             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11085             if (arg5 != 0) {
11086                 target_to_host_timespec(&ts, arg5);
11087                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11088                 host_to_target_timespec(arg5, &ts);
11089             } else {
11090                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11091             }
11092             unlock_user(p, arg2, arg3);
11093         }
11094         return ret;
11095 
11096     case TARGET_NR_mq_timedreceive:
11097         {
11098             struct timespec ts;
11099             unsigned int prio;
11100 
11101             p = lock_user(VERIFY_WRITE, arg2, arg3, 1);
11102             if (arg5 != 0) {
11103                 target_to_host_timespec(&ts, arg5);
11104                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11105                                                      &prio, &ts));
11106                 host_to_target_timespec(arg5, &ts);
11107             } else {
11108                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11109                                                      &prio, NULL));
11110             }
11111             unlock_user(p, arg2, arg3);
11112             if (arg4 != 0)
11113                 put_user_u32(prio, arg4);
11114         }
11115         return ret;
11116 
11117     /* Not implemented for now... */
11118 /*     case TARGET_NR_mq_notify: */
11119 /*         break; */
11120 
11121     case TARGET_NR_mq_getsetattr:
11122         {
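                  /* arg2 (optional) points to new attributes to apply;
                     arg3 (optional) receives the previous/current attributes. */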
11123             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11124             ret = 0;
11125             if (arg2 != 0) {
11126                 if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                          return -TARGET_EFAULT;
                      }
11127                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11128                                            &posix_mq_attr_out));
11129             } else if (arg3 != 0) {
11130                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11131             }
11132             if (ret == 0 && arg3 != 0) {
11133                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11134             }
11135         }
11136         return ret;
11137 #endif
11138 
11139 #ifdef CONFIG_SPLICE
11140 #ifdef TARGET_NR_tee
11141     case TARGET_NR_tee:
11142         {
11143             ret = get_errno(tee(arg1, arg2, arg3, arg4));
11144         }
11145         return ret;
11146 #endif
11147 #ifdef TARGET_NR_splice
11148     case TARGET_NR_splice:
11149         {
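                  /* splice(2) takes optional in/out offsets: read them from
                     guest memory, do the host splice, then write the updated
                     offsets back. */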
11150             loff_t loff_in, loff_out;
11151             loff_t *ploff_in = NULL, *ploff_out = NULL;
11152             if (arg2) {
11153                 if (get_user_u64(loff_in, arg2)) {
11154                     return -TARGET_EFAULT;
11155                 }
11156                 ploff_in = &loff_in;
11157             }
11158             if (arg4) {
11159                 if (get_user_u64(loff_out, arg4)) {
11160                     return -TARGET_EFAULT;
11161                 }
11162                 ploff_out = &loff_out;
11163             }
11164             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11165             if (arg2) {
11166                 if (put_user_u64(loff_in, arg2)) {
11167                     return -TARGET_EFAULT;
11168                 }
11169             }
11170             if (arg4) {
11171                 if (put_user_u64(loff_out, arg4)) {
11172                     return -TARGET_EFAULT;
11173                 }
11174             }
11175         }
11176         return ret;
11177 #endif
11178 #ifdef TARGET_NR_vmsplice
11179     case TARGET_NR_vmsplice:
11180         {
11181             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11182             if (vec != NULL) {
11183                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11184                 unlock_iovec(vec, arg2, arg3, 0);
11185             } else {
11186                 ret = -host_to_target_errno(errno);
11187             }
11188         }
11189         return ret;
11190 #endif
11191 #endif /* CONFIG_SPLICE */
11192 #ifdef CONFIG_EVENTFD
11193 #if defined(TARGET_NR_eventfd)
11194     case TARGET_NR_eventfd:
11195         ret = get_errno(eventfd(arg1, 0));
11196         if (ret >= 0) {
11197             fd_trans_register(ret, &target_eventfd_trans);
11198         }
11199         return ret;
11200 #endif
11201 #if defined(TARGET_NR_eventfd2)
11202     case TARGET_NR_eventfd2:
11203     {
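              /* Translate the O_NONBLOCK/O_CLOEXEC bits, whose values are
                 target specific; any other flag bits pass through unchanged. */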
11204         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11205         if (arg2 & TARGET_O_NONBLOCK) {
11206             host_flags |= O_NONBLOCK;
11207         }
11208         if (arg2 & TARGET_O_CLOEXEC) {
11209             host_flags |= O_CLOEXEC;
11210         }
11211         ret = get_errno(eventfd(arg1, host_flags));
11212         if (ret >= 0) {
11213             fd_trans_register(ret, &target_eventfd_trans);
11214         }
11215         return ret;
11216     }
11217 #endif
11218 #endif /* CONFIG_EVENTFD  */
11219 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11220     case TARGET_NR_fallocate:
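              /* On 32-bit ABIs the 64-bit offset and length are passed as
                 register pairs (arg3/arg4 and arg5/arg6). */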
11221 #if TARGET_ABI_BITS == 32
11222         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11223                                   target_offset64(arg5, arg6)));
11224 #else
11225         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11226 #endif
11227         return ret;
11228 #endif
11229 #if defined(CONFIG_SYNC_FILE_RANGE)
11230 #if defined(TARGET_NR_sync_file_range)
11231     case TARGET_NR_sync_file_range:
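              /* On 32-bit ABIs offset and nbytes are 64-bit register pairs;
                 MIPS passes them one register later, with the flags in arg7. */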
11232 #if TARGET_ABI_BITS == 32
11233 #if defined(TARGET_MIPS)
11234         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11235                                         target_offset64(arg5, arg6), arg7));
11236 #else
11237         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11238                                         target_offset64(arg4, arg5), arg6));
11239 #endif /* !TARGET_MIPS */
11240 #else
11241         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11242 #endif
11243         return ret;
11244 #endif
11245 #if defined(TARGET_NR_sync_file_range2)
11246     case TARGET_NR_sync_file_range2:
11247         /* This is like sync_file_range but the arguments are reordered */
11248 #if TARGET_ABI_BITS == 32
11249         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11250                                         target_offset64(arg5, arg6), arg2));
11251 #else
11252         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11253 #endif
11254         return ret;
11255 #endif
11256 #endif
11257 #if defined(TARGET_NR_signalfd4)
11258     case TARGET_NR_signalfd4:
11259         return do_signalfd4(arg1, arg2, arg4);
11260 #endif
11261 #if defined(TARGET_NR_signalfd)
11262     case TARGET_NR_signalfd:
11263         return do_signalfd4(arg1, arg2, 0);
11264 #endif
11265 #if defined(CONFIG_EPOLL)
11266 #if defined(TARGET_NR_epoll_create)
11267     case TARGET_NR_epoll_create:
11268         return get_errno(epoll_create(arg1));
11269 #endif
11270 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11271     case TARGET_NR_epoll_create1:
11272         return get_errno(epoll_create1(arg1));
11273 #endif
11274 #if defined(TARGET_NR_epoll_ctl)
11275     case TARGET_NR_epoll_ctl:
11276     {
11277         struct epoll_event ep;
11278         struct epoll_event *epp = 0;
11279         if (arg4) {
11280             struct target_epoll_event *target_ep;
11281             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11282                 return -TARGET_EFAULT;
11283             }
11284             ep.events = tswap32(target_ep->events);
11285             /* The epoll_data_t union is just opaque data to the kernel,
11286              * so we transfer all 64 bits across and need not worry what
11287              * actual data type it is.
11288              */
11289             ep.data.u64 = tswap64(target_ep->data.u64);
11290             unlock_user_struct(target_ep, arg4, 0);
11291             epp = &ep;
11292         }
11293         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11294     }
11295 #endif
11296 
11297 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11298 #if defined(TARGET_NR_epoll_wait)
11299     case TARGET_NR_epoll_wait:
11300 #endif
11301 #if defined(TARGET_NR_epoll_pwait)
11302     case TARGET_NR_epoll_pwait:
11303 #endif
11304     {
11305         struct target_epoll_event *target_ep;
11306         struct epoll_event *ep;
11307         int epfd = arg1;
11308         int maxevents = arg3;
11309         int timeout = arg4;
11310 
11311         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11312             return -TARGET_EINVAL;
11313         }
11314 
11315         target_ep = lock_user(VERIFY_WRITE, arg2,
11316                               maxevents * sizeof(struct target_epoll_event), 1);
11317         if (!target_ep) {
11318             return -TARGET_EFAULT;
11319         }
11320 
11321         ep = g_try_new(struct epoll_event, maxevents);
11322         if (!ep) {
11323             unlock_user(target_ep, arg2, 0);
11324             return -TARGET_ENOMEM;
11325         }
11326 
11327         switch (num) {
11328 #if defined(TARGET_NR_epoll_pwait)
11329         case TARGET_NR_epoll_pwait:
11330         {
11331             target_sigset_t *target_set;
11332             sigset_t _set, *set = &_set;
11333 
11334             if (arg5) {
11335                 if (arg6 != sizeof(target_sigset_t)) {
11336                     ret = -TARGET_EINVAL;
11337                     break;
11338                 }
11339 
11340                 target_set = lock_user(VERIFY_READ, arg5,
11341                                        sizeof(target_sigset_t), 1);
11342                 if (!target_set) {
11343                     ret = -TARGET_EFAULT;
11344                     break;
11345                 }
11346                 target_to_host_sigset(set, target_set);
11347                 unlock_user(target_set, arg5, 0);
11348             } else {
11349                 set = NULL;
11350             }
11351 
11352             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11353                                              set, SIGSET_T_SIZE));
11354             break;
11355         }
11356 #endif
11357 #if defined(TARGET_NR_epoll_wait)
11358         case TARGET_NR_epoll_wait:
11359             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11360                                              NULL, 0));
11361             break;
11362 #endif
11363         default:
11364             ret = -TARGET_ENOSYS;
11365         }
11366         if (!is_error(ret)) {
11367             int i;
11368             for (i = 0; i < ret; i++) {
11369                 target_ep[i].events = tswap32(ep[i].events);
11370                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11371             }
11372             unlock_user(target_ep, arg2,
11373                         ret * sizeof(struct target_epoll_event));
11374         } else {
11375             unlock_user(target_ep, arg2, 0);
11376         }
11377         g_free(ep);
11378         return ret;
11379     }
11380 #endif
11381 #endif
11382 #ifdef TARGET_NR_prlimit64
11383     case TARGET_NR_prlimit64:
11384     {
11385         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11386         struct target_rlimit64 *target_rnew, *target_rold;
11387         struct host_rlimit64 rnew, rold, *rnewp = 0;
11388         int resource = target_to_host_resource(arg2);
11389         if (arg3) {
11390             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11391                 return -TARGET_EFAULT;
11392             }
11393             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11394             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11395             unlock_user_struct(target_rnew, arg3, 0);
11396             rnewp = &rnew;
11397         }
11398 
11399         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11400         if (!is_error(ret) && arg4) {
11401             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11402                 return -TARGET_EFAULT;
11403             }
11404             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11405             target_rold->rlim_max = tswap64(rold.rlim_max);
11406             unlock_user_struct(target_rold, arg4, 1);
11407         }
11408         return ret;
11409     }
11410 #endif
11411 #ifdef TARGET_NR_gethostname
11412     case TARGET_NR_gethostname:
11413     {
11414         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11415         if (name) {
11416             ret = get_errno(gethostname(name, arg2));
11417             unlock_user(name, arg1, arg2);
11418         } else {
11419             ret = -TARGET_EFAULT;
11420         }
11421         return ret;
11422     }
11423 #endif
11424 #ifdef TARGET_NR_atomic_cmpxchg_32
11425     case TARGET_NR_atomic_cmpxchg_32:
11426     {
11427         /* should use start_exclusive from main.c */
11428         abi_ulong mem_value;
11429         if (get_user_u32(mem_value, arg6)) {
11430             target_siginfo_t info;
11431             info.si_signo = SIGSEGV;
11432             info.si_errno = 0;
11433             info.si_code = TARGET_SEGV_MAPERR;
11434             info._sifields._sigfault._addr = arg6;
11435             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11436                          QEMU_SI_FAULT, &info);
11437             ret = 0xdeadbeef;
11438             return ret;
11439         }
11440         if (mem_value == arg2)
11441             put_user_u32(arg1, arg6);
11442         return mem_value;
11443     }
11444 #endif
11445 #ifdef TARGET_NR_atomic_barrier
11446     case TARGET_NR_atomic_barrier:
11447         /* Like the kernel implementation and the QEMU ARM barrier code,
11448            treat this as a no-op.  */
11449         return 0;
11450 #endif
11451 
11452 #ifdef TARGET_NR_timer_create
11453     case TARGET_NR_timer_create:
11454     {
11455         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11456 
11457         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11458 
11459         int clkid = arg1;
11460         int timer_index = next_free_host_timer();
11461 
11462         if (timer_index < 0) {
11463             ret = -TARGET_EAGAIN;
11464         } else {
11465             timer_t *phtimer = g_posix_timers + timer_index;
11466 
11467             if (arg2) {
11468                 phost_sevp = &host_sevp;
11469                 ret = target_to_host_sigevent(phost_sevp, arg2);
11470                 if (ret != 0) {
11471                     return ret;
11472                 }
11473             }
11474 
11475             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11476             if (ret) {
11477                 phtimer = NULL;
11478             } else {
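                          /* The guest-visible timer ID encodes TIMER_MAGIC plus
                             the index into g_posix_timers; get_timer_id()
                             undoes this mapping on the way back in. */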
11479                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11480                     return -TARGET_EFAULT;
11481                 }
11482             }
11483         }
11484         return ret;
11485     }
11486 #endif
11487 
11488 #ifdef TARGET_NR_timer_settime
11489     case TARGET_NR_timer_settime:
11490     {
11491         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11492          * struct itimerspec * old_value */
11493         target_timer_t timerid = get_timer_id(arg1);
11494 
11495         if (timerid < 0) {
11496             ret = timerid;
11497         } else if (arg3 == 0) {
11498             ret = -TARGET_EINVAL;
11499         } else {
11500             timer_t htimer = g_posix_timers[timerid];
11501             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11502 
11503             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11504                 return -TARGET_EFAULT;
11505             }
11506             ret = get_errno(
11507                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11508             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11509                 return -TARGET_EFAULT;
11510             }
11511         }
11512         return ret;
11513     }
11514 #endif
11515 
11516 #ifdef TARGET_NR_timer_gettime
11517     case TARGET_NR_timer_gettime:
11518     {
11519         /* args: timer_t timerid, struct itimerspec *curr_value */
11520         target_timer_t timerid = get_timer_id(arg1);
11521 
11522         if (timerid < 0) {
11523             ret = timerid;
11524         } else if (!arg2) {
11525             ret = -TARGET_EFAULT;
11526         } else {
11527             timer_t htimer = g_posix_timers[timerid];
11528             struct itimerspec hspec;
11529             ret = get_errno(timer_gettime(htimer, &hspec));
11530 
11531             if (host_to_target_itimerspec(arg2, &hspec)) {
11532                 ret = -TARGET_EFAULT;
11533             }
11534         }
11535         return ret;
11536     }
11537 #endif
11538 
11539 #ifdef TARGET_NR_timer_getoverrun
11540     case TARGET_NR_timer_getoverrun:
11541     {
11542         /* args: timer_t timerid */
11543         target_timer_t timerid = get_timer_id(arg1);
11544 
11545         if (timerid < 0) {
11546             ret = timerid;
11547         } else {
11548             timer_t htimer = g_posix_timers[timerid];
11549             ret = get_errno(timer_getoverrun(htimer));
11550         }
11552         return ret;
11553     }
11554 #endif
11555 
11556 #ifdef TARGET_NR_timer_delete
11557     case TARGET_NR_timer_delete:
11558     {
11559         /* args: timer_t timerid */
11560         target_timer_t timerid = get_timer_id(arg1);
11561 
11562         if (timerid < 0) {
11563             ret = timerid;
11564         } else {
11565             timer_t htimer = g_posix_timers[timerid];
11566             ret = get_errno(timer_delete(htimer));
11567             g_posix_timers[timerid] = 0;
11568         }
11569         return ret;
11570     }
11571 #endif
11572 
11573 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11574     case TARGET_NR_timerfd_create:
11575         return get_errno(timerfd_create(arg1,
11576                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11577 #endif
11578 
11579 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11580     case TARGET_NR_timerfd_gettime:
11581         {
11582             struct itimerspec its_curr;
11583 
11584             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11585 
11586             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11587                 return -TARGET_EFAULT;
11588             }
11589         }
11590         return ret;
11591 #endif
11592 
11593 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11594     case TARGET_NR_timerfd_settime:
11595         {
11596             struct itimerspec its_new, its_old, *p_new;
11597 
11598             if (arg3) {
11599                 if (target_to_host_itimerspec(&its_new, arg3)) {
11600                     return -TARGET_EFAULT;
11601                 }
11602                 p_new = &its_new;
11603             } else {
11604                 p_new = NULL;
11605             }
11606 
11607             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11608 
11609             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11610                 return -TARGET_EFAULT;
11611             }
11612         }
11613         return ret;
11614 #endif
11615 
11616 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11617     case TARGET_NR_ioprio_get:
11618         return get_errno(ioprio_get(arg1, arg2));
11619 #endif
11620 
11621 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11622     case TARGET_NR_ioprio_set:
11623         return get_errno(ioprio_set(arg1, arg2, arg3));
11624 #endif
11625 
11626 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11627     case TARGET_NR_setns:
11628         return get_errno(setns(arg1, arg2));
11629 #endif
11630 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11631     case TARGET_NR_unshare:
11632         return get_errno(unshare(arg1));
11633 #endif
11634 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11635     case TARGET_NR_kcmp:
11636         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11637 #endif
11638 #ifdef TARGET_NR_swapcontext
11639     case TARGET_NR_swapcontext:
11640         /* PowerPC specific.  */
11641         return do_swapcontext(cpu_env, arg1, arg2, arg3);
11642 #endif
11643 
11644     default:
11645         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11646         return -TARGET_ENOSYS;
11647     }
11648     return ret;
11649 }
11650 
11651 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11652                     abi_long arg2, abi_long arg3, abi_long arg4,
11653                     abi_long arg5, abi_long arg6, abi_long arg7,
11654                     abi_long arg8)
11655 {
11656     CPUState *cpu = ENV_GET_CPU(cpu_env);
11657     abi_long ret;
11658 
11659 #ifdef DEBUG_ERESTARTSYS
11660     /* Debug-only code for exercising the syscall-restart code paths
11661      * in the per-architecture cpu main loops: restart every syscall
11662      * the guest makes once before letting it through.
11663      */
11664     {
11665         static bool flag;
11666         flag = !flag;
11667         if (flag) {
11668             return -TARGET_ERESTARTSYS;
11669         }
11670     }
11671 #endif
11672 
11673     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11674                              arg5, arg6, arg7, arg8);
11675 
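          /* With -strace enabled, log the syscall and its result around the
             actual dispatch. */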
11676     if (unlikely(do_strace)) {
11677         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11678         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11679                           arg5, arg6, arg7, arg8);
11680         print_syscall_ret(num, ret);
11681     } else {
11682         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11683                           arg5, arg6, arg7, arg8);
11684     }
11685 
11686     trace_guest_user_syscall_ret(cpu, num, ret);
11687     return ret;
11688 }
11689