xref: /openbmc/qemu/linux-user/syscall.c (revision 745a4f5e)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
77 
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
84 
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #if defined(CONFIG_USBFS)
98 #include <linux/usbdevice_fs.h>
99 #include <linux/usb/ch9.h>
100 #endif
101 #include <linux/vt.h>
102 #include <linux/dm-ioctl.h>
103 #include <linux/reboot.h>
104 #include <linux/route.h>
105 #include <linux/filter.h>
106 #include <linux/blkpg.h>
107 #include <netpacket/packet.h>
108 #include <linux/netlink.h>
109 #include "linux_loop.h"
110 #include "uname.h"
111 
112 #include "qemu.h"
113 #include "fd-trans.h"
114 
115 #ifndef CLONE_IO
116 #define CLONE_IO                0x80000000      /* Clone io context */
117 #endif
118 
119 /* We can't directly call the host clone syscall, because this will
120  * badly confuse libc (breaking mutexes, for example). So we must
121  * divide clone flags into:
122  *  * flag combinations that look like pthread_create()
123  *  * flag combinations that look like fork()
124  *  * flags we can implement within QEMU itself
125  *  * flags we can't support and will return an error for
126  */
127 /* For thread creation, all these flags must be present; for
128  * fork, none must be present.
129  */
130 #define CLONE_THREAD_FLAGS                              \
131     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
132      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
133 
134 /* These flags are ignored:
135  * CLONE_DETACHED is now ignored by the kernel;
136  * CLONE_IO is just an optimisation hint to the I/O scheduler
137  */
138 #define CLONE_IGNORED_FLAGS                     \
139     (CLONE_DETACHED | CLONE_IO)
140 
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS               \
143     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
144      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
145 
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
148     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
149      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
150 
151 #define CLONE_INVALID_FORK_FLAGS                                        \
152     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
153 
154 #define CLONE_INVALID_THREAD_FLAGS                                      \
155     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
156        CLONE_IGNORED_FLAGS))
157 
158 /* CLONE_VFORK is special-cased early in do_fork(). The other flag bits
159  * have almost all been allocated. We cannot support any of
160  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162  * The checks against the invalid thread masks above will catch these.
163  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
164  */
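/* For illustration, the masks above classify a clone flags value roughly
 * like this (a simplified sketch of the checks performed later in
 * do_fork(), not the exact code):
 *
 *     if (flags & CLONE_VM) {
 *         // thread-like clone: all of CLONE_THREAD_FLAGS must be set and
 *         // nothing in CLONE_INVALID_THREAD_FLAGS may be set
 *     } else {
 *         // fork-like clone: nothing in CLONE_INVALID_FORK_FLAGS may be set
 *     }
 */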
165 
166 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
167  * once. This exercises the codepaths for restart.
168  */
169 //#define DEBUG_ERESTARTSYS
170 
171 //#include <linux/msdos_fs.h>
172 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
173 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
174 
175 #undef _syscall0
176 #undef _syscall1
177 #undef _syscall2
178 #undef _syscall3
179 #undef _syscall4
180 #undef _syscall5
181 #undef _syscall6
182 
183 #define _syscall0(type,name)		\
184 static type name (void)			\
185 {					\
186 	return syscall(__NR_##name);	\
187 }
188 
189 #define _syscall1(type,name,type1,arg1)		\
190 static type name (type1 arg1)			\
191 {						\
192 	return syscall(__NR_##name, arg1);	\
193 }
194 
195 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
196 static type name (type1 arg1,type2 arg2)		\
197 {							\
198 	return syscall(__NR_##name, arg1, arg2);	\
199 }
200 
201 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
202 static type name (type1 arg1,type2 arg2,type3 arg3)		\
203 {								\
204 	return syscall(__NR_##name, arg1, arg2, arg3);		\
205 }
206 
207 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
208 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
209 {										\
210 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
211 }
212 
213 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
214 		  type5,arg5)							\
215 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
216 {										\
217 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
218 }
219 
220 
221 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
222 		  type5,arg5,type6,arg6)					\
223 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
224                   type6 arg6)							\
225 {										\
226 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
227 }
228 
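/* As a concrete example, the declaration
 *
 *     _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp,
 *               uint, count)
 *
 * used below expands to roughly:
 *
 *     static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 *
 * i.e. a thin static wrapper around the raw host syscall() entry point
 * (with __NR_sys_getdents aliased to __NR_getdents just below), leaving
 * errno handling to the caller.
 */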
229 
230 #define __NR_sys_uname __NR_uname
231 #define __NR_sys_getcwd1 __NR_getcwd
232 #define __NR_sys_getdents __NR_getdents
233 #define __NR_sys_getdents64 __NR_getdents64
234 #define __NR_sys_getpriority __NR_getpriority
235 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
236 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
237 #define __NR_sys_syslog __NR_syslog
238 #define __NR_sys_futex __NR_futex
239 #define __NR_sys_inotify_init __NR_inotify_init
240 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
241 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
242 
243 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
244 #define __NR__llseek __NR_lseek
245 #endif
246 
247 /* Newer kernel ports have llseek() instead of _llseek() */
248 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
249 #define TARGET_NR__llseek TARGET_NR_llseek
250 #endif
251 
252 #ifdef __NR_gettid
253 _syscall0(int, gettid)
254 #else
255 /* This is a replacement for the host gettid() and must return a host
256    errno. */
257 static int gettid(void) {
258     return -ENOSYS;
259 }
260 #endif
261 
262 /* For the 64-bit guest on 32-bit host case we must emulate
263  * getdents using getdents64, because otherwise the host
264  * might hand us back more dirent records than we can fit
265  * into the guest buffer after structure format conversion.
266  * Otherwise we emulate the guest getdents using the host getdents, if available.
267  */
268 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
269 #define EMULATE_GETDENTS_WITH_GETDENTS
270 #endif
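/* For example, a 64-bit guest emulated on a 32-bit host (HOST_LONG_BITS ==
 * 32, TARGET_ABI_BITS == 64) leaves EMULATE_GETDENTS_WITH_GETDENTS
 * undefined even when the host provides __NR_getdents, so the guest's
 * getdents is serviced via the sys_getdents64 wrapper declared below.
 */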
271 
272 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
273 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
274 #endif
275 #if (defined(TARGET_NR_getdents) && \
276       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
277     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
278 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
279 #endif
280 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
281 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
282           loff_t *, res, uint, wh);
283 #endif
284 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
285 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
286           siginfo_t *, uinfo)
287 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
288 #ifdef __NR_exit_group
289 _syscall1(int,exit_group,int,error_code)
290 #endif
291 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
292 _syscall1(int,set_tid_address,int *,tidptr)
293 #endif
294 #if defined(TARGET_NR_futex) && defined(__NR_futex)
295 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
296           const struct timespec *,timeout,int *,uaddr2,int,val3)
297 #endif
298 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
299 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
300           unsigned long *, user_mask_ptr);
301 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
302 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
303           unsigned long *, user_mask_ptr);
304 #define __NR_sys_getcpu __NR_getcpu
305 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
306 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
307           void *, arg);
308 _syscall2(int, capget, struct __user_cap_header_struct *, header,
309           struct __user_cap_data_struct *, data);
310 _syscall2(int, capset, struct __user_cap_header_struct *, header,
311           struct __user_cap_data_struct *, data);
312 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
313 _syscall2(int, ioprio_get, int, which, int, who)
314 #endif
315 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
316 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
317 #endif
318 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
319 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
320 #endif
321 
322 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
323 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
324           unsigned long, idx1, unsigned long, idx2)
325 #endif
326 
327 static bitmask_transtbl fcntl_flags_tbl[] = {
328   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
329   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
330   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
331   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
332   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
333   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
334   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
335   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
336   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
337   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
338   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
339   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
340   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
341 #if defined(O_DIRECT)
342   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
343 #endif
344 #if defined(O_NOATIME)
345   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
346 #endif
347 #if defined(O_CLOEXEC)
348   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
349 #endif
350 #if defined(O_PATH)
351   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
352 #endif
353 #if defined(O_TMPFILE)
354   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
355 #endif
356   /* Don't terminate the list prematurely on 64-bit host+guest.  */
357 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
358   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
359 #endif
360   { 0, 0, 0, 0 }
361 };
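/* A rough usage sketch: open()/fcntl() style flags coming from the guest
 * are translated through this table by the generic bitmask helpers defined
 * later in this file, e.g.
 *
 *     int host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *
 * Each row is (target_mask, target_bits, host_mask, host_bits): when the
 * masked target value equals target_bits, the corresponding host_bits are
 * ORed into the result (and symmetrically for host_to_target_bitmask()).
 */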
362 
363 static int sys_getcwd1(char *buf, size_t size)
364 {
365   if (getcwd(buf, size) == NULL) {
366       /* getcwd() sets errno */
367       return (-1);
368   }
369   return strlen(buf)+1;
370 }
371 
372 #ifdef TARGET_NR_utimensat
373 #if defined(__NR_utimensat)
374 #define __NR_sys_utimensat __NR_utimensat
375 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
376           const struct timespec *,tsp,int,flags)
377 #else
378 static int sys_utimensat(int dirfd, const char *pathname,
379                          const struct timespec times[2], int flags)
380 {
381     errno = ENOSYS;
382     return -1;
383 }
384 #endif
385 #endif /* TARGET_NR_utimensat */
386 
387 #ifdef TARGET_NR_renameat2
388 #if defined(__NR_renameat2)
389 #define __NR_sys_renameat2 __NR_renameat2
390 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
391           const char *, new, unsigned int, flags)
392 #else
393 static int sys_renameat2(int oldfd, const char *old,
394                          int newfd, const char *new, int flags)
395 {
396     if (flags == 0) {
397         return renameat(oldfd, old, newfd, new);
398     }
399     errno = ENOSYS;
400     return -1;
401 }
402 #endif
403 #endif /* TARGET_NR_renameat2 */
404 
405 #ifdef CONFIG_INOTIFY
406 #include <sys/inotify.h>
407 
408 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
409 static int sys_inotify_init(void)
410 {
411   return (inotify_init());
412 }
413 #endif
414 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
415 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
416 {
417   return (inotify_add_watch(fd, pathname, mask));
418 }
419 #endif
420 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
421 static int sys_inotify_rm_watch(int fd, int32_t wd)
422 {
423   return (inotify_rm_watch(fd, wd));
424 }
425 #endif
426 #ifdef CONFIG_INOTIFY1
427 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
428 static int sys_inotify_init1(int flags)
429 {
430   return (inotify_init1(flags));
431 }
432 #endif
433 #endif
434 #else
435 /* Userspace can usually survive runtime without inotify */
436 #undef TARGET_NR_inotify_init
437 #undef TARGET_NR_inotify_init1
438 #undef TARGET_NR_inotify_add_watch
439 #undef TARGET_NR_inotify_rm_watch
440 #endif /* CONFIG_INOTIFY  */
441 
442 #if defined(TARGET_NR_prlimit64)
443 #ifndef __NR_prlimit64
444 # define __NR_prlimit64 -1
445 #endif
446 #define __NR_sys_prlimit64 __NR_prlimit64
447 /* The glibc rlimit structure may not be the one used by the underlying syscall */
448 struct host_rlimit64 {
449     uint64_t rlim_cur;
450     uint64_t rlim_max;
451 };
452 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
453           const struct host_rlimit64 *, new_limit,
454           struct host_rlimit64 *, old_limit)
455 #endif
456 
457 
458 #if defined(TARGET_NR_timer_create)
459 /* Maximum of 32 active POSIX timers allowed at any one time. */
460 static timer_t g_posix_timers[32] = { 0, } ;
461 
462 static inline int next_free_host_timer(void)
463 {
464     int k ;
465     /* FIXME: Does finding the next free slot require a lock? */
466     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
467         if (g_posix_timers[k] == 0) {
468             g_posix_timers[k] = (timer_t) 1;
469             return k;
470         }
471     }
472     return -1;
473 }
474 #endif
475 
476 /* ARM EABI and MIPS expect 64-bit types to be aligned on even register pairs */
477 #ifdef TARGET_ARM
478 static inline int regpairs_aligned(void *cpu_env, int num)
479 {
480     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
481 }
482 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
483 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
484 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
485 /* SysV ABI for PPC32 expects 64-bit parameters to be passed on odd/even pairs
486  * of registers, which translates to the same as ARM/MIPS, because we start
487  * with r3 as arg1 */
488 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
489 #elif defined(TARGET_SH4)
490 /* SH4 doesn't align register pairs, except for p{read,write}64 */
491 static inline int regpairs_aligned(void *cpu_env, int num)
492 {
493     switch (num) {
494     case TARGET_NR_pread64:
495     case TARGET_NR_pwrite64:
496         return 1;
497 
498     default:
499         return 0;
500     }
501 }
502 #elif defined(TARGET_XTENSA)
503 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
504 #else
505 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
506 #endif
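/* For illustration: when regpairs_aligned() returns 1, a 64-bit argument
 * such as the offset of pread64(fd, buf, count, offset) arrives in an
 * aligned register pair, so the syscall dispatcher later in this file
 * skips one argument slot before reassembling the two halves, roughly:
 *
 *     if (regpairs_aligned(cpu_env, num)) {
 *         arg4 = arg5;    // low half of the 64-bit offset
 *         arg5 = arg6;    // high half of the 64-bit offset
 *     }
 *     ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
 *
 * (a simplified sketch, not the exact code).
 */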
507 
508 #define ERRNO_TABLE_SIZE 1200
509 
510 /* target_to_host_errno_table[] is initialized from
511  * host_to_target_errno_table[] in syscall_init(). */
512 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
513 };
514 
515 /*
516  * This list is the union of errno values overridden in asm-<arch>/errno.h
517  * minus the errnos that are not actually generic to all archs.
518  */
519 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
520     [EAGAIN]		= TARGET_EAGAIN,
521     [EIDRM]		= TARGET_EIDRM,
522     [ECHRNG]		= TARGET_ECHRNG,
523     [EL2NSYNC]		= TARGET_EL2NSYNC,
524     [EL3HLT]		= TARGET_EL3HLT,
525     [EL3RST]		= TARGET_EL3RST,
526     [ELNRNG]		= TARGET_ELNRNG,
527     [EUNATCH]		= TARGET_EUNATCH,
528     [ENOCSI]		= TARGET_ENOCSI,
529     [EL2HLT]		= TARGET_EL2HLT,
530     [EDEADLK]		= TARGET_EDEADLK,
531     [ENOLCK]		= TARGET_ENOLCK,
532     [EBADE]		= TARGET_EBADE,
533     [EBADR]		= TARGET_EBADR,
534     [EXFULL]		= TARGET_EXFULL,
535     [ENOANO]		= TARGET_ENOANO,
536     [EBADRQC]		= TARGET_EBADRQC,
537     [EBADSLT]		= TARGET_EBADSLT,
538     [EBFONT]		= TARGET_EBFONT,
539     [ENOSTR]		= TARGET_ENOSTR,
540     [ENODATA]		= TARGET_ENODATA,
541     [ETIME]		= TARGET_ETIME,
542     [ENOSR]		= TARGET_ENOSR,
543     [ENONET]		= TARGET_ENONET,
544     [ENOPKG]		= TARGET_ENOPKG,
545     [EREMOTE]		= TARGET_EREMOTE,
546     [ENOLINK]		= TARGET_ENOLINK,
547     [EADV]		= TARGET_EADV,
548     [ESRMNT]		= TARGET_ESRMNT,
549     [ECOMM]		= TARGET_ECOMM,
550     [EPROTO]		= TARGET_EPROTO,
551     [EDOTDOT]		= TARGET_EDOTDOT,
552     [EMULTIHOP]		= TARGET_EMULTIHOP,
553     [EBADMSG]		= TARGET_EBADMSG,
554     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
555     [EOVERFLOW]		= TARGET_EOVERFLOW,
556     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
557     [EBADFD]		= TARGET_EBADFD,
558     [EREMCHG]		= TARGET_EREMCHG,
559     [ELIBACC]		= TARGET_ELIBACC,
560     [ELIBBAD]		= TARGET_ELIBBAD,
561     [ELIBSCN]		= TARGET_ELIBSCN,
562     [ELIBMAX]		= TARGET_ELIBMAX,
563     [ELIBEXEC]		= TARGET_ELIBEXEC,
564     [EILSEQ]		= TARGET_EILSEQ,
565     [ENOSYS]		= TARGET_ENOSYS,
566     [ELOOP]		= TARGET_ELOOP,
567     [ERESTART]		= TARGET_ERESTART,
568     [ESTRPIPE]		= TARGET_ESTRPIPE,
569     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
570     [EUSERS]		= TARGET_EUSERS,
571     [ENOTSOCK]		= TARGET_ENOTSOCK,
572     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
573     [EMSGSIZE]		= TARGET_EMSGSIZE,
574     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
575     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
576     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
577     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
578     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
579     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
580     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
581     [EADDRINUSE]	= TARGET_EADDRINUSE,
582     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
583     [ENETDOWN]		= TARGET_ENETDOWN,
584     [ENETUNREACH]	= TARGET_ENETUNREACH,
585     [ENETRESET]		= TARGET_ENETRESET,
586     [ECONNABORTED]	= TARGET_ECONNABORTED,
587     [ECONNRESET]	= TARGET_ECONNRESET,
588     [ENOBUFS]		= TARGET_ENOBUFS,
589     [EISCONN]		= TARGET_EISCONN,
590     [ENOTCONN]		= TARGET_ENOTCONN,
591     [EUCLEAN]		= TARGET_EUCLEAN,
592     [ENOTNAM]		= TARGET_ENOTNAM,
593     [ENAVAIL]		= TARGET_ENAVAIL,
594     [EISNAM]		= TARGET_EISNAM,
595     [EREMOTEIO]		= TARGET_EREMOTEIO,
596     [EDQUOT]            = TARGET_EDQUOT,
597     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
598     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
599     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
600     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
601     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
602     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
603     [EALREADY]		= TARGET_EALREADY,
604     [EINPROGRESS]	= TARGET_EINPROGRESS,
605     [ESTALE]		= TARGET_ESTALE,
606     [ECANCELED]		= TARGET_ECANCELED,
607     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
608     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
609 #ifdef ENOKEY
610     [ENOKEY]		= TARGET_ENOKEY,
611 #endif
612 #ifdef EKEYEXPIRED
613     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
614 #endif
615 #ifdef EKEYREVOKED
616     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
617 #endif
618 #ifdef EKEYREJECTED
619     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
620 #endif
621 #ifdef EOWNERDEAD
622     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
623 #endif
624 #ifdef ENOTRECOVERABLE
625     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
626 #endif
627 #ifdef ENOMSG
628     [ENOMSG]            = TARGET_ENOMSG,
629 #endif
630 #ifdef ERFKILL
631     [ERFKILL]           = TARGET_ERFKILL,
632 #endif
633 #ifdef EHWPOISON
634     [EHWPOISON]         = TARGET_EHWPOISON,
635 #endif
636 };
637 
638 static inline int host_to_target_errno(int err)
639 {
640     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
641         host_to_target_errno_table[err]) {
642         return host_to_target_errno_table[err];
643     }
644     return err;
645 }
646 
647 static inline int target_to_host_errno(int err)
648 {
649     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
650         target_to_host_errno_table[err]) {
651         return target_to_host_errno_table[err];
652     }
653     return err;
654 }
655 
656 static inline abi_long get_errno(abi_long ret)
657 {
658     if (ret == -1)
659         return -host_to_target_errno(errno);
660     else
661         return ret;
662 }
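/* A short usage sketch: host results are funnelled through get_errno() so
 * that failures become negative target errno values, e.g.
 *
 *     ret = get_errno(open(pathname, host_flags, mode));
 *
 * yields -TARGET_ENOENT if the host open() fails with ENOENT, and simply
 * the new file descriptor on success.  This -errno convention is what the
 * guest-facing syscall layer uses throughout this file.
 */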
663 
664 const char *target_strerror(int err)
665 {
666     if (err == TARGET_ERESTARTSYS) {
667         return "To be restarted";
668     }
669     if (err == TARGET_QEMU_ESIGRETURN) {
670         return "Successful exit from sigreturn";
671     }
672 
673     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
674         return NULL;
675     }
676     return strerror(target_to_host_errno(err));
677 }
678 
679 #define safe_syscall0(type, name) \
680 static type safe_##name(void) \
681 { \
682     return safe_syscall(__NR_##name); \
683 }
684 
685 #define safe_syscall1(type, name, type1, arg1) \
686 static type safe_##name(type1 arg1) \
687 { \
688     return safe_syscall(__NR_##name, arg1); \
689 }
690 
691 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
692 static type safe_##name(type1 arg1, type2 arg2) \
693 { \
694     return safe_syscall(__NR_##name, arg1, arg2); \
695 }
696 
697 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
698 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
699 { \
700     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
701 }
702 
703 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
704     type4, arg4) \
705 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
706 { \
707     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
708 }
709 
710 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
711     type4, arg4, type5, arg5) \
712 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
713     type5 arg5) \
714 { \
715     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
716 }
717 
718 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
719     type4, arg4, type5, arg5, type6, arg6) \
720 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
721     type5 arg5, type6 arg6) \
722 { \
723     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
724 }
725 
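/* As an example, safe_syscall2(int, kill, pid_t, pid, int, sig) expands to
 * roughly:
 *
 *     static int safe_kill(pid_t pid, int sig)
 *     {
 *         return safe_syscall(__NR_kill, pid, sig);
 *     }
 *
 * Unlike the plain _syscallN wrappers above, safe_syscall() is the
 * signal-race-safe entry point: if a guest signal is pending, the call
 * returns -1 with errno set to TARGET_ERESTARTSYS rather than blocking in
 * the host kernel with the signal untaken.
 */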
726 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
727 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
728 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
729               int, flags, mode_t, mode)
730 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
731               struct rusage *, rusage)
732 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
733               int, options, struct rusage *, rusage)
734 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
735 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
736               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
737 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
738               struct timespec *, tsp, const sigset_t *, sigmask,
739               size_t, sigsetsize)
740 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
741               int, maxevents, int, timeout, const sigset_t *, sigmask,
742               size_t, sigsetsize)
743 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
744               const struct timespec *,timeout,int *,uaddr2,int,val3)
745 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
746 safe_syscall2(int, kill, pid_t, pid, int, sig)
747 safe_syscall2(int, tkill, int, tid, int, sig)
748 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
749 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
750 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
751 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
752               unsigned long, pos_l, unsigned long, pos_h)
753 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
754               unsigned long, pos_l, unsigned long, pos_h)
755 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
756               socklen_t, addrlen)
757 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
758               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
759 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
760               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
761 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
762 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
763 safe_syscall2(int, flock, int, fd, int, operation)
764 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
765               const struct timespec *, uts, size_t, sigsetsize)
766 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
767               int, flags)
768 safe_syscall2(int, nanosleep, const struct timespec *, req,
769               struct timespec *, rem)
770 #ifdef TARGET_NR_clock_nanosleep
771 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
772               const struct timespec *, req, struct timespec *, rem)
773 #endif
774 #ifdef __NR_msgsnd
775 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
776               int, flags)
777 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
778               long, msgtype, int, flags)
779 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
780               unsigned, nsops, const struct timespec *, timeout)
781 #else
782 /* This host kernel architecture uses a single ipc syscall; fake up
783  * wrappers for the sub-operations to hide this implementation detail.
784  * Annoyingly we can't include linux/ipc.h to get the constant definitions
785  * for the call parameter because some structs in there conflict with the
786  * sys/ipc.h ones. So we just define them here, and rely on them being
787  * the same for all host architectures.
788  */
789 #define Q_SEMTIMEDOP 4
790 #define Q_MSGSND 11
791 #define Q_MSGRCV 12
792 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
793 
794 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
795               void *, ptr, long, fifth)
796 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
797 {
798     return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
799 }
800 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
801 {
802     return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
803 }
804 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
805                            const struct timespec *timeout)
806 {
807     return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
808                     (long)timeout);
809 }
810 #endif
811 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
812 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
813               size_t, len, unsigned, prio, const struct timespec *, timeout)
814 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
815               size_t, len, unsigned *, prio, const struct timespec *, timeout)
816 #endif
817 /* We do ioctl like this rather than via safe_syscall3 to preserve the
818  * "third argument might be integer or pointer or not present" behaviour of
819  * the libc function.
820  */
821 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
822 /* Similarly for fcntl. Note that callers must always:
823  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
824  *  use the flock64 struct rather than unsuffixed flock
825  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
826  */
827 #ifdef __NR_fcntl64
828 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
829 #else
830 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
831 #endif
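/* Example of the rule above: file locking requests always go through the
 * 64-bit variants, e.g.
 *
 *     struct flock64 fl64;
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * so that the same code path carries a 64-bit file offset on both 32-bit
 * and 64-bit hosts.
 */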
832 
833 static inline int host_to_target_sock_type(int host_type)
834 {
835     int target_type;
836 
837     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
838     case SOCK_DGRAM:
839         target_type = TARGET_SOCK_DGRAM;
840         break;
841     case SOCK_STREAM:
842         target_type = TARGET_SOCK_STREAM;
843         break;
844     default:
845         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
846         break;
847     }
848 
849 #if defined(SOCK_CLOEXEC)
850     if (host_type & SOCK_CLOEXEC) {
851         target_type |= TARGET_SOCK_CLOEXEC;
852     }
853 #endif
854 
855 #if defined(SOCK_NONBLOCK)
856     if (host_type & SOCK_NONBLOCK) {
857         target_type |= TARGET_SOCK_NONBLOCK;
858     }
859 #endif
860 
861     return target_type;
862 }
863 
864 static abi_ulong target_brk;
865 static abi_ulong target_original_brk;
866 static abi_ulong brk_page;
867 
868 void target_set_brk(abi_ulong new_brk)
869 {
870     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
871     brk_page = HOST_PAGE_ALIGN(target_brk);
872 }
873 
874 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
875 #define DEBUGF_BRK(message, args...)
876 
877 /* do_brk() must return target values and target errnos. */
878 abi_long do_brk(abi_ulong new_brk)
879 {
880     abi_long mapped_addr;
881     abi_ulong new_alloc_size;
882 
883     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
884 
885     if (!new_brk) {
886         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
887         return target_brk;
888     }
889     if (new_brk < target_original_brk) {
890         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
891                    target_brk);
892         return target_brk;
893     }
894 
895     /* If the new brk is less than the highest page reserved to the
896      * target heap allocation, set it and we're almost done...  */
897     if (new_brk <= brk_page) {
898         /* Heap contents are initialized to zero, as for anonymous
899          * mapped pages.  */
900         if (new_brk > target_brk) {
901             memset(g2h(target_brk), 0, new_brk - target_brk);
902         }
903         target_brk = new_brk;
904         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
905         return target_brk;
906     }
907 
908     /* We need to allocate more memory after the brk... Note that
909      * we don't use MAP_FIXED because that will map over the top of
910      * any existing mapping (like the one with the host libc or qemu
911      * itself); instead we treat "mapped but at wrong address" as
912      * a failure and unmap again.
913      */
914     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
915     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
916                                         PROT_READ|PROT_WRITE,
917                                         MAP_ANON|MAP_PRIVATE, 0, 0));
918 
919     if (mapped_addr == brk_page) {
920         /* Heap contents are initialized to zero, as for anonymous
921          * mapped pages.  Technically the new pages are already
922          * initialized to zero since they *are* anonymous mapped
923          * pages, however we have to take care with the contents that
924          * come from the remaining part of the previous page: it may
925          * contain garbage data from a previous heap usage (grown
926          * then shrunk).  */
927         memset(g2h(target_brk), 0, brk_page - target_brk);
928 
929         target_brk = new_brk;
930         brk_page = HOST_PAGE_ALIGN(target_brk);
931         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
932             target_brk);
933         return target_brk;
934     } else if (mapped_addr != -1) {
935         /* Mapped but at wrong address, meaning there wasn't actually
936          * enough space for this brk.
937          */
938         target_munmap(mapped_addr, new_alloc_size);
939         mapped_addr = -1;
940         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
941     }
942     else {
943         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
944     }
945 
946 #if defined(TARGET_ALPHA)
947     /* We (partially) emulate OSF/1 on Alpha, which requires we
948        return a proper errno, not an unchanged brk value.  */
949     return -TARGET_ENOMEM;
950 #endif
951     /* For everything else, return the previous break. */
952     return target_brk;
953 }
954 
955 static inline abi_long copy_from_user_fdset(fd_set *fds,
956                                             abi_ulong target_fds_addr,
957                                             int n)
958 {
959     int i, nw, j, k;
960     abi_ulong b, *target_fds;
961 
962     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
963     if (!(target_fds = lock_user(VERIFY_READ,
964                                  target_fds_addr,
965                                  sizeof(abi_ulong) * nw,
966                                  1)))
967         return -TARGET_EFAULT;
968 
969     FD_ZERO(fds);
970     k = 0;
971     for (i = 0; i < nw; i++) {
972         /* grab the abi_ulong */
973         __get_user(b, &target_fds[i]);
974         for (j = 0; j < TARGET_ABI_BITS; j++) {
975             /* check the bit inside the abi_ulong */
976             if ((b >> j) & 1)
977                 FD_SET(k, fds);
978             k++;
979         }
980     }
981 
982     unlock_user(target_fds, target_fds_addr, 0);
983 
984     return 0;
985 }
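/* Worked example of the packing handled above: with TARGET_ABI_BITS == 32
 * and n == 70 descriptors, nw = DIV_ROUND_UP(70, 32) = 3 abi_ulongs are
 * read from guest memory, and bit j of word i corresponds to descriptor
 * k = i * 32 + j, so the highest descriptor (69) lives in word 2, bit 5.
 */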
986 
987 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
988                                                  abi_ulong target_fds_addr,
989                                                  int n)
990 {
991     if (target_fds_addr) {
992         if (copy_from_user_fdset(fds, target_fds_addr, n))
993             return -TARGET_EFAULT;
994         *fds_ptr = fds;
995     } else {
996         *fds_ptr = NULL;
997     }
998     return 0;
999 }
1000 
1001 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1002                                           const fd_set *fds,
1003                                           int n)
1004 {
1005     int i, nw, j, k;
1006     abi_long v;
1007     abi_ulong *target_fds;
1008 
1009     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1010     if (!(target_fds = lock_user(VERIFY_WRITE,
1011                                  target_fds_addr,
1012                                  sizeof(abi_ulong) * nw,
1013                                  0)))
1014         return -TARGET_EFAULT;
1015 
1016     k = 0;
1017     for (i = 0; i < nw; i++) {
1018         v = 0;
1019         for (j = 0; j < TARGET_ABI_BITS; j++) {
1020             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1021             k++;
1022         }
1023         __put_user(v, &target_fds[i]);
1024     }
1025 
1026     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1027 
1028     return 0;
1029 }
1030 
1031 #if defined(__alpha__)
1032 #define HOST_HZ 1024
1033 #else
1034 #define HOST_HZ 100
1035 #endif
1036 
1037 static inline abi_long host_to_target_clock_t(long ticks)
1038 {
1039 #if HOST_HZ == TARGET_HZ
1040     return ticks;
1041 #else
1042     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1043 #endif
1044 }
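/* Worked example: on an Alpha host (HOST_HZ == 1024) serving a target with
 * TARGET_HZ == 100, a host value of 2048 ticks (2 seconds) converts to
 * (2048 * 100) / 1024 = 200 target clock ticks, i.e. still 2 seconds in
 * the target's clock_t units.
 */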
1045 
1046 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1047                                              const struct rusage *rusage)
1048 {
1049     struct target_rusage *target_rusage;
1050 
1051     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1052         return -TARGET_EFAULT;
1053     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1054     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1055     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1056     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1057     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1058     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1059     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1060     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1061     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1062     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1063     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1064     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1065     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1066     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1067     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1068     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1069     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1070     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1071     unlock_user_struct(target_rusage, target_addr, 1);
1072 
1073     return 0;
1074 }
1075 
1076 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1077 {
1078     abi_ulong target_rlim_swap;
1079     rlim_t result;
1080 
1081     target_rlim_swap = tswapal(target_rlim);
1082     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1083         return RLIM_INFINITY;
1084 
1085     result = target_rlim_swap;
1086     if (target_rlim_swap != (rlim_t)result)
1087         return RLIM_INFINITY;
1088 
1089     return result;
1090 }
1091 
1092 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1093 {
1094     abi_ulong target_rlim_swap;
1095     abi_ulong result;
1096 
1097     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1098         target_rlim_swap = TARGET_RLIM_INFINITY;
1099     else
1100         target_rlim_swap = rlim;
1101     result = tswapal(target_rlim_swap);
1102 
1103     return result;
1104 }
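/* Illustration of the clamping in the two helpers above: a host limit that
 * does not fit in the target's abi_long (e.g. 0x100000000 with a 32-bit
 * target) is reported to the guest as TARGET_RLIM_INFINITY, and in the
 * reverse direction TARGET_RLIM_INFINITY (or any value that does not fit
 * the host rlim_t) becomes RLIM_INFINITY rather than a truncated number.
 */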
1105 
1106 static inline int target_to_host_resource(int code)
1107 {
1108     switch (code) {
1109     case TARGET_RLIMIT_AS:
1110         return RLIMIT_AS;
1111     case TARGET_RLIMIT_CORE:
1112         return RLIMIT_CORE;
1113     case TARGET_RLIMIT_CPU:
1114         return RLIMIT_CPU;
1115     case TARGET_RLIMIT_DATA:
1116         return RLIMIT_DATA;
1117     case TARGET_RLIMIT_FSIZE:
1118         return RLIMIT_FSIZE;
1119     case TARGET_RLIMIT_LOCKS:
1120         return RLIMIT_LOCKS;
1121     case TARGET_RLIMIT_MEMLOCK:
1122         return RLIMIT_MEMLOCK;
1123     case TARGET_RLIMIT_MSGQUEUE:
1124         return RLIMIT_MSGQUEUE;
1125     case TARGET_RLIMIT_NICE:
1126         return RLIMIT_NICE;
1127     case TARGET_RLIMIT_NOFILE:
1128         return RLIMIT_NOFILE;
1129     case TARGET_RLIMIT_NPROC:
1130         return RLIMIT_NPROC;
1131     case TARGET_RLIMIT_RSS:
1132         return RLIMIT_RSS;
1133     case TARGET_RLIMIT_RTPRIO:
1134         return RLIMIT_RTPRIO;
1135     case TARGET_RLIMIT_SIGPENDING:
1136         return RLIMIT_SIGPENDING;
1137     case TARGET_RLIMIT_STACK:
1138         return RLIMIT_STACK;
1139     default:
1140         return code;
1141     }
1142 }
1143 
1144 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1145                                               abi_ulong target_tv_addr)
1146 {
1147     struct target_timeval *target_tv;
1148 
1149     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1150         return -TARGET_EFAULT;
1151 
1152     __get_user(tv->tv_sec, &target_tv->tv_sec);
1153     __get_user(tv->tv_usec, &target_tv->tv_usec);
1154 
1155     unlock_user_struct(target_tv, target_tv_addr, 0);
1156 
1157     return 0;
1158 }
1159 
1160 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1161                                             const struct timeval *tv)
1162 {
1163     struct target_timeval *target_tv;
1164 
1165     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1166         return -TARGET_EFAULT;
1167 
1168     __put_user(tv->tv_sec, &target_tv->tv_sec);
1169     __put_user(tv->tv_usec, &target_tv->tv_usec);
1170 
1171     unlock_user_struct(target_tv, target_tv_addr, 1);
1172 
1173     return 0;
1174 }
1175 
1176 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1177                                                abi_ulong target_tz_addr)
1178 {
1179     struct target_timezone *target_tz;
1180 
1181     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1182         return -TARGET_EFAULT;
1183     }
1184 
1185     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1186     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1187 
1188     unlock_user_struct(target_tz, target_tz_addr, 0);
1189 
1190     return 0;
1191 }
1192 
1193 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1194 #include <mqueue.h>
1195 
1196 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1197                                               abi_ulong target_mq_attr_addr)
1198 {
1199     struct target_mq_attr *target_mq_attr;
1200 
1201     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1202                           target_mq_attr_addr, 1))
1203         return -TARGET_EFAULT;
1204 
1205     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1206     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1207     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1208     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1209 
1210     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1211 
1212     return 0;
1213 }
1214 
1215 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1216                                             const struct mq_attr *attr)
1217 {
1218     struct target_mq_attr *target_mq_attr;
1219 
1220     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1221                           target_mq_attr_addr, 0))
1222         return -TARGET_EFAULT;
1223 
1224     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1225     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1226     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1227     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1228 
1229     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1230 
1231     return 0;
1232 }
1233 #endif
1234 
1235 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1236 /* do_select() must return target values and target errnos. */
1237 static abi_long do_select(int n,
1238                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1239                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1240 {
1241     fd_set rfds, wfds, efds;
1242     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1243     struct timeval tv;
1244     struct timespec ts, *ts_ptr;
1245     abi_long ret;
1246 
1247     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1248     if (ret) {
1249         return ret;
1250     }
1251     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1252     if (ret) {
1253         return ret;
1254     }
1255     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1256     if (ret) {
1257         return ret;
1258     }
1259 
1260     if (target_tv_addr) {
1261         if (copy_from_user_timeval(&tv, target_tv_addr))
1262             return -TARGET_EFAULT;
1263         ts.tv_sec = tv.tv_sec;
1264         ts.tv_nsec = tv.tv_usec * 1000;
1265         ts_ptr = &ts;
1266     } else {
1267         ts_ptr = NULL;
1268     }
1269 
1270     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1271                                   ts_ptr, NULL));
1272 
1273     if (!is_error(ret)) {
1274         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1275             return -TARGET_EFAULT;
1276         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1277             return -TARGET_EFAULT;
1278         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1279             return -TARGET_EFAULT;
1280 
1281         if (target_tv_addr) {
1282             tv.tv_sec = ts.tv_sec;
1283             tv.tv_usec = ts.tv_nsec / 1000;
1284             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1285                 return -TARGET_EFAULT;
1286             }
1287         }
1288     }
1289 
1290     return ret;
1291 }
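/* Conversion example for the timeout handling above: a guest timeval of
 * { tv_sec = 1, tv_usec = 500000 } becomes a host timespec of
 * { tv_sec = 1, tv_nsec = 500000000 } for pselect6(), and whatever the
 * host leaves in the timespec is scaled back to microseconds before being
 * copied out to the guest.
 */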
1292 
1293 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1294 static abi_long do_old_select(abi_ulong arg1)
1295 {
1296     struct target_sel_arg_struct *sel;
1297     abi_ulong inp, outp, exp, tvp;
1298     long nsel;
1299 
1300     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1301         return -TARGET_EFAULT;
1302     }
1303 
1304     nsel = tswapal(sel->n);
1305     inp = tswapal(sel->inp);
1306     outp = tswapal(sel->outp);
1307     exp = tswapal(sel->exp);
1308     tvp = tswapal(sel->tvp);
1309 
1310     unlock_user_struct(sel, arg1, 0);
1311 
1312     return do_select(nsel, inp, outp, exp, tvp);
1313 }
1314 #endif
1315 #endif
1316 
1317 static abi_long do_pipe2(int host_pipe[], int flags)
1318 {
1319 #ifdef CONFIG_PIPE2
1320     return pipe2(host_pipe, flags);
1321 #else
1322     return -ENOSYS;
1323 #endif
1324 }
1325 
1326 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1327                         int flags, int is_pipe2)
1328 {
1329     int host_pipe[2];
1330     abi_long ret;
1331     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1332 
1333     if (is_error(ret))
1334         return get_errno(ret);
1335 
1336     /* Several targets have special calling conventions for the original
1337        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1338     if (!is_pipe2) {
1339 #if defined(TARGET_ALPHA)
1340         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1341         return host_pipe[0];
1342 #elif defined(TARGET_MIPS)
1343         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1344         return host_pipe[0];
1345 #elif defined(TARGET_SH4)
1346         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1347         return host_pipe[0];
1348 #elif defined(TARGET_SPARC)
1349         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1350         return host_pipe[0];
1351 #endif
1352     }
1353 
1354     if (put_user_s32(host_pipe[0], pipedes)
1355         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1356         return -TARGET_EFAULT;
1357     return get_errno(ret);
1358 }
1359 
1360 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1361                                               abi_ulong target_addr,
1362                                               socklen_t len)
1363 {
1364     struct target_ip_mreqn *target_smreqn;
1365 
1366     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1367     if (!target_smreqn)
1368         return -TARGET_EFAULT;
1369     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1370     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1371     if (len == sizeof(struct target_ip_mreqn))
1372         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1373     unlock_user(target_smreqn, target_addr, 0);
1374 
1375     return 0;
1376 }
1377 
1378 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1379                                                abi_ulong target_addr,
1380                                                socklen_t len)
1381 {
1382     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1383     sa_family_t sa_family;
1384     struct target_sockaddr *target_saddr;
1385 
1386     if (fd_trans_target_to_host_addr(fd)) {
1387         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1388     }
1389 
1390     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1391     if (!target_saddr)
1392         return -TARGET_EFAULT;
1393 
1394     sa_family = tswap16(target_saddr->sa_family);
1395 
1396     /* Oops. The caller might send an incomplete sun_path; sun_path
1397      * must be terminated by \0 (see the manual page), but
1398      * unfortunately it is quite common to specify sockaddr_un
1399      * length as "strlen(x->sun_path)" while it should be
1400      * "strlen(...) + 1". We'll fix that here if needed.
1401      * The Linux kernel has a similar feature.
1402      */
1403 
1404     if (sa_family == AF_UNIX) {
1405         if (len < unix_maxlen && len > 0) {
1406             char *cp = (char*)target_saddr;
1407 
1408             if ( cp[len-1] && !cp[len] )
1409                 len++;
1410         }
1411         if (len > unix_maxlen)
1412             len = unix_maxlen;
1413     }
1414 
1415     memcpy(addr, target_saddr, len);
1416     addr->sa_family = sa_family;
1417     if (sa_family == AF_NETLINK) {
1418         struct sockaddr_nl *nladdr;
1419 
1420         nladdr = (struct sockaddr_nl *)addr;
1421         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1422         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1423     } else if (sa_family == AF_PACKET) {
1424         struct target_sockaddr_ll *lladdr;
1425 
1426         lladdr = (struct target_sockaddr_ll *)addr;
1427         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1428         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1429     }
1430     unlock_user(target_saddr, target_addr, 0);
1431 
1432     return 0;
1433 }
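/* Example of the AF_UNIX length fix-up above: if the guest connects to
 * "/tmp/sock" but passes len = offsetof(struct sockaddr_un, sun_path) + 9
 * (i.e. strlen() without the trailing NUL), then cp[len-1] is 'k' and, if
 * the byte just past the supplied length is 0 (as it is for a normally
 * zero-filled sockaddr_un), len is bumped by one so the host kernel sees a
 * properly terminated path.
 */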
1434 
1435 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1436                                                struct sockaddr *addr,
1437                                                socklen_t len)
1438 {
1439     struct target_sockaddr *target_saddr;
1440 
1441     if (len == 0) {
1442         return 0;
1443     }
1444     assert(addr);
1445 
1446     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1447     if (!target_saddr)
1448         return -TARGET_EFAULT;
1449     memcpy(target_saddr, addr, len);
1450     if (len >= offsetof(struct target_sockaddr, sa_family) +
1451         sizeof(target_saddr->sa_family)) {
1452         target_saddr->sa_family = tswap16(addr->sa_family);
1453     }
1454     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1455         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1456         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1457         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1458     } else if (addr->sa_family == AF_PACKET) {
1459         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1460         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1461         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1462     } else if (addr->sa_family == AF_INET6 &&
1463                len >= sizeof(struct target_sockaddr_in6)) {
1464         struct target_sockaddr_in6 *target_in6 =
1465                (struct target_sockaddr_in6 *)target_saddr;
1466         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1467     }
1468     unlock_user(target_saddr, target_addr, len);
1469 
1470     return 0;
1471 }
1472 
1473 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1474                                            struct target_msghdr *target_msgh)
1475 {
1476     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1477     abi_long msg_controllen;
1478     abi_ulong target_cmsg_addr;
1479     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1480     socklen_t space = 0;
1481 
1482     msg_controllen = tswapal(target_msgh->msg_controllen);
1483     if (msg_controllen < sizeof (struct target_cmsghdr))
1484         goto the_end;
1485     target_cmsg_addr = tswapal(target_msgh->msg_control);
1486     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1487     target_cmsg_start = target_cmsg;
1488     if (!target_cmsg)
1489         return -TARGET_EFAULT;
1490 
1491     while (cmsg && target_cmsg) {
1492         void *data = CMSG_DATA(cmsg);
1493         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1494 
1495         int len = tswapal(target_cmsg->cmsg_len)
1496             - sizeof(struct target_cmsghdr);
1497 
1498         space += CMSG_SPACE(len);
1499         if (space > msgh->msg_controllen) {
1500             space -= CMSG_SPACE(len);
1501             /* This is a QEMU bug, since we allocated the payload
1502              * area ourselves (unlike overflow in host-to-target
1503              * conversion, which is just the guest giving us a buffer
1504              * that's too small). It can't happen for the payload types
1505              * we currently support; if it becomes an issue in future
1506              * we would need to improve our allocation strategy to
1507              * something more intelligent than "twice the size of the
1508              * target buffer we're reading from".
1509              */
1510             gemu_log("Host cmsg overflow\n");
1511             break;
1512         }
1513 
1514         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1515             cmsg->cmsg_level = SOL_SOCKET;
1516         } else {
1517             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1518         }
1519         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1520         cmsg->cmsg_len = CMSG_LEN(len);
1521 
1522         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1523             int *fd = (int *)data;
1524             int *target_fd = (int *)target_data;
1525             int i, numfds = len / sizeof(int);
1526 
1527             for (i = 0; i < numfds; i++) {
1528                 __get_user(fd[i], target_fd + i);
1529             }
1530         } else if (cmsg->cmsg_level == SOL_SOCKET
1531                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1532             struct ucred *cred = (struct ucred *)data;
1533             struct target_ucred *target_cred =
1534                 (struct target_ucred *)target_data;
1535 
1536             __get_user(cred->pid, &target_cred->pid);
1537             __get_user(cred->uid, &target_cred->uid);
1538             __get_user(cred->gid, &target_cred->gid);
1539         } else {
1540             gemu_log("Unsupported ancillary data: %d/%d\n",
1541                                         cmsg->cmsg_level, cmsg->cmsg_type);
1542             memcpy(data, target_data, len);
1543         }
1544 
1545         cmsg = CMSG_NXTHDR(msgh, cmsg);
1546         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1547                                          target_cmsg_start);
1548     }
1549     unlock_user(target_cmsg, target_cmsg_addr, 0);
1550  the_end:
1551     msgh->msg_controllen = space;
1552     return 0;
1553 }
1554 
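/*
 * Convert host ancillary data received by recvmsg() back into the guest's
 * cmsg layout.  Unlike the target-to-host direction, running out of room
 * here is the guest's fault and is reported via MSG_CTRUNC, mirroring the
 * kernel's put_cmsg() behaviour.  Payloads may change size in transit:
 * for example (sizes are illustrative and depend on the host/guest ABI
 * pair), a 64-bit host's 16-byte struct timeval delivered for SO_TIMESTAMP
 * is rewritten as a 32-bit guest's 8-byte struct target_timeval, so
 * tgt_len is adjusted before the copy.
 */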
1555 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1556                                            struct msghdr *msgh)
1557 {
1558     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1559     abi_long msg_controllen;
1560     abi_ulong target_cmsg_addr;
1561     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1562     socklen_t space = 0;
1563 
1564     msg_controllen = tswapal(target_msgh->msg_controllen);
1565     if (msg_controllen < sizeof (struct target_cmsghdr))
1566         goto the_end;
1567     target_cmsg_addr = tswapal(target_msgh->msg_control);
1568     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1569     target_cmsg_start = target_cmsg;
1570     if (!target_cmsg)
1571         return -TARGET_EFAULT;
1572 
1573     while (cmsg && target_cmsg) {
1574         void *data = CMSG_DATA(cmsg);
1575         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1576 
1577         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1578         int tgt_len, tgt_space;
1579 
1580         /* We never copy a half-header but may copy half-data;
1581          * this is Linux's behaviour in put_cmsg(). Note that
1582          * truncation here is a guest problem (which we report
1583          * to the guest via the CTRUNC bit), unlike truncation
1584          * in target_to_host_cmsg, which is a QEMU bug.
1585          */
1586         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1587             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1588             break;
1589         }
1590 
1591         if (cmsg->cmsg_level == SOL_SOCKET) {
1592             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1593         } else {
1594             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1595         }
1596         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1597 
1598         /* Payload types which need a different size of payload on
1599          * the target must adjust tgt_len here.
1600          */
1601         tgt_len = len;
1602         switch (cmsg->cmsg_level) {
1603         case SOL_SOCKET:
1604             switch (cmsg->cmsg_type) {
1605             case SO_TIMESTAMP:
1606                 tgt_len = sizeof(struct target_timeval);
1607                 break;
1608             default:
1609                 break;
1610             }
1611             break;
1612         default:
1613             break;
1614         }
1615 
1616         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1617             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1618             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1619         }
1620 
1621         /* We must now copy-and-convert len bytes of payload
1622          * into tgt_len bytes of destination space. Bear in mind
1623          * that in both source and destination we may be dealing
1624          * with a truncated value!
1625          */
1626         switch (cmsg->cmsg_level) {
1627         case SOL_SOCKET:
1628             switch (cmsg->cmsg_type) {
1629             case SCM_RIGHTS:
1630             {
1631                 int *fd = (int *)data;
1632                 int *target_fd = (int *)target_data;
1633                 int i, numfds = tgt_len / sizeof(int);
1634 
1635                 for (i = 0; i < numfds; i++) {
1636                     __put_user(fd[i], target_fd + i);
1637                 }
1638                 break;
1639             }
1640             case SO_TIMESTAMP:
1641             {
1642                 struct timeval *tv = (struct timeval *)data;
1643                 struct target_timeval *target_tv =
1644                     (struct target_timeval *)target_data;
1645 
1646                 if (len != sizeof(struct timeval) ||
1647                     tgt_len != sizeof(struct target_timeval)) {
1648                     goto unimplemented;
1649                 }
1650 
1651                 /* copy struct timeval to target */
1652                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1653                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1654                 break;
1655             }
1656             case SCM_CREDENTIALS:
1657             {
1658                 struct ucred *cred = (struct ucred *)data;
1659                 struct target_ucred *target_cred =
1660                     (struct target_ucred *)target_data;
1661 
1662                 __put_user(cred->pid, &target_cred->pid);
1663                 __put_user(cred->uid, &target_cred->uid);
1664                 __put_user(cred->gid, &target_cred->gid);
1665                 break;
1666             }
1667             default:
1668                 goto unimplemented;
1669             }
1670             break;
1671 
1672         case SOL_IP:
1673             switch (cmsg->cmsg_type) {
1674             case IP_TTL:
1675             {
1676                 uint32_t *v = (uint32_t *)data;
1677                 uint32_t *t_int = (uint32_t *)target_data;
1678 
1679                 if (len != sizeof(uint32_t) ||
1680                     tgt_len != sizeof(uint32_t)) {
1681                     goto unimplemented;
1682                 }
1683                 __put_user(*v, t_int);
1684                 break;
1685             }
1686             case IP_RECVERR:
1687             {
1688                 struct errhdr_t {
1689                    struct sock_extended_err ee;
1690                    struct sockaddr_in offender;
1691                 };
1692                 struct errhdr_t *errh = (struct errhdr_t *)data;
1693                 struct errhdr_t *target_errh =
1694                     (struct errhdr_t *)target_data;
1695 
1696                 if (len != sizeof(struct errhdr_t) ||
1697                     tgt_len != sizeof(struct errhdr_t)) {
1698                     goto unimplemented;
1699                 }
1700                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1701                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1702                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1703                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1704                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1705                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1706                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1707                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1708                     (void *) &errh->offender, sizeof(errh->offender));
1709                 break;
1710             }
1711             default:
1712                 goto unimplemented;
1713             }
1714             break;
1715 
1716         case SOL_IPV6:
1717             switch (cmsg->cmsg_type) {
1718             case IPV6_HOPLIMIT:
1719             {
1720                 uint32_t *v = (uint32_t *)data;
1721                 uint32_t *t_int = (uint32_t *)target_data;
1722 
1723                 if (len != sizeof(uint32_t) ||
1724                     tgt_len != sizeof(uint32_t)) {
1725                     goto unimplemented;
1726                 }
1727                 __put_user(*v, t_int);
1728                 break;
1729             }
1730             case IPV6_RECVERR:
1731             {
1732                 struct errhdr6_t {
1733                    struct sock_extended_err ee;
1734                    struct sockaddr_in6 offender;
1735                 };
1736                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1737                 struct errhdr6_t *target_errh =
1738                     (struct errhdr6_t *)target_data;
1739 
1740                 if (len != sizeof(struct errhdr6_t) ||
1741                     tgt_len != sizeof(struct errhdr6_t)) {
1742                     goto unimplemented;
1743                 }
1744                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1745                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1746                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1747                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1748                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1749                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1750                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1751                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1752                     (void *) &errh->offender, sizeof(errh->offender));
1753                 break;
1754             }
1755             default:
1756                 goto unimplemented;
1757             }
1758             break;
1759 
1760         default:
1761         unimplemented:
1762             gemu_log("Unsupported ancillary data: %d/%d\n",
1763                                         cmsg->cmsg_level, cmsg->cmsg_type);
1764             memcpy(target_data, data, MIN(len, tgt_len));
1765             if (tgt_len > len) {
1766                 memset(target_data + len, 0, tgt_len - len);
1767             }
1768         }
1769 
1770         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1771         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1772         if (msg_controllen < tgt_space) {
1773             tgt_space = msg_controllen;
1774         }
1775         msg_controllen -= tgt_space;
1776         space += tgt_space;
1777         cmsg = CMSG_NXTHDR(msgh, cmsg);
1778         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1779                                          target_cmsg_start);
1780     }
1781     unlock_user(target_cmsg, target_cmsg_addr, space);
1782  the_end:
1783     target_msgh->msg_controllen = tswapal(space);
1784     return 0;
1785 }
1786 
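/*
 * Illustrative flow (a sketch, not extra functionality): a guest call
 *
 *     setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
 *
 * arrives here with the guest's constants, i.e. level == TARGET_SOL_SOCKET
 * and optname == TARGET_SO_REUSEADDR.  The option is remapped to the
 * host's SO_REUSEADDR, the 32-bit value is read with get_user_u32(), and
 * the host setsockopt() is called with SOL_SOCKET.  Options whose payload
 * is not a plain int (timeouts, socket filters, SO_LINGER,
 * SO_BINDTODEVICE, multicast requests, ...) are converted structure by
 * structure below.
 */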
1787 /* do_setsockopt() must return target values and target errnos. */
1788 static abi_long do_setsockopt(int sockfd, int level, int optname,
1789                               abi_ulong optval_addr, socklen_t optlen)
1790 {
1791     abi_long ret;
1792     int val;
1793     struct ip_mreqn *ip_mreq;
1794     struct ip_mreq_source *ip_mreq_source;
1795 
1796     switch(level) {
1797     case SOL_TCP:
1798         /* TCP options all take an 'int' value.  */
1799         if (optlen < sizeof(uint32_t))
1800             return -TARGET_EINVAL;
1801 
1802         if (get_user_u32(val, optval_addr))
1803             return -TARGET_EFAULT;
1804         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1805         break;
1806     case SOL_IP:
1807         switch(optname) {
1808         case IP_TOS:
1809         case IP_TTL:
1810         case IP_HDRINCL:
1811         case IP_ROUTER_ALERT:
1812         case IP_RECVOPTS:
1813         case IP_RETOPTS:
1814         case IP_PKTINFO:
1815         case IP_MTU_DISCOVER:
1816         case IP_RECVERR:
1817         case IP_RECVTTL:
1818         case IP_RECVTOS:
1819 #ifdef IP_FREEBIND
1820         case IP_FREEBIND:
1821 #endif
1822         case IP_MULTICAST_TTL:
1823         case IP_MULTICAST_LOOP:
1824             val = 0;
1825             if (optlen >= sizeof(uint32_t)) {
1826                 if (get_user_u32(val, optval_addr))
1827                     return -TARGET_EFAULT;
1828             } else if (optlen >= 1) {
1829                 if (get_user_u8(val, optval_addr))
1830                     return -TARGET_EFAULT;
1831             }
1832             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1833             break;
1834         case IP_ADD_MEMBERSHIP:
1835         case IP_DROP_MEMBERSHIP:
1836             if (optlen < sizeof (struct target_ip_mreq) ||
1837                 optlen > sizeof (struct target_ip_mreqn))
1838                 return -TARGET_EINVAL;
1839 
1840             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1841             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1842             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1843             break;
1844 
1845         case IP_BLOCK_SOURCE:
1846         case IP_UNBLOCK_SOURCE:
1847         case IP_ADD_SOURCE_MEMBERSHIP:
1848         case IP_DROP_SOURCE_MEMBERSHIP:
1849             if (optlen != sizeof (struct target_ip_mreq_source))
1850                 return -TARGET_EINVAL;
1851 
1852             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                 if (!ip_mreq_source) {
                     return -TARGET_EFAULT;
                 }
1853             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1854             unlock_user (ip_mreq_source, optval_addr, 0);
1855             break;
1856 
1857         default:
1858             goto unimplemented;
1859         }
1860         break;
1861     case SOL_IPV6:
1862         switch (optname) {
1863         case IPV6_MTU_DISCOVER:
1864         case IPV6_MTU:
1865         case IPV6_V6ONLY:
1866         case IPV6_RECVPKTINFO:
1867         case IPV6_UNICAST_HOPS:
1868         case IPV6_MULTICAST_HOPS:
1869         case IPV6_MULTICAST_LOOP:
1870         case IPV6_RECVERR:
1871         case IPV6_RECVHOPLIMIT:
1872         case IPV6_2292HOPLIMIT:
1873         case IPV6_CHECKSUM:
1874             val = 0;
1875             if (optlen < sizeof(uint32_t)) {
1876                 return -TARGET_EINVAL;
1877             }
1878             if (get_user_u32(val, optval_addr)) {
1879                 return -TARGET_EFAULT;
1880             }
1881             ret = get_errno(setsockopt(sockfd, level, optname,
1882                                        &val, sizeof(val)));
1883             break;
1884         case IPV6_PKTINFO:
1885         {
1886             struct in6_pktinfo pki;
1887 
1888             if (optlen < sizeof(pki)) {
1889                 return -TARGET_EINVAL;
1890             }
1891 
1892             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1893                 return -TARGET_EFAULT;
1894             }
1895 
1896             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1897 
1898             ret = get_errno(setsockopt(sockfd, level, optname,
1899                                        &pki, sizeof(pki)));
1900             break;
1901         }
1902         default:
1903             goto unimplemented;
1904         }
1905         break;
1906     case SOL_ICMPV6:
1907         switch (optname) {
1908         case ICMPV6_FILTER:
1909         {
1910             struct icmp6_filter icmp6f;
1911 
1912             if (optlen > sizeof(icmp6f)) {
1913                 optlen = sizeof(icmp6f);
1914             }
1915 
1916             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
1917                 return -TARGET_EFAULT;
1918             }
1919 
1920             for (val = 0; val < 8; val++) {
1921                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
1922             }
1923 
1924             ret = get_errno(setsockopt(sockfd, level, optname,
1925                                        &icmp6f, optlen));
1926             break;
1927         }
1928         default:
1929             goto unimplemented;
1930         }
1931         break;
1932     case SOL_RAW:
1933         switch (optname) {
1934         case ICMP_FILTER:
1935         case IPV6_CHECKSUM:
1936             /* these take a u32 value */
1937             if (optlen < sizeof(uint32_t)) {
1938                 return -TARGET_EINVAL;
1939             }
1940 
1941             if (get_user_u32(val, optval_addr)) {
1942                 return -TARGET_EFAULT;
1943             }
1944             ret = get_errno(setsockopt(sockfd, level, optname,
1945                                        &val, sizeof(val)));
1946             break;
1947 
1948         default:
1949             goto unimplemented;
1950         }
1951         break;
1952     case TARGET_SOL_SOCKET:
1953         switch (optname) {
1954         case TARGET_SO_RCVTIMEO:
1955         {
1956                 struct timeval tv;
1957 
1958                 optname = SO_RCVTIMEO;
1959 
1960 set_timeout:
1961                 if (optlen != sizeof(struct target_timeval)) {
1962                     return -TARGET_EINVAL;
1963                 }
1964 
1965                 if (copy_from_user_timeval(&tv, optval_addr)) {
1966                     return -TARGET_EFAULT;
1967                 }
1968 
1969                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1970                                 &tv, sizeof(tv)));
1971                 return ret;
1972         }
1973         case TARGET_SO_SNDTIMEO:
1974                 optname = SO_SNDTIMEO;
1975                 goto set_timeout;
1976         case TARGET_SO_ATTACH_FILTER:
1977         {
1978                 struct target_sock_fprog *tfprog;
1979                 struct target_sock_filter *tfilter;
1980                 struct sock_fprog fprog;
1981                 struct sock_filter *filter;
1982                 int i;
1983 
1984                 if (optlen != sizeof(*tfprog)) {
1985                     return -TARGET_EINVAL;
1986                 }
1987                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1988                     return -TARGET_EFAULT;
1989                 }
1990                 if (!lock_user_struct(VERIFY_READ, tfilter,
1991                                       tswapal(tfprog->filter), 0)) {
1992                     unlock_user_struct(tfprog, optval_addr, 1);
1993                     return -TARGET_EFAULT;
1994                 }
1995 
1996                 fprog.len = tswap16(tfprog->len);
1997                 filter = g_try_new(struct sock_filter, fprog.len);
1998                 if (filter == NULL) {
1999                     unlock_user_struct(tfilter, tfprog->filter, 1);
2000                     unlock_user_struct(tfprog, optval_addr, 1);
2001                     return -TARGET_ENOMEM;
2002                 }
2003                 for (i = 0; i < fprog.len; i++) {
2004                     filter[i].code = tswap16(tfilter[i].code);
2005                     filter[i].jt = tfilter[i].jt;
2006                     filter[i].jf = tfilter[i].jf;
2007                     filter[i].k = tswap32(tfilter[i].k);
2008                 }
2009                 fprog.filter = filter;
2010 
2011                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2012                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2013                 g_free(filter);
2014 
2015                 unlock_user_struct(tfilter, tfprog->filter, 1);
2016                 unlock_user_struct(tfprog, optval_addr, 1);
2017                 return ret;
2018         }
2019 	case TARGET_SO_BINDTODEVICE:
2020 	{
2021 		char *dev_ifname, *addr_ifname;
2022 
2023 		if (optlen > IFNAMSIZ - 1) {
2024 		    optlen = IFNAMSIZ - 1;
2025 		}
2026 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2027 		if (!dev_ifname) {
2028 		    return -TARGET_EFAULT;
2029 		}
2030 		optname = SO_BINDTODEVICE;
2031 		addr_ifname = alloca(IFNAMSIZ);
2032 		memcpy(addr_ifname, dev_ifname, optlen);
2033 		addr_ifname[optlen] = 0;
2034 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2035                                            addr_ifname, optlen));
2036 		unlock_user (dev_ifname, optval_addr, 0);
2037 		return ret;
2038 	}
2039         case TARGET_SO_LINGER:
2040         {
2041                 struct linger lg;
2042                 struct target_linger *tlg;
2043 
2044                 if (optlen != sizeof(struct target_linger)) {
2045                     return -TARGET_EINVAL;
2046                 }
2047                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2048                     return -TARGET_EFAULT;
2049                 }
2050                 __get_user(lg.l_onoff, &tlg->l_onoff);
2051                 __get_user(lg.l_linger, &tlg->l_linger);
2052                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2053                                 &lg, sizeof(lg)));
2054                 unlock_user_struct(tlg, optval_addr, 0);
2055                 return ret;
2056         }
2057             /* Options with 'int' argument.  */
2058         case TARGET_SO_DEBUG:
2059 		optname = SO_DEBUG;
2060 		break;
2061         case TARGET_SO_REUSEADDR:
2062 		optname = SO_REUSEADDR;
2063 		break;
2064 #ifdef SO_REUSEPORT
2065         case TARGET_SO_REUSEPORT:
2066                 optname = SO_REUSEPORT;
2067                 break;
2068 #endif
2069         case TARGET_SO_TYPE:
2070 		optname = SO_TYPE;
2071 		break;
2072         case TARGET_SO_ERROR:
2073 		optname = SO_ERROR;
2074 		break;
2075         case TARGET_SO_DONTROUTE:
2076 		optname = SO_DONTROUTE;
2077 		break;
2078         case TARGET_SO_BROADCAST:
2079 		optname = SO_BROADCAST;
2080 		break;
2081         case TARGET_SO_SNDBUF:
2082 		optname = SO_SNDBUF;
2083 		break;
2084         case TARGET_SO_SNDBUFFORCE:
2085                 optname = SO_SNDBUFFORCE;
2086                 break;
2087         case TARGET_SO_RCVBUF:
2088 		optname = SO_RCVBUF;
2089 		break;
2090         case TARGET_SO_RCVBUFFORCE:
2091                 optname = SO_RCVBUFFORCE;
2092                 break;
2093         case TARGET_SO_KEEPALIVE:
2094 		optname = SO_KEEPALIVE;
2095 		break;
2096         case TARGET_SO_OOBINLINE:
2097 		optname = SO_OOBINLINE;
2098 		break;
2099         case TARGET_SO_NO_CHECK:
2100 		optname = SO_NO_CHECK;
2101 		break;
2102         case TARGET_SO_PRIORITY:
2103 		optname = SO_PRIORITY;
2104 		break;
2105 #ifdef SO_BSDCOMPAT
2106         case TARGET_SO_BSDCOMPAT:
2107 		optname = SO_BSDCOMPAT;
2108 		break;
2109 #endif
2110         case TARGET_SO_PASSCRED:
2111 		optname = SO_PASSCRED;
2112 		break;
2113         case TARGET_SO_PASSSEC:
2114                 optname = SO_PASSSEC;
2115                 break;
2116         case TARGET_SO_TIMESTAMP:
2117 		optname = SO_TIMESTAMP;
2118 		break;
2119         case TARGET_SO_RCVLOWAT:
2120 		optname = SO_RCVLOWAT;
2121 		break;
2122         default:
2123             goto unimplemented;
2124         }
2125 	if (optlen < sizeof(uint32_t))
2126             return -TARGET_EINVAL;
2127 
2128 	if (get_user_u32(val, optval_addr))
2129             return -TARGET_EFAULT;
2130 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2131         break;
2132     default:
2133     unimplemented:
2134         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2135         ret = -TARGET_ENOPROTOOPT;
2136     }
2137     return ret;
2138 }
2139 
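/*
 * The getsockopt() counterpart of do_setsockopt().  Only a subset of the
 * options handled above is implemented here: SO_PEERCRED and SO_LINGER get
 * structure conversion, the plain-int SOL_SOCKET and SOL_TCP options share
 * the int_case path (with SO_TYPE values translated back to target
 * constants), and a handful of SOL_IP options may be returned as a single
 * byte when the guest supplied a short buffer.
 */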
2140 /* do_getsockopt() must return target values and target errnos. */
2141 static abi_long do_getsockopt(int sockfd, int level, int optname,
2142                               abi_ulong optval_addr, abi_ulong optlen)
2143 {
2144     abi_long ret;
2145     int len, val;
2146     socklen_t lv;
2147 
2148     switch(level) {
2149     case TARGET_SOL_SOCKET:
2150         level = SOL_SOCKET;
2151         switch (optname) {
2152         /* These don't just return a single integer */
2153         case TARGET_SO_RCVTIMEO:
2154         case TARGET_SO_SNDTIMEO:
2155         case TARGET_SO_PEERNAME:
2156             goto unimplemented;
2157         case TARGET_SO_PEERCRED: {
2158             struct ucred cr;
2159             socklen_t crlen;
2160             struct target_ucred *tcr;
2161 
2162             if (get_user_u32(len, optlen)) {
2163                 return -TARGET_EFAULT;
2164             }
2165             if (len < 0) {
2166                 return -TARGET_EINVAL;
2167             }
2168 
2169             crlen = sizeof(cr);
2170             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2171                                        &cr, &crlen));
2172             if (ret < 0) {
2173                 return ret;
2174             }
2175             if (len > crlen) {
2176                 len = crlen;
2177             }
2178             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2179                 return -TARGET_EFAULT;
2180             }
2181             __put_user(cr.pid, &tcr->pid);
2182             __put_user(cr.uid, &tcr->uid);
2183             __put_user(cr.gid, &tcr->gid);
2184             unlock_user_struct(tcr, optval_addr, 1);
2185             if (put_user_u32(len, optlen)) {
2186                 return -TARGET_EFAULT;
2187             }
2188             break;
2189         }
2190         case TARGET_SO_LINGER:
2191         {
2192             struct linger lg;
2193             socklen_t lglen;
2194             struct target_linger *tlg;
2195 
2196             if (get_user_u32(len, optlen)) {
2197                 return -TARGET_EFAULT;
2198             }
2199             if (len < 0) {
2200                 return -TARGET_EINVAL;
2201             }
2202 
2203             lglen = sizeof(lg);
2204             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2205                                        &lg, &lglen));
2206             if (ret < 0) {
2207                 return ret;
2208             }
2209             if (len > lglen) {
2210                 len = lglen;
2211             }
2212             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2213                 return -TARGET_EFAULT;
2214             }
2215             __put_user(lg.l_onoff, &tlg->l_onoff);
2216             __put_user(lg.l_linger, &tlg->l_linger);
2217             unlock_user_struct(tlg, optval_addr, 1);
2218             if (put_user_u32(len, optlen)) {
2219                 return -TARGET_EFAULT;
2220             }
2221             break;
2222         }
2223         /* Options with 'int' argument.  */
2224         case TARGET_SO_DEBUG:
2225             optname = SO_DEBUG;
2226             goto int_case;
2227         case TARGET_SO_REUSEADDR:
2228             optname = SO_REUSEADDR;
2229             goto int_case;
2230 #ifdef SO_REUSEPORT
2231         case TARGET_SO_REUSEPORT:
2232             optname = SO_REUSEPORT;
2233             goto int_case;
2234 #endif
2235         case TARGET_SO_TYPE:
2236             optname = SO_TYPE;
2237             goto int_case;
2238         case TARGET_SO_ERROR:
2239             optname = SO_ERROR;
2240             goto int_case;
2241         case TARGET_SO_DONTROUTE:
2242             optname = SO_DONTROUTE;
2243             goto int_case;
2244         case TARGET_SO_BROADCAST:
2245             optname = SO_BROADCAST;
2246             goto int_case;
2247         case TARGET_SO_SNDBUF:
2248             optname = SO_SNDBUF;
2249             goto int_case;
2250         case TARGET_SO_RCVBUF:
2251             optname = SO_RCVBUF;
2252             goto int_case;
2253         case TARGET_SO_KEEPALIVE:
2254             optname = SO_KEEPALIVE;
2255             goto int_case;
2256         case TARGET_SO_OOBINLINE:
2257             optname = SO_OOBINLINE;
2258             goto int_case;
2259         case TARGET_SO_NO_CHECK:
2260             optname = SO_NO_CHECK;
2261             goto int_case;
2262         case TARGET_SO_PRIORITY:
2263             optname = SO_PRIORITY;
2264             goto int_case;
2265 #ifdef SO_BSDCOMPAT
2266         case TARGET_SO_BSDCOMPAT:
2267             optname = SO_BSDCOMPAT;
2268             goto int_case;
2269 #endif
2270         case TARGET_SO_PASSCRED:
2271             optname = SO_PASSCRED;
2272             goto int_case;
2273         case TARGET_SO_TIMESTAMP:
2274             optname = SO_TIMESTAMP;
2275             goto int_case;
2276         case TARGET_SO_RCVLOWAT:
2277             optname = SO_RCVLOWAT;
2278             goto int_case;
2279         case TARGET_SO_ACCEPTCONN:
2280             optname = SO_ACCEPTCONN;
2281             goto int_case;
2282         default:
2283             goto int_case;
2284         }
2285         break;
2286     case SOL_TCP:
2287         /* TCP options all take an 'int' value.  */
2288     int_case:
2289         if (get_user_u32(len, optlen))
2290             return -TARGET_EFAULT;
2291         if (len < 0)
2292             return -TARGET_EINVAL;
2293         lv = sizeof(lv);
2294         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2295         if (ret < 0)
2296             return ret;
2297         if (optname == SO_TYPE) {
2298             val = host_to_target_sock_type(val);
2299         }
2300         if (len > lv)
2301             len = lv;
2302         if (len == 4) {
2303             if (put_user_u32(val, optval_addr))
2304                 return -TARGET_EFAULT;
2305         } else {
2306             if (put_user_u8(val, optval_addr))
2307                 return -TARGET_EFAULT;
2308         }
2309         if (put_user_u32(len, optlen))
2310             return -TARGET_EFAULT;
2311         break;
2312     case SOL_IP:
2313         switch(optname) {
2314         case IP_TOS:
2315         case IP_TTL:
2316         case IP_HDRINCL:
2317         case IP_ROUTER_ALERT:
2318         case IP_RECVOPTS:
2319         case IP_RETOPTS:
2320         case IP_PKTINFO:
2321         case IP_MTU_DISCOVER:
2322         case IP_RECVERR:
2323         case IP_RECVTOS:
2324 #ifdef IP_FREEBIND
2325         case IP_FREEBIND:
2326 #endif
2327         case IP_MULTICAST_TTL:
2328         case IP_MULTICAST_LOOP:
2329             if (get_user_u32(len, optlen))
2330                 return -TARGET_EFAULT;
2331             if (len < 0)
2332                 return -TARGET_EINVAL;
2333             lv = sizeof(lv);
2334             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2335             if (ret < 0)
2336                 return ret;
2337             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2338                 len = 1;
2339                 if (put_user_u32(len, optlen)
2340                     || put_user_u8(val, optval_addr))
2341                     return -TARGET_EFAULT;
2342             } else {
2343                 if (len > sizeof(int))
2344                     len = sizeof(int);
2345                 if (put_user_u32(len, optlen)
2346                     || put_user_u32(val, optval_addr))
2347                     return -TARGET_EFAULT;
2348             }
2349             break;
2350         default:
2351             ret = -TARGET_ENOPROTOOPT;
2352             break;
2353         }
2354         break;
2355     default:
2356     unimplemented:
2357         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2358                  level, optname);
2359         ret = -TARGET_EOPNOTSUPP;
2360         break;
2361     }
2362     return ret;
2363 }
2364 
2365 /* Convert target low/high pair representing file offset into the host
2366  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2367  * as the kernel doesn't handle them either.
2368  */
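/*
 * The shifts below are done in two half-width steps because shifting a
 * 64-bit value by its full width of 64 bits (as would happen for a 64-bit
 * target or host) is undefined behaviour in C; shifting twice by
 * TARGET_LONG_BITS / 2 (or HOST_LONG_BITS / 2) is always well defined.
 * Worked example for a 32-bit target on a 64-bit host:
 *
 *   tlow  = 0x89abcdef, thigh = 0x01234567
 *   off   = 0x0123456789abcdef
 *   *hlow = 0x0123456789abcdef (the 64-bit host long), *hhigh = 0
 */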
2369 static void target_to_host_low_high(abi_ulong tlow,
2370                                     abi_ulong thigh,
2371                                     unsigned long *hlow,
2372                                     unsigned long *hhigh)
2373 {
2374     uint64_t off = tlow |
2375         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2376         TARGET_LONG_BITS / 2;
2377 
2378     *hlow = off;
2379     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2380 }
2381 
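/*
 * Map a guest iovec array into a host struct iovec array, locking each
 * guest buffer into host memory.  The first invalid buffer address is a
 * hard EFAULT; later invalid entries merely get a zero length so that the
 * host call performs a partial transfer, and the total length is clamped
 * to max_len.  On failure errno is set and NULL is returned; the caller
 * releases everything again with unlock_iovec().
 */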
2382 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2383                                 abi_ulong count, int copy)
2384 {
2385     struct target_iovec *target_vec;
2386     struct iovec *vec;
2387     abi_ulong total_len, max_len;
2388     int i;
2389     int err = 0;
2390     bool bad_address = false;
2391 
2392     if (count == 0) {
2393         errno = 0;
2394         return NULL;
2395     }
2396     if (count > IOV_MAX) {
2397         errno = EINVAL;
2398         return NULL;
2399     }
2400 
2401     vec = g_try_new0(struct iovec, count);
2402     if (vec == NULL) {
2403         errno = ENOMEM;
2404         return NULL;
2405     }
2406 
2407     target_vec = lock_user(VERIFY_READ, target_addr,
2408                            count * sizeof(struct target_iovec), 1);
2409     if (target_vec == NULL) {
2410         err = EFAULT;
2411         goto fail2;
2412     }
2413 
2414     /* ??? If host page size > target page size, this will result in a
2415        value larger than what we can actually support.  */
2416     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2417     total_len = 0;
2418 
2419     for (i = 0; i < count; i++) {
2420         abi_ulong base = tswapal(target_vec[i].iov_base);
2421         abi_long len = tswapal(target_vec[i].iov_len);
2422 
2423         if (len < 0) {
2424             err = EINVAL;
2425             goto fail;
2426         } else if (len == 0) {
2427             /* Zero length pointer is ignored.  */
2428             vec[i].iov_base = 0;
2429         } else {
2430             vec[i].iov_base = lock_user(type, base, len, copy);
2431             /* If the first buffer pointer is bad, this is a fault.  But
2432              * subsequent bad buffers will result in a partial write; this
2433              * is realized by filling the vector with null pointers and
2434              * zero lengths. */
2435             if (!vec[i].iov_base) {
2436                 if (i == 0) {
2437                     err = EFAULT;
2438                     goto fail;
2439                 } else {
2440                     bad_address = true;
2441                 }
2442             }
2443             if (bad_address) {
2444                 len = 0;
2445             }
2446             if (len > max_len - total_len) {
2447                 len = max_len - total_len;
2448             }
2449         }
2450         vec[i].iov_len = len;
2451         total_len += len;
2452     }
2453 
2454     unlock_user(target_vec, target_addr, 0);
2455     return vec;
2456 
2457  fail:
2458     while (--i >= 0) {
2459         if (tswapal(target_vec[i].iov_len) > 0) {
2460             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2461         }
2462     }
2463     unlock_user(target_vec, target_addr, 0);
2464  fail2:
2465     g_free(vec);
2466     errno = err;
2467     return NULL;
2468 }
2469 
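/*
 * Undo lock_iovec(): unlock every guest buffer (copying data back to the
 * guest when 'copy' is set) and free the host vector.
 */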
2470 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2471                          abi_ulong count, int copy)
2472 {
2473     struct target_iovec *target_vec;
2474     int i;
2475 
2476     target_vec = lock_user(VERIFY_READ, target_addr,
2477                            count * sizeof(struct target_iovec), 1);
2478     if (target_vec) {
2479         for (i = 0; i < count; i++) {
2480             abi_ulong base = tswapal(target_vec[i].iov_base);
2481             abi_long len = tswapal(target_vec[i].iov_len);
2482             if (len < 0) {
2483                 break;
2484             }
2485             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2486         }
2487         unlock_user(target_vec, target_addr, 0);
2488     }
2489 
2490     g_free(vec);
2491 }
2492 
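/*
 * Translate the guest's socket type and type flags into host values.
 * TARGET_SOCK_CLOEXEC/TARGET_SOCK_NONBLOCK map onto SOCK_CLOEXEC and
 * SOCK_NONBLOCK where the host provides them; when SOCK_NONBLOCK is absent
 * but O_NONBLOCK exists, the flag is applied after socket creation by
 * sock_flags_fixup() below.
 */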
2493 static inline int target_to_host_sock_type(int *type)
2494 {
2495     int host_type = 0;
2496     int target_type = *type;
2497 
2498     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2499     case TARGET_SOCK_DGRAM:
2500         host_type = SOCK_DGRAM;
2501         break;
2502     case TARGET_SOCK_STREAM:
2503         host_type = SOCK_STREAM;
2504         break;
2505     default:
2506         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2507         break;
2508     }
2509     if (target_type & TARGET_SOCK_CLOEXEC) {
2510 #if defined(SOCK_CLOEXEC)
2511         host_type |= SOCK_CLOEXEC;
2512 #else
2513         return -TARGET_EINVAL;
2514 #endif
2515     }
2516     if (target_type & TARGET_SOCK_NONBLOCK) {
2517 #if defined(SOCK_NONBLOCK)
2518         host_type |= SOCK_NONBLOCK;
2519 #elif !defined(O_NONBLOCK)
2520         return -TARGET_EINVAL;
2521 #endif
2522     }
2523     *type = host_type;
2524     return 0;
2525 }
2526 
2527 /* Try to emulate socket type flags after socket creation.  */
2528 static int sock_flags_fixup(int fd, int target_type)
2529 {
2530 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2531     if (target_type & TARGET_SOCK_NONBLOCK) {
2532         int flags = fcntl(fd, F_GETFL);
2533         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2534             close(fd);
2535             return -TARGET_EINVAL;
2536         }
2537     }
2538 #endif
2539     return fd;
2540 }
2541 
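/*
 * Note that for PF_NETLINK only the protocols with a registered fd
 * translator (NETLINK_ROUTE when CONFIG_RTNETLINK is set,
 * NETLINK_KOBJECT_UEVENT and NETLINK_AUDIT) are allowed through; the
 * translators registered here convert the netlink/packet payloads on
 * later read/write calls.
 */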
2542 /* do_socket() must return target values and target errnos. */
2543 static abi_long do_socket(int domain, int type, int protocol)
2544 {
2545     int target_type = type;
2546     int ret;
2547 
2548     ret = target_to_host_sock_type(&type);
2549     if (ret) {
2550         return ret;
2551     }
2552 
2553     if (domain == PF_NETLINK && !(
2554 #ifdef CONFIG_RTNETLINK
2555          protocol == NETLINK_ROUTE ||
2556 #endif
2557          protocol == NETLINK_KOBJECT_UEVENT ||
2558          protocol == NETLINK_AUDIT)) {
2559         return -TARGET_EPFNOSUPPORT;
2560     }
2561 
2562     if (domain == AF_PACKET ||
2563         (domain == AF_INET && type == SOCK_PACKET)) {
2564         protocol = tswap16(protocol);
2565     }
2566 
2567     ret = get_errno(socket(domain, type, protocol));
2568     if (ret >= 0) {
2569         ret = sock_flags_fixup(ret, target_type);
2570         if (type == SOCK_PACKET) {
2571             /* Handle an obsolete case: if the socket type is
2572              * SOCK_PACKET, the socket is bound by name.
2573              */
2574             fd_trans_register(ret, &target_packet_trans);
2575         } else if (domain == PF_NETLINK) {
2576             switch (protocol) {
2577 #ifdef CONFIG_RTNETLINK
2578             case NETLINK_ROUTE:
2579                 fd_trans_register(ret, &target_netlink_route_trans);
2580                 break;
2581 #endif
2582             case NETLINK_KOBJECT_UEVENT:
2583                 /* nothing to do: messages are strings */
2584                 break;
2585             case NETLINK_AUDIT:
2586                 fd_trans_register(ret, &target_netlink_audit_trans);
2587                 break;
2588             default:
2589                 g_assert_not_reached();
2590             }
2591         }
2592     }
2593     return ret;
2594 }
2595 
2596 /* do_bind() must return target values and target errnos. */
2597 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2598                         socklen_t addrlen)
2599 {
2600     void *addr;
2601     abi_long ret;
2602 
2603     if ((int)addrlen < 0) {
2604         return -TARGET_EINVAL;
2605     }
2606 
2607     addr = alloca(addrlen+1);
2608 
2609     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2610     if (ret)
2611         return ret;
2612 
2613     return get_errno(bind(sockfd, addr, addrlen));
2614 }
2615 
2616 /* do_connect() must return target values and target errnos. */
2617 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2618                            socklen_t addrlen)
2619 {
2620     void *addr;
2621     abi_long ret;
2622 
2623     if ((int)addrlen < 0) {
2624         return -TARGET_EINVAL;
2625     }
2626 
2627     addr = alloca(addrlen+1);
2628 
2629     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2630     if (ret)
2631         return ret;
2632 
2633     return get_errno(safe_connect(sockfd, addr, addrlen));
2634 }
2635 
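/*
 * Common implementation for sendmsg() and recvmsg().  The control buffer
 * is allocated at twice the guest-declared size, which is the simple
 * over-allocation strategy referred to in target_to_host_cmsg(): host
 * cmsg headers and payloads can be larger than the guest's, and doubling
 * the buffer is enough for every payload type currently converted.
 */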
2636 /* do_sendrecvmsg_locked() must return target values and target errnos. */
2637 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2638                                       int flags, int send)
2639 {
2640     abi_long ret, len;
2641     struct msghdr msg;
2642     abi_ulong count;
2643     struct iovec *vec;
2644     abi_ulong target_vec;
2645 
2646     if (msgp->msg_name) {
2647         msg.msg_namelen = tswap32(msgp->msg_namelen);
2648         msg.msg_name = alloca(msg.msg_namelen+1);
2649         ret = target_to_host_sockaddr(fd, msg.msg_name,
2650                                       tswapal(msgp->msg_name),
2651                                       msg.msg_namelen);
2652         if (ret == -TARGET_EFAULT) {
2653             /* For connected sockets msg_name and msg_namelen must
2654              * be ignored, so returning EFAULT immediately is wrong.
2655              * Instead, pass a bad msg_name to the host kernel, and
2656              * let it decide whether to return EFAULT or not.
2657              */
2658             msg.msg_name = (void *)-1;
2659         } else if (ret) {
2660             goto out2;
2661         }
2662     } else {
2663         msg.msg_name = NULL;
2664         msg.msg_namelen = 0;
2665     }
2666     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2667     msg.msg_control = alloca(msg.msg_controllen);
2668     memset(msg.msg_control, 0, msg.msg_controllen);
2669 
2670     msg.msg_flags = tswap32(msgp->msg_flags);
2671 
2672     count = tswapal(msgp->msg_iovlen);
2673     target_vec = tswapal(msgp->msg_iov);
2674 
2675     if (count > IOV_MAX) {
2676         /* sendmsg/recvmsg return a different errno for this condition than
2677          * readv/writev, so we must catch it here before lock_iovec() does.
2678          */
2679         ret = -TARGET_EMSGSIZE;
2680         goto out2;
2681     }
2682 
2683     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2684                      target_vec, count, send);
2685     if (vec == NULL) {
2686         ret = -host_to_target_errno(errno);
2687         goto out2;
2688     }
2689     msg.msg_iovlen = count;
2690     msg.msg_iov = vec;
2691 
2692     if (send) {
2693         if (fd_trans_target_to_host_data(fd)) {
2694             void *host_msg;
2695 
2696             host_msg = g_malloc(msg.msg_iov->iov_len);
2697             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2698             ret = fd_trans_target_to_host_data(fd)(host_msg,
2699                                                    msg.msg_iov->iov_len);
2700             if (ret >= 0) {
2701                 msg.msg_iov->iov_base = host_msg;
2702                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2703             }
2704             g_free(host_msg);
2705         } else {
2706             ret = target_to_host_cmsg(&msg, msgp);
2707             if (ret == 0) {
2708                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2709             }
2710         }
2711     } else {
2712         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2713         if (!is_error(ret)) {
2714             len = ret;
2715             if (fd_trans_host_to_target_data(fd)) {
2716                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2717                                                MIN(msg.msg_iov->iov_len, len));
2718             } else {
2719                 ret = host_to_target_cmsg(msgp, &msg);
2720             }
2721             if (!is_error(ret)) {
2722                 msgp->msg_namelen = tswap32(msg.msg_namelen);
2723                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2724                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2725                                     msg.msg_name, msg.msg_namelen);
2726                     if (ret) {
2727                         goto out;
2728                     }
2729                 }
2730 
2731                 ret = len;
2732             }
2733         }
2734     }
2735 
2736 out:
2737     unlock_iovec(vec, target_vec, count, !send);
2738 out2:
2739     return ret;
2740 }
2741 
2742 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2743                                int flags, int send)
2744 {
2745     abi_long ret;
2746     struct target_msghdr *msgp;
2747 
2748     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2749                           msgp,
2750                           target_msg,
2751                           send ? 1 : 0)) {
2752         return -TARGET_EFAULT;
2753     }
2754     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2755     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2756     return ret;
2757 }
2758 
2759 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2760  * so it might not have this *mmsg-specific flag either.
2761  */
2762 #ifndef MSG_WAITFORONE
2763 #define MSG_WAITFORONE 0x10000
2764 #endif
2765 
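/*
 * Common implementation for sendmmsg() and recvmmsg(): loop over the
 * vector calling do_sendrecvmsg_locked() for each entry.  Like the kernel,
 * it returns the number of messages processed if any succeeded, and only
 * reports an error when the very first message fails.
 */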
2766 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2767                                 unsigned int vlen, unsigned int flags,
2768                                 int send)
2769 {
2770     struct target_mmsghdr *mmsgp;
2771     abi_long ret = 0;
2772     int i;
2773 
2774     if (vlen > UIO_MAXIOV) {
2775         vlen = UIO_MAXIOV;
2776     }
2777 
2778     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2779     if (!mmsgp) {
2780         return -TARGET_EFAULT;
2781     }
2782 
2783     for (i = 0; i < vlen; i++) {
2784         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2785         if (is_error(ret)) {
2786             break;
2787         }
2788         mmsgp[i].msg_len = tswap32(ret);
2789         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2790         if (flags & MSG_WAITFORONE) {
2791             flags |= MSG_DONTWAIT;
2792         }
2793     }
2794 
2795     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2796 
2797     /* Return number of datagrams sent if we sent any at all;
2798      * otherwise return the error.
2799      */
2800     if (i) {
2801         return i;
2802     }
2803     return ret;
2804 }
2805 
2806 /* do_accept4() must return target values and target errnos. */
2807 static abi_long do_accept4(int fd, abi_ulong target_addr,
2808                            abi_ulong target_addrlen_addr, int flags)
2809 {
2810     socklen_t addrlen;
2811     void *addr;
2812     abi_long ret;
2813     int host_flags;
2814 
2815     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2816 
2817     if (target_addr == 0) {
2818         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2819     }
2820 
2821     /* Linux returns EINVAL if the addrlen pointer is invalid */
2822     if (get_user_u32(addrlen, target_addrlen_addr))
2823         return -TARGET_EINVAL;
2824 
2825     if ((int)addrlen < 0) {
2826         return -TARGET_EINVAL;
2827     }
2828 
2829     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2830         return -TARGET_EINVAL;
2831 
2832     addr = alloca(addrlen);
2833 
2834     ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
2835     if (!is_error(ret)) {
2836         host_to_target_sockaddr(target_addr, addr, addrlen);
2837         if (put_user_u32(addrlen, target_addrlen_addr))
2838             ret = -TARGET_EFAULT;
2839     }
2840     return ret;
2841 }
2842 
2843 /* do_getpeername() must return target values and target errnos. */
2844 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2845                                abi_ulong target_addrlen_addr)
2846 {
2847     socklen_t addrlen;
2848     void *addr;
2849     abi_long ret;
2850 
2851     if (get_user_u32(addrlen, target_addrlen_addr))
2852         return -TARGET_EFAULT;
2853 
2854     if ((int)addrlen < 0) {
2855         return -TARGET_EINVAL;
2856     }
2857 
2858     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2859         return -TARGET_EFAULT;
2860 
2861     addr = alloca(addrlen);
2862 
2863     ret = get_errno(getpeername(fd, addr, &addrlen));
2864     if (!is_error(ret)) {
2865         host_to_target_sockaddr(target_addr, addr, addrlen);
2866         if (put_user_u32(addrlen, target_addrlen_addr))
2867             ret = -TARGET_EFAULT;
2868     }
2869     return ret;
2870 }
2871 
2872 /* do_getsockname() must return target values and target errnos. */
2873 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2874                                abi_ulong target_addrlen_addr)
2875 {
2876     socklen_t addrlen;
2877     void *addr;
2878     abi_long ret;
2879 
2880     if (get_user_u32(addrlen, target_addrlen_addr))
2881         return -TARGET_EFAULT;
2882 
2883     if ((int)addrlen < 0) {
2884         return -TARGET_EINVAL;
2885     }
2886 
2887     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2888         return -TARGET_EFAULT;
2889 
2890     addr = alloca(addrlen);
2891 
2892     ret = get_errno(getsockname(fd, addr, &addrlen));
2893     if (!is_error(ret)) {
2894         host_to_target_sockaddr(target_addr, addr, addrlen);
2895         if (put_user_u32(addrlen, target_addrlen_addr))
2896             ret = -TARGET_EFAULT;
2897     }
2898     return ret;
2899 }
2900 
2901 /* do_socketpair() must return target values and target errnos. */
2902 static abi_long do_socketpair(int domain, int type, int protocol,
2903                               abi_ulong target_tab_addr)
2904 {
2905     int tab[2];
2906     abi_long ret;
2907 
2908     target_to_host_sock_type(&type);
2909 
2910     ret = get_errno(socketpair(domain, type, protocol, tab));
2911     if (!is_error(ret)) {
2912         if (put_user_s32(tab[0], target_tab_addr)
2913             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2914             ret = -TARGET_EFAULT;
2915     }
2916     return ret;
2917 }
2918 
2919 /* do_sendto() must return target values and target errnos. */
2920 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2921                           abi_ulong target_addr, socklen_t addrlen)
2922 {
2923     void *addr;
2924     void *host_msg;
2925     void *copy_msg = NULL;
2926     abi_long ret;
2927 
2928     if ((int)addrlen < 0) {
2929         return -TARGET_EINVAL;
2930     }
2931 
2932     host_msg = lock_user(VERIFY_READ, msg, len, 1);
2933     if (!host_msg)
2934         return -TARGET_EFAULT;
2935     if (fd_trans_target_to_host_data(fd)) {
2936         copy_msg = host_msg;
2937         host_msg = g_malloc(len);
2938         memcpy(host_msg, copy_msg, len);
2939         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
2940         if (ret < 0) {
2941             goto fail;
2942         }
2943     }
2944     if (target_addr) {
2945         addr = alloca(addrlen+1);
2946         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
2947         if (ret) {
2948             goto fail;
2949         }
2950         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
2951     } else {
2952         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
2953     }
2954 fail:
2955     if (copy_msg) {
2956         g_free(host_msg);
2957         host_msg = copy_msg;
2958     }
2959     unlock_user(host_msg, msg, 0);
2960     return ret;
2961 }
2962 
2963 /* do_recvfrom() must return target values and target errnos. */
2964 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2965                             abi_ulong target_addr,
2966                             abi_ulong target_addrlen)
2967 {
2968     socklen_t addrlen;
2969     void *addr;
2970     void *host_msg;
2971     abi_long ret;
2972 
2973     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2974     if (!host_msg)
2975         return -TARGET_EFAULT;
2976     if (target_addr) {
2977         if (get_user_u32(addrlen, target_addrlen)) {
2978             ret = -TARGET_EFAULT;
2979             goto fail;
2980         }
2981         if ((int)addrlen < 0) {
2982             ret = -TARGET_EINVAL;
2983             goto fail;
2984         }
2985         addr = alloca(addrlen);
2986         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
2987                                       addr, &addrlen));
2988     } else {
2989         addr = NULL; /* To keep compiler quiet.  */
2990         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
2991     }
2992     if (!is_error(ret)) {
2993         if (fd_trans_host_to_target_data(fd)) {
2994             abi_long trans;
2995             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
2996             if (is_error(trans)) {
2997                 ret = trans;
2998                 goto fail;
2999             }
3000         }
3001         if (target_addr) {
3002             host_to_target_sockaddr(target_addr, addr, addrlen);
3003             if (put_user_u32(addrlen, target_addrlen)) {
3004                 ret = -TARGET_EFAULT;
3005                 goto fail;
3006             }
3007         }
3008         unlock_user(host_msg, msg, len);
3009     } else {
3010 fail:
3011         unlock_user(host_msg, msg, 0);
3012     }
3013     return ret;
3014 }
3015 
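/*
 * Older ABIs (e.g. 32-bit x86) multiplex all socket operations through a
 * single socketcall(num, args) syscall; do_socketcall() below unpacks the
 * argument block from guest memory and dispatches to the do_*() helpers
 * above.  Illustrative flow, assuming a guest libc that still uses
 * socketcall for socket(2):
 *
 *   guest:  socket(AF_INET, SOCK_STREAM, 0)
 *   libc:   socketcall(TARGET_SYS_SOCKET, {AF_INET, SOCK_STREAM, 0})
 *   here:   nargs[TARGET_SYS_SOCKET] == 3 arguments are read with
 *           get_user_ual() and passed to do_socket().
 */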
3016 #ifdef TARGET_NR_socketcall
3017 /* do_socketcall() must return target values and target errnos. */
3018 static abi_long do_socketcall(int num, abi_ulong vptr)
3019 {
3020     static const unsigned nargs[] = { /* number of arguments per operation */
3021         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3022         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3023         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3024         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3025         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3026         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3027         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3028         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3029         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3030         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3031         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3032         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3033         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3034         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3035         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3036         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3037         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3038         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3039         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3040         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3041     };
3042     abi_long a[6]; /* max 6 args */
3043     unsigned i;
3044 
3045     /* check the range of the first argument num */
3046     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3047     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3048         return -TARGET_EINVAL;
3049     }
3050     /* ensure we have space for args */
3051     if (nargs[num] > ARRAY_SIZE(a)) {
3052         return -TARGET_EINVAL;
3053     }
3054     /* collect the arguments in a[] according to nargs[] */
3055     for (i = 0; i < nargs[num]; ++i) {
3056         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3057             return -TARGET_EFAULT;
3058         }
3059     }
3060     /* now that we have the args, invoke the appropriate underlying function */
3061     switch (num) {
3062     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3063         return do_socket(a[0], a[1], a[2]);
3064     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3065         return do_bind(a[0], a[1], a[2]);
3066     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3067         return do_connect(a[0], a[1], a[2]);
3068     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3069         return get_errno(listen(a[0], a[1]));
3070     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3071         return do_accept4(a[0], a[1], a[2], 0);
3072     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3073         return do_getsockname(a[0], a[1], a[2]);
3074     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3075         return do_getpeername(a[0], a[1], a[2]);
3076     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3077         return do_socketpair(a[0], a[1], a[2], a[3]);
3078     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3079         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3080     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3081         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3082     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3083         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3084     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3085         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3086     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3087         return get_errno(shutdown(a[0], a[1]));
3088     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3089         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3090     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3091         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3092     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3093         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3094     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3095         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3096     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3097         return do_accept4(a[0], a[1], a[2], a[3]);
3098     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3099         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3100     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3101         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3102     default:
3103         gemu_log("Unsupported socketcall: %d\n", num);
3104         return -TARGET_EINVAL;
3105     }
3106 }
3107 #endif
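/*
 * Illustrative sketch (kept out of the build): on targets that only provide
 * the multiplexed socketcall syscall (for example 32-bit x86 guests), the
 * guest C library packs the real arguments into an array and passes a
 * pointer to that array; do_socketcall() above reads the array back with
 * get_user_ual() and dispatches to the per-call helpers.  guest_connect()
 * is a hypothetical name for what the guest libc does internally.
 */
#if 0
static int guest_connect(int fd, const struct sockaddr *addr, socklen_t len)
{
    unsigned long args[3] = { fd, (unsigned long)addr, len };

    /* TARGET_SYS_CONNECT corresponds to SYS_CONNECT (3) in the socketcall ABI */
    return syscall(__NR_socketcall, TARGET_SYS_CONNECT, args);
}
#endif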
3108 
3109 #define N_SHM_REGIONS	32
3110 
3111 static struct shm_region {
3112     abi_ulong start;
3113     abi_ulong size;
3114     bool in_use;
3115 } shm_regions[N_SHM_REGIONS];
3116 
3117 #ifndef TARGET_SEMID64_DS
3118 /* asm-generic version of this struct */
3119 struct target_semid64_ds
3120 {
3121   struct target_ipc_perm sem_perm;
3122   abi_ulong sem_otime;
3123 #if TARGET_ABI_BITS == 32
3124   abi_ulong __unused1;
3125 #endif
3126   abi_ulong sem_ctime;
3127 #if TARGET_ABI_BITS == 32
3128   abi_ulong __unused2;
3129 #endif
3130   abi_ulong sem_nsems;
3131   abi_ulong __unused3;
3132   abi_ulong __unused4;
3133 };
3134 #endif
3135 
3136 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3137                                                abi_ulong target_addr)
3138 {
3139     struct target_ipc_perm *target_ip;
3140     struct target_semid64_ds *target_sd;
3141 
3142     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3143         return -TARGET_EFAULT;
3144     target_ip = &(target_sd->sem_perm);
3145     host_ip->__key = tswap32(target_ip->__key);
3146     host_ip->uid = tswap32(target_ip->uid);
3147     host_ip->gid = tswap32(target_ip->gid);
3148     host_ip->cuid = tswap32(target_ip->cuid);
3149     host_ip->cgid = tswap32(target_ip->cgid);
3150 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3151     host_ip->mode = tswap32(target_ip->mode);
3152 #else
3153     host_ip->mode = tswap16(target_ip->mode);
3154 #endif
3155 #if defined(TARGET_PPC)
3156     host_ip->__seq = tswap32(target_ip->__seq);
3157 #else
3158     host_ip->__seq = tswap16(target_ip->__seq);
3159 #endif
3160     unlock_user_struct(target_sd, target_addr, 0);
3161     return 0;
3162 }
3163 
3164 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3165                                                struct ipc_perm *host_ip)
3166 {
3167     struct target_ipc_perm *target_ip;
3168     struct target_semid64_ds *target_sd;
3169 
3170     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3171         return -TARGET_EFAULT;
3172     target_ip = &(target_sd->sem_perm);
3173     target_ip->__key = tswap32(host_ip->__key);
3174     target_ip->uid = tswap32(host_ip->uid);
3175     target_ip->gid = tswap32(host_ip->gid);
3176     target_ip->cuid = tswap32(host_ip->cuid);
3177     target_ip->cgid = tswap32(host_ip->cgid);
3178 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3179     target_ip->mode = tswap32(host_ip->mode);
3180 #else
3181     target_ip->mode = tswap16(host_ip->mode);
3182 #endif
3183 #if defined(TARGET_PPC)
3184     target_ip->__seq = tswap32(host_ip->__seq);
3185 #else
3186     target_ip->__seq = tswap16(host_ip->__seq);
3187 #endif
3188     unlock_user_struct(target_sd, target_addr, 1);
3189     return 0;
3190 }
3191 
3192 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3193                                                abi_ulong target_addr)
3194 {
3195     struct target_semid64_ds *target_sd;
3196 
3197     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3198         return -TARGET_EFAULT;
3199     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3200         return -TARGET_EFAULT;
3201     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3202     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3203     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3204     unlock_user_struct(target_sd, target_addr, 0);
3205     return 0;
3206 }
3207 
3208 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3209                                                struct semid_ds *host_sd)
3210 {
3211     struct target_semid64_ds *target_sd;
3212 
3213     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3214         return -TARGET_EFAULT;
3215     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3216         return -TARGET_EFAULT;
3217     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3218     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3219     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3220     unlock_user_struct(target_sd, target_addr, 1);
3221     return 0;
3222 }
3223 
3224 struct target_seminfo {
3225     int semmap;
3226     int semmni;
3227     int semmns;
3228     int semmnu;
3229     int semmsl;
3230     int semopm;
3231     int semume;
3232     int semusz;
3233     int semvmx;
3234     int semaem;
3235 };
3236 
3237 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3238                                               struct seminfo *host_seminfo)
3239 {
3240     struct target_seminfo *target_seminfo;
3241     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3242         return -TARGET_EFAULT;
3243     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3244     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3245     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3246     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3247     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3248     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3249     __put_user(host_seminfo->semume, &target_seminfo->semume);
3250     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3251     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3252     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3253     unlock_user_struct(target_seminfo, target_addr, 1);
3254     return 0;
3255 }
3256 
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
3270 
3271 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3272                                                abi_ulong target_addr)
3273 {
3274     int nsems;
3275     unsigned short *array;
3276     union semun semun;
3277     struct semid_ds semid_ds;
3278     int i, ret;
3279 
3280     semun.buf = &semid_ds;
3281 
3282     ret = semctl(semid, 0, IPC_STAT, semun);
3283     if (ret == -1)
3284         return get_errno(ret);
3285 
3286     nsems = semid_ds.sem_nsems;
3287 
3288     *host_array = g_try_new(unsigned short, nsems);
3289     if (!*host_array) {
3290         return -TARGET_ENOMEM;
3291     }
3292     array = lock_user(VERIFY_READ, target_addr,
3293                       nsems*sizeof(unsigned short), 1);
3294     if (!array) {
3295         g_free(*host_array);
3296         return -TARGET_EFAULT;
3297     }
3298 
    for (i = 0; i < nsems; i++) {
3300         __get_user((*host_array)[i], &array[i]);
3301     }
3302     unlock_user(array, target_addr, 0);
3303 
3304     return 0;
3305 }
3306 
3307 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3308                                                unsigned short **host_array)
3309 {
3310     int nsems;
3311     unsigned short *array;
3312     union semun semun;
3313     struct semid_ds semid_ds;
3314     int i, ret;
3315 
3316     semun.buf = &semid_ds;
3317 
3318     ret = semctl(semid, 0, IPC_STAT, semun);
3319     if (ret == -1)
3320         return get_errno(ret);
3321 
3322     nsems = semid_ds.sem_nsems;
3323 
3324     array = lock_user(VERIFY_WRITE, target_addr,
3325                       nsems*sizeof(unsigned short), 0);
3326     if (!array)
3327         return -TARGET_EFAULT;
3328 
    for (i = 0; i < nsems; i++) {
3330         __put_user((*host_array)[i], &array[i]);
3331     }
3332     g_free(*host_array);
3333     unlock_user(array, target_addr, 1);
3334 
3335     return 0;
3336 }
3337 
3338 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3339                                  abi_ulong target_arg)
3340 {
3341     union target_semun target_su = { .buf = target_arg };
3342     union semun arg;
3343     struct semid_ds dsarg;
3344     unsigned short *array = NULL;
3345     struct seminfo seminfo;
3346     abi_long ret = -TARGET_EINVAL;
3347     abi_long err;
3348     cmd &= 0xff;
3349 
    switch (cmd) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != sizeof(target_su.buf)) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err) {
            return err;
        }
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err) {
            return err;
        }
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err) {
            return err;
        }
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err) {
            return err;
        }
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err) {
            return err;
        }
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }
3404 
3405     return ret;
3406 }
3407 
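/*
 * Worked example for the GETVAL/SETVAL handling in do_semctl() above
 * (illustrative only, assuming a 64-bit big-endian guest on a little-endian
 * host).  The guest passes "union semun" by value, so for SETVAL with
 * val = 5 the 8-byte argument arrives as the integer 0x0000000500000000:
 * the 4-byte value sits in the opposite half of the union from the one the
 * host-side "int val" member overlays.  tswapal() byte-swaps the whole
 * 8 bytes, giving 0x0000000005000000, which places the payload in the half
 * that "val" reads, and tswap32() then restores the byte order of the
 * 4-byte value itself, yielding 5.  When abi_ulong is only 4 bytes wide
 * (32-bit guests) the two union members coincide and no correction is
 * needed.
 */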
3408 struct target_sembuf {
3409     unsigned short sem_num;
3410     short sem_op;
3411     short sem_flg;
3412 };
3413 
3414 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3415                                              abi_ulong target_addr,
3416                                              unsigned nsops)
3417 {
3418     struct target_sembuf *target_sembuf;
3419     int i;
3420 
3421     target_sembuf = lock_user(VERIFY_READ, target_addr,
3422                               nsops*sizeof(struct target_sembuf), 1);
3423     if (!target_sembuf)
3424         return -TARGET_EFAULT;
3425 
    for (i = 0; i < nsops; i++) {
3427         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3428         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3429         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3430     }
3431 
3432     unlock_user(target_sembuf, target_addr, 0);
3433 
3434     return 0;
3435 }
3436 
3437 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3438 {
3439     struct sembuf sops[nsops];
3440 
3441     if (target_to_host_sembuf(sops, ptr, nsops))
3442         return -TARGET_EFAULT;
3443 
3444     return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3445 }
3446 
3447 struct target_msqid_ds
3448 {
3449     struct target_ipc_perm msg_perm;
3450     abi_ulong msg_stime;
3451 #if TARGET_ABI_BITS == 32
3452     abi_ulong __unused1;
3453 #endif
3454     abi_ulong msg_rtime;
3455 #if TARGET_ABI_BITS == 32
3456     abi_ulong __unused2;
3457 #endif
3458     abi_ulong msg_ctime;
3459 #if TARGET_ABI_BITS == 32
3460     abi_ulong __unused3;
3461 #endif
3462     abi_ulong __msg_cbytes;
3463     abi_ulong msg_qnum;
3464     abi_ulong msg_qbytes;
3465     abi_ulong msg_lspid;
3466     abi_ulong msg_lrpid;
3467     abi_ulong __unused4;
3468     abi_ulong __unused5;
3469 };
3470 
3471 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3472                                                abi_ulong target_addr)
3473 {
3474     struct target_msqid_ds *target_md;
3475 
3476     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3477         return -TARGET_EFAULT;
3478     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3479         return -TARGET_EFAULT;
3480     host_md->msg_stime = tswapal(target_md->msg_stime);
3481     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3482     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3483     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3484     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3485     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3486     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3487     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3488     unlock_user_struct(target_md, target_addr, 0);
3489     return 0;
3490 }
3491 
3492 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3493                                                struct msqid_ds *host_md)
3494 {
3495     struct target_msqid_ds *target_md;
3496 
3497     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3498         return -TARGET_EFAULT;
3499     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3500         return -TARGET_EFAULT;
3501     target_md->msg_stime = tswapal(host_md->msg_stime);
3502     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3503     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3504     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3505     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3506     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3507     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3508     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3509     unlock_user_struct(target_md, target_addr, 1);
3510     return 0;
3511 }
3512 
3513 struct target_msginfo {
3514     int msgpool;
3515     int msgmap;
3516     int msgmax;
3517     int msgmnb;
3518     int msgmni;
3519     int msgssz;
3520     int msgtql;
3521     unsigned short int msgseg;
3522 };
3523 
3524 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3525                                               struct msginfo *host_msginfo)
3526 {
3527     struct target_msginfo *target_msginfo;
3528     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3529         return -TARGET_EFAULT;
3530     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3531     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3532     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3533     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3534     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3535     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3536     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3537     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3538     unlock_user_struct(target_msginfo, target_addr, 1);
3539     return 0;
3540 }
3541 
3542 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3543 {
3544     struct msqid_ds dsarg;
3545     struct msginfo msginfo;
3546     abi_long ret = -TARGET_EINVAL;
3547 
3548     cmd &= 0xff;
3549 
3550     switch (cmd) {
3551     case IPC_STAT:
3552     case IPC_SET:
3553     case MSG_STAT:
3554         if (target_to_host_msqid_ds(&dsarg,ptr))
3555             return -TARGET_EFAULT;
3556         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3557         if (host_to_target_msqid_ds(ptr,&dsarg))
3558             return -TARGET_EFAULT;
3559         break;
3560     case IPC_RMID:
3561         ret = get_errno(msgctl(msgid, cmd, NULL));
3562         break;
3563     case IPC_INFO:
3564     case MSG_INFO:
3565         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3566         if (host_to_target_msginfo(ptr, &msginfo))
3567             return -TARGET_EFAULT;
3568         break;
3569     }
3570 
3571     return ret;
3572 }
3573 
3574 struct target_msgbuf {
3575     abi_long mtype;
    char mtext[1];
3577 };
3578 
3579 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3580                                  ssize_t msgsz, int msgflg)
3581 {
3582     struct target_msgbuf *target_mb;
3583     struct msgbuf *host_mb;
3584     abi_long ret = 0;
3585 
3586     if (msgsz < 0) {
3587         return -TARGET_EINVAL;
3588     }
3589 
3590     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3591         return -TARGET_EFAULT;
3592     host_mb = g_try_malloc(msgsz + sizeof(long));
3593     if (!host_mb) {
3594         unlock_user_struct(target_mb, msgp, 0);
3595         return -TARGET_ENOMEM;
3596     }
3597     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3598     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3599     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3600     g_free(host_mb);
3601     unlock_user_struct(target_mb, msgp, 0);
3602 
3603     return ret;
3604 }
3605 
3606 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3607                                  ssize_t msgsz, abi_long msgtyp,
3608                                  int msgflg)
3609 {
3610     struct target_msgbuf *target_mb;
3611     char *target_mtext;
3612     struct msgbuf *host_mb;
3613     abi_long ret = 0;
3614 
3615     if (msgsz < 0) {
3616         return -TARGET_EINVAL;
3617     }
3618 
3619     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3620         return -TARGET_EFAULT;
3621 
3622     host_mb = g_try_malloc(msgsz + sizeof(long));
3623     if (!host_mb) {
3624         ret = -TARGET_ENOMEM;
3625         goto end;
3626     }
3627     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3628 
3629     if (ret > 0) {
3630         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3631         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3632         if (!target_mtext) {
3633             ret = -TARGET_EFAULT;
3634             goto end;
3635         }
3636         memcpy(target_mb->mtext, host_mb->mtext, ret);
3637         unlock_user(target_mtext, target_mtext_addr, ret);
3638     }
3639 
3640     target_mb->mtype = tswapal(host_mb->mtype);
3641 
3642 end:
3643     if (target_mb)
3644         unlock_user_struct(target_mb, msgp, 1);
3645     g_free(host_mb);
3646     return ret;
3647 }
3648 
3649 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3650                                                abi_ulong target_addr)
3651 {
3652     struct target_shmid_ds *target_sd;
3653 
3654     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3655         return -TARGET_EFAULT;
3656     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3657         return -TARGET_EFAULT;
3658     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3659     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3660     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3661     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3662     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3663     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3664     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3665     unlock_user_struct(target_sd, target_addr, 0);
3666     return 0;
3667 }
3668 
3669 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3670                                                struct shmid_ds *host_sd)
3671 {
3672     struct target_shmid_ds *target_sd;
3673 
3674     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3675         return -TARGET_EFAULT;
3676     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3677         return -TARGET_EFAULT;
3678     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3679     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3680     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3681     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3682     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3683     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3684     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3685     unlock_user_struct(target_sd, target_addr, 1);
3686     return 0;
3687 }
3688 
3689 struct  target_shminfo {
3690     abi_ulong shmmax;
3691     abi_ulong shmmin;
3692     abi_ulong shmmni;
3693     abi_ulong shmseg;
3694     abi_ulong shmall;
3695 };
3696 
3697 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3698                                               struct shminfo *host_shminfo)
3699 {
3700     struct target_shminfo *target_shminfo;
3701     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3702         return -TARGET_EFAULT;
3703     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3704     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3705     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3706     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3707     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3708     unlock_user_struct(target_shminfo, target_addr, 1);
3709     return 0;
3710 }
3711 
3712 struct target_shm_info {
3713     int used_ids;
3714     abi_ulong shm_tot;
3715     abi_ulong shm_rss;
3716     abi_ulong shm_swp;
3717     abi_ulong swap_attempts;
3718     abi_ulong swap_successes;
3719 };
3720 
3721 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3722                                                struct shm_info *host_shm_info)
3723 {
3724     struct target_shm_info *target_shm_info;
3725     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3726         return -TARGET_EFAULT;
3727     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3728     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3729     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3730     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3731     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3732     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3733     unlock_user_struct(target_shm_info, target_addr, 1);
3734     return 0;
3735 }
3736 
3737 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3738 {
3739     struct shmid_ds dsarg;
3740     struct shminfo shminfo;
3741     struct shm_info shm_info;
3742     abi_long ret = -TARGET_EINVAL;
3743 
3744     cmd &= 0xff;
3745 
3746     switch(cmd) {
3747     case IPC_STAT:
3748     case IPC_SET:
3749     case SHM_STAT:
3750         if (target_to_host_shmid_ds(&dsarg, buf))
3751             return -TARGET_EFAULT;
3752         ret = get_errno(shmctl(shmid, cmd, &dsarg));
3753         if (host_to_target_shmid_ds(buf, &dsarg))
3754             return -TARGET_EFAULT;
3755         break;
3756     case IPC_INFO:
3757         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3758         if (host_to_target_shminfo(buf, &shminfo))
3759             return -TARGET_EFAULT;
3760         break;
3761     case SHM_INFO:
3762         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3763         if (host_to_target_shm_info(buf, &shm_info))
3764             return -TARGET_EFAULT;
3765         break;
3766     case IPC_RMID:
3767     case SHM_LOCK:
3768     case SHM_UNLOCK:
3769         ret = get_errno(shmctl(shmid, cmd, NULL));
3770         break;
3771     }
3772 
3773     return ret;
3774 }
3775 
3776 #ifndef TARGET_FORCE_SHMLBA
3777 /* For most architectures, SHMLBA is the same as the page size;
3778  * some architectures have larger values, in which case they should
3779  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3780  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3781  * and defining its own value for SHMLBA.
3782  *
3783  * The kernel also permits SHMLBA to be set by the architecture to a
3784  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3785  * this means that addresses are rounded to the large size if
3786  * SHM_RND is set but addresses not aligned to that size are not rejected
3787  * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64, this code doesn't provide for that oddity.
3789  */
3790 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
3791 {
3792     return TARGET_PAGE_SIZE;
3793 }
3794 #endif
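/*
 * Illustrative sketch (kept out of the build): a target that needs a larger
 * alignment would define TARGET_FORCE_SHMLBA in its target headers and
 * provide its own target_shmlba(), for instance an architecture whose cache
 * aliasing requires attaching segments on a four-page boundary.  The values
 * below are hypothetical.
 */
#if 0
#define TARGET_FORCE_SHMLBA 1

static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return 4 * TARGET_PAGE_SIZE;
}
#endif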
3795 
3796 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
3797                                  int shmid, abi_ulong shmaddr, int shmflg)
3798 {
3799     abi_long raddr;
3800     void *host_raddr;
3801     struct shmid_ds shm_info;
    int i, ret;
3803     abi_ulong shmlba;
3804 
3805     /* find out the length of the shared memory segment */
3806     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3807     if (is_error(ret)) {
3808         /* can't get length, bail out */
3809         return ret;
3810     }
3811 
3812     shmlba = target_shmlba(cpu_env);
3813 
3814     if (shmaddr & (shmlba - 1)) {
3815         if (shmflg & SHM_RND) {
3816             shmaddr &= ~(shmlba - 1);
3817         } else {
3818             return -TARGET_EINVAL;
3819         }
3820     }
3821     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
3822         return -TARGET_EINVAL;
3823     }
3824 
3825     mmap_lock();
3826 
    if (shmaddr) {
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    } else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else {
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
        }
    }
3840 
3841     if (host_raddr == (void *)-1) {
3842         mmap_unlock();
3843         return get_errno((long)host_raddr);
3844     }
    raddr = h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY) ? 0 : PAGE_WRITE));
3850 
3851     for (i = 0; i < N_SHM_REGIONS; i++) {
3852         if (!shm_regions[i].in_use) {
3853             shm_regions[i].in_use = true;
3854             shm_regions[i].start = raddr;
3855             shm_regions[i].size = shm_info.shm_segsz;
3856             break;
3857         }
3858     }
3859 
3860     mmap_unlock();
3861     return raddr;
3862 
3863 }
3864 
3865 static inline abi_long do_shmdt(abi_ulong shmaddr)
3866 {
3867     int i;
3868     abi_long rv;
3869 
3870     mmap_lock();
3871 
3872     for (i = 0; i < N_SHM_REGIONS; ++i) {
3873         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3874             shm_regions[i].in_use = false;
3875             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3876             break;
3877         }
3878     }
3879     rv = get_errno(shmdt(g2h(shmaddr)));
3880 
3881     mmap_unlock();
3882 
3883     return rv;
3884 }
3885 
3886 #ifdef TARGET_NR_ipc
3887 /* ??? This only works with linear mappings.  */
3888 /* do_ipc() must return target values and target errnos. */
3889 static abi_long do_ipc(CPUArchState *cpu_env,
3890                        unsigned int call, abi_long first,
3891                        abi_long second, abi_long third,
3892                        abi_long ptr, abi_long fifth)
3893 {
3894     int version;
3895     abi_long ret = 0;
3896 
3897     version = call >> 16;
3898     call &= 0xffff;
3899 
3900     switch (call) {
3901     case IPCOP_semop:
3902         ret = do_semop(first, ptr, second);
3903         break;
3904 
3905     case IPCOP_semget:
3906         ret = get_errno(semget(first, second, third));
3907         break;
3908 
3909     case IPCOP_semctl: {
3910         /* The semun argument to semctl is passed by value, so dereference the
3911          * ptr argument. */
3912         abi_ulong atptr;
3913         get_user_ual(atptr, ptr);
3914         ret = do_semctl(first, second, third, atptr);
3915         break;
3916     }
3917 
3918     case IPCOP_msgget:
3919         ret = get_errno(msgget(first, second));
3920         break;
3921 
3922     case IPCOP_msgsnd:
3923         ret = do_msgsnd(first, ptr, second, third);
3924         break;
3925 
3926     case IPCOP_msgctl:
3927         ret = do_msgctl(first, second, ptr);
3928         break;
3929 
3930     case IPCOP_msgrcv:
3931         switch (version) {
3932         case 0:
3933             {
3934                 struct target_ipc_kludge {
3935                     abi_long msgp;
3936                     abi_long msgtyp;
3937                 } *tmp;
3938 
3939                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3940                     ret = -TARGET_EFAULT;
3941                     break;
3942                 }
3943 
3944                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3945 
3946                 unlock_user_struct(tmp, ptr, 0);
3947                 break;
3948             }
3949         default:
3950             ret = do_msgrcv(first, ptr, second, fifth, third);
3951         }
3952         break;
3953 
3954     case IPCOP_shmat:
3955         switch (version) {
3956         default:
3957         {
3958             abi_ulong raddr;
3959             raddr = do_shmat(cpu_env, first, ptr, second);
3960             if (is_error(raddr))
3961                 return get_errno(raddr);
3962             if (put_user_ual(raddr, third))
3963                 return -TARGET_EFAULT;
3964             break;
3965         }
3966         case 1:
3967             ret = -TARGET_EINVAL;
3968             break;
3969         }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

    /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
3988     }
3989     return ret;
3990 }
3991 #endif
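/*
 * Illustrative sketch (kept out of the build): on targets that only expose
 * the multiplexed ipc() syscall, a guest semop() is issued roughly as below
 * and ends up in the IPCOP_semop arm of do_ipc() above.  The upper 16 bits
 * of "call" carry the version that selects legacy argument layouts (see the
 * IPCOP_msgrcv handling).  guest_semop() is a hypothetical name for what the
 * guest libc does internally.
 */
#if 0
static int guest_semop(int semid, struct sembuf *sops, size_t nsops)
{
    /* call = IPCOP_semop, first = semid, second = nsops, ptr = sops */
    return syscall(__NR_ipc, IPCOP_semop, semid, nsops, 0, sops, 0);
}
#endif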
3992 
3993 /* kernel structure types definitions */
3994 
3995 #define STRUCT(name, ...) STRUCT_ ## name,
3996 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3997 enum {
3998 #include "syscall_types.h"
3999 STRUCT_MAX
4000 };
4001 #undef STRUCT
4002 #undef STRUCT_SPECIAL
4003 
4004 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4005 #define STRUCT_SPECIAL(name)
4006 #include "syscall_types.h"
4007 #undef STRUCT
4008 #undef STRUCT_SPECIAL
4009 
4010 typedef struct IOCTLEntry IOCTLEntry;
4011 
4012 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4013                              int fd, int cmd, abi_long arg);
4014 
4015 struct IOCTLEntry {
4016     int target_cmd;
4017     unsigned int host_cmd;
4018     const char *name;
4019     int access;
4020     do_ioctl_fn *do_ioctl;
4021     const argtype arg_type[5];
4022 };
4023 
4024 #define IOC_R 0x0001
4025 #define IOC_W 0x0002
4026 #define IOC_RW (IOC_R | IOC_W)
4027 
4028 #define MAX_STRUCT_SIZE 4096
4029 
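/*
 * Illustrative sketch (kept out of the build): the IOCTLEntry table that
 * uses these definitions is filled in later in this file from ioctls.h,
 * where each ioctl is described by its command, access direction and
 * argument thunk, and special cases additionally name one of the
 * do_ioctl_* helpers defined below, along the lines of:
 */
#if 0
IOCTL(BLKROSET, IOC_W, MK_PTR(TYPE_INT))
IOCTL_SPECIAL(FS_IOC_FIEMAP, IOC_RW, do_ioctl_fs_ioc_fiemap,
              MK_PTR(MK_STRUCT(STRUCT_fiemap)))
#endif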
4030 #ifdef CONFIG_FIEMAP
4031 /* So fiemap access checks don't overflow on 32 bit systems.
4032  * This is very slightly smaller than the limit imposed by
4033  * the underlying kernel.
4034  */
4035 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4036                             / sizeof(struct fiemap_extent))
4037 
4038 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4039                                        int fd, int cmd, abi_long arg)
4040 {
4041     /* The parameter for this ioctl is a struct fiemap followed
4042      * by an array of struct fiemap_extent whose size is set
4043      * in fiemap->fm_extent_count. The array is filled in by the
4044      * ioctl.
4045      */
4046     int target_size_in, target_size_out;
4047     struct fiemap *fm;
4048     const argtype *arg_type = ie->arg_type;
4049     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4050     void *argptr, *p;
4051     abi_long ret;
4052     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4053     uint32_t outbufsz;
4054     int free_fm = 0;
4055 
4056     assert(arg_type[0] == TYPE_PTR);
4057     assert(ie->access == IOC_RW);
4058     arg_type++;
4059     target_size_in = thunk_type_size(arg_type, 0);
4060     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4061     if (!argptr) {
4062         return -TARGET_EFAULT;
4063     }
4064     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4065     unlock_user(argptr, arg, 0);
4066     fm = (struct fiemap *)buf_temp;
4067     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4068         return -TARGET_EINVAL;
4069     }
4070 
4071     outbufsz = sizeof (*fm) +
4072         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4073 
4074     if (outbufsz > MAX_STRUCT_SIZE) {
4075         /* We can't fit all the extents into the fixed size buffer.
4076          * Allocate one that is large enough and use it instead.
4077          */
4078         fm = g_try_malloc(outbufsz);
4079         if (!fm) {
4080             return -TARGET_ENOMEM;
4081         }
4082         memcpy(fm, buf_temp, sizeof(struct fiemap));
4083         free_fm = 1;
4084     }
4085     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4086     if (!is_error(ret)) {
4087         target_size_out = target_size_in;
4088         /* An extent_count of 0 means we were only counting the extents
4089          * so there are no structs to copy
4090          */
4091         if (fm->fm_extent_count != 0) {
4092             target_size_out += fm->fm_mapped_extents * extent_size;
4093         }
4094         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4095         if (!argptr) {
4096             ret = -TARGET_EFAULT;
4097         } else {
4098             /* Convert the struct fiemap */
4099             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4100             if (fm->fm_extent_count != 0) {
4101                 p = argptr + target_size_in;
4102                 /* ...and then all the struct fiemap_extents */
4103                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4104                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4105                                   THUNK_TARGET);
4106                     p += extent_size;
4107                 }
4108             }
4109             unlock_user(argptr, arg, target_size_out);
4110         }
4111     }
4112     if (free_fm) {
4113         g_free(fm);
4114     }
4115     return ret;
4116 }
4117 #endif
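/*
 * Illustrative sketch (kept out of the build): the guest-side shape of the
 * request that do_ioctl_fs_ioc_fiemap() converts.  The caller allocates
 * struct fiemap plus room for fm_extent_count extents, and the handler
 * above sizes its bounce buffer from that same count before forwarding
 * the ioctl to the host.
 */
#if 0
static void guest_fiemap_example(int fd)
{
    size_t n = 16;   /* arbitrary number of extents */
    struct fiemap *fm = calloc(1, sizeof(*fm) +
                               n * sizeof(struct fiemap_extent));

    fm->fm_start = 0;
    fm->fm_length = ~0ULL;           /* map the whole file */
    fm->fm_extent_count = n;         /* 0 would only count the extents */
    ioctl(fd, FS_IOC_FIEMAP, fm);    /* fills fm->fm_mapped_extents */
    free(fm);
}
#endif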
4118 
4119 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4120                                 int fd, int cmd, abi_long arg)
4121 {
4122     const argtype *arg_type = ie->arg_type;
4123     int target_size;
4124     void *argptr;
4125     int ret;
4126     struct ifconf *host_ifconf;
4127     uint32_t outbufsz;
4128     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4129     int target_ifreq_size;
4130     int nb_ifreq;
4131     int free_buf = 0;
4132     int i;
4133     int target_ifc_len;
4134     abi_long target_ifc_buf;
4135     int host_ifc_len;
4136     char *host_ifc_buf;
4137 
4138     assert(arg_type[0] == TYPE_PTR);
4139     assert(ie->access == IOC_RW);
4140 
4141     arg_type++;
4142     target_size = thunk_type_size(arg_type, 0);
4143 
4144     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4145     if (!argptr)
4146         return -TARGET_EFAULT;
4147     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4148     unlock_user(argptr, arg, 0);
4149 
4150     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4151     target_ifc_len = host_ifconf->ifc_len;
4152     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4153 
4154     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4155     nb_ifreq = target_ifc_len / target_ifreq_size;
4156     host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4157 
4158     outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4159     if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the ifreq entries into the fixed size buffer.
4161          * Allocate one that is large enough and use it instead.
4162          */
4163         host_ifconf = malloc(outbufsz);
4164         if (!host_ifconf) {
4165             return -TARGET_ENOMEM;
4166         }
4167         memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4168         free_buf = 1;
4169     }
4170     host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
4171 
4172     host_ifconf->ifc_len = host_ifc_len;
4173     host_ifconf->ifc_buf = host_ifc_buf;
4174 
4175     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4176     if (!is_error(ret)) {
        /* convert host ifc_len to target ifc_len */
4178 
4179         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4180         target_ifc_len = nb_ifreq * target_ifreq_size;
4181         host_ifconf->ifc_len = target_ifc_len;
4182 
        /* restore target ifc_buf */
4184 
4185         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4186 
        /* copy struct ifconf to target user */
4188 
4189         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4190         if (!argptr)
4191             return -TARGET_EFAULT;
4192         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4193         unlock_user(argptr, arg, target_size);
4194 
        /* copy ifreq[] to target user */

        argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
        if (!argptr) {
            return -TARGET_EFAULT;
        }
4198         for (i = 0; i < nb_ifreq ; i++) {
4199             thunk_convert(argptr + i * target_ifreq_size,
4200                           host_ifc_buf + i * sizeof(struct ifreq),
4201                           ifreq_arg_type, THUNK_TARGET);
4202         }
4203         unlock_user(argptr, target_ifc_buf, target_ifc_len);
4204     }
4205 
4206     if (free_buf) {
4207         free(host_ifconf);
4208     }
4209 
4210     return ret;
4211 }
4212 
4213 #if defined(CONFIG_USBFS)
4214 #if HOST_LONG_BITS > 64
4215 #error USBDEVFS thunks do not support >64 bit hosts yet.
4216 #endif
4217 struct live_urb {
4218     uint64_t target_urb_adr;
4219     uint64_t target_buf_adr;
4220     char *target_buf_ptr;
4221     struct usbdevfs_urb host_urb;
4222 };
4223 
4224 static GHashTable *usbdevfs_urb_hashtable(void)
4225 {
4226     static GHashTable *urb_hashtable;
4227 
4228     if (!urb_hashtable) {
4229         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4230     }
4231     return urb_hashtable;
4232 }
4233 
4234 static void urb_hashtable_insert(struct live_urb *urb)
4235 {
4236     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4237     g_hash_table_insert(urb_hashtable, urb, urb);
4238 }
4239 
4240 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4241 {
4242     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4243     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4244 }
4245 
4246 static void urb_hashtable_remove(struct live_urb *urb)
4247 {
4248     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4249     g_hash_table_remove(urb_hashtable, urb);
4250 }
4251 
4252 static abi_long
4253 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4254                           int fd, int cmd, abi_long arg)
4255 {
4256     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4257     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4258     struct live_urb *lurb;
4259     void *argptr;
4260     uint64_t hurb;
4261     int target_size;
4262     uintptr_t target_urb_adr;
4263     abi_long ret;
4264 
4265     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4266 
4267     memset(buf_temp, 0, sizeof(uint64_t));
4268     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4269     if (is_error(ret)) {
4270         return ret;
4271     }
4272 
4273     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4274     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4275     if (!lurb->target_urb_adr) {
4276         return -TARGET_EFAULT;
4277     }
4278     urb_hashtable_remove(lurb);
4279     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4280         lurb->host_urb.buffer_length);
4281     lurb->target_buf_ptr = NULL;
4282 
4283     /* restore the guest buffer pointer */
4284     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4285 
4286     /* update the guest urb struct */
4287     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4288     if (!argptr) {
4289         g_free(lurb);
4290         return -TARGET_EFAULT;
4291     }
4292     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4293     unlock_user(argptr, lurb->target_urb_adr, target_size);
4294 
4295     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4296     /* write back the urb handle */
4297     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4298     if (!argptr) {
4299         g_free(lurb);
4300         return -TARGET_EFAULT;
4301     }
4302 
4303     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4304     target_urb_adr = lurb->target_urb_adr;
4305     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4306     unlock_user(argptr, arg, target_size);
4307 
4308     g_free(lurb);
4309     return ret;
4310 }
4311 
4312 static abi_long
4313 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4314                              uint8_t *buf_temp __attribute__((unused)),
4315                              int fd, int cmd, abi_long arg)
4316 {
4317     struct live_urb *lurb;
4318 
4319     /* map target address back to host URB with metadata. */
4320     lurb = urb_hashtable_lookup(arg);
4321     if (!lurb) {
4322         return -TARGET_EFAULT;
4323     }
4324     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4325 }
4326 
4327 static abi_long
4328 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4329                             int fd, int cmd, abi_long arg)
4330 {
4331     const argtype *arg_type = ie->arg_type;
4332     int target_size;
4333     abi_long ret;
4334     void *argptr;
4335     int rw_dir;
4336     struct live_urb *lurb;
4337 
    /*
     * Each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory.  Hence, we need to malloc for each URB.
     * Isochronous transfers have a variable length struct.
     */
4344     arg_type++;
4345     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4346 
4347     /* construct host copy of urb and metadata */
4348     lurb = g_try_malloc0(sizeof(struct live_urb));
4349     if (!lurb) {
4350         return -TARGET_ENOMEM;
4351     }
4352 
4353     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4354     if (!argptr) {
4355         g_free(lurb);
4356         return -TARGET_EFAULT;
4357     }
4358     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4359     unlock_user(argptr, arg, 0);
4360 
4361     lurb->target_urb_adr = arg;
4362     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4363 
4364     /* buffer space used depends on endpoint type so lock the entire buffer */
4365     /* control type urbs should check the buffer contents for true direction */
4366     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4367     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4368         lurb->host_urb.buffer_length, 1);
4369     if (lurb->target_buf_ptr == NULL) {
4370         g_free(lurb);
4371         return -TARGET_EFAULT;
4372     }
4373 
4374     /* update buffer pointer in host copy */
4375     lurb->host_urb.buffer = lurb->target_buf_ptr;
4376 
4377     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4378     if (is_error(ret)) {
4379         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4380         g_free(lurb);
4381     } else {
4382         urb_hashtable_insert(lurb);
4383     }
4384 
4385     return ret;
4386 }
4387 #endif /* CONFIG_USBFS */
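/*
 * Note on the URB bookkeeping above (for the reader; no code change): on
 * USBDEVFS_REAPURB the kernel returns the host usbdevfs_urb pointer, and
 * because host_urb is embedded in struct live_urb the wrapper is recovered
 * with the usual container_of arithmetic:
 *
 *     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
 *
 * which is why every submitted URB is heap-allocated and tracked in the
 * hash table keyed by its target address.
 */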
4388 
4389 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4390                             int cmd, abi_long arg)
4391 {
4392     void *argptr;
4393     struct dm_ioctl *host_dm;
4394     abi_long guest_data;
4395     uint32_t guest_data_size;
4396     int target_size;
4397     const argtype *arg_type = ie->arg_type;
4398     abi_long ret;
4399     void *big_buf = NULL;
4400     char *host_data;
4401 
4402     arg_type++;
4403     target_size = thunk_type_size(arg_type, 0);
4404     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4405     if (!argptr) {
4406         ret = -TARGET_EFAULT;
4407         goto out;
4408     }
4409     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4410     unlock_user(argptr, arg, 0);
4411 
4412     /* buf_temp is too small, so fetch things into a bigger buffer */
4413     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4414     memcpy(big_buf, buf_temp, target_size);
4415     buf_temp = big_buf;
4416     host_dm = big_buf;
4417 
4418     guest_data = arg + host_dm->data_start;
4419     if ((guest_data - arg) < 0) {
4420         ret = -TARGET_EINVAL;
4421         goto out;
4422     }
4423     guest_data_size = host_dm->data_size - host_dm->data_start;
4424     host_data = (char*)host_dm + host_dm->data_start;
4425 
4426     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4427     if (!argptr) {
4428         ret = -TARGET_EFAULT;
4429         goto out;
4430     }
4431 
4432     switch (ie->host_cmd) {
4433     case DM_REMOVE_ALL:
4434     case DM_LIST_DEVICES:
4435     case DM_DEV_CREATE:
4436     case DM_DEV_REMOVE:
4437     case DM_DEV_SUSPEND:
4438     case DM_DEV_STATUS:
4439     case DM_DEV_WAIT:
4440     case DM_TABLE_STATUS:
4441     case DM_TABLE_CLEAR:
4442     case DM_TABLE_DEPS:
4443     case DM_LIST_VERSIONS:
4444         /* no input data */
4445         break;
4446     case DM_DEV_RENAME:
4447     case DM_DEV_SET_GEOMETRY:
4448         /* data contains only strings */
4449         memcpy(host_data, argptr, guest_data_size);
4450         break;
4451     case DM_TARGET_MSG:
4452         memcpy(host_data, argptr, guest_data_size);
4453         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4454         break;
4455     case DM_TABLE_LOAD:
4456     {
4457         void *gspec = argptr;
4458         void *cur_data = host_data;
4459         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4460         int spec_size = thunk_type_size(arg_type, 0);
4461         int i;
4462 
4463         for (i = 0; i < host_dm->target_count; i++) {
4464             struct dm_target_spec *spec = cur_data;
4465             uint32_t next;
4466             int slen;
4467 
4468             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4469             slen = strlen((char*)gspec + spec_size) + 1;
4470             next = spec->next;
4471             spec->next = sizeof(*spec) + slen;
4472             strcpy((char*)&spec[1], gspec + spec_size);
4473             gspec += next;
4474             cur_data += spec->next;
4475         }
4476         break;
4477     }
4478     default:
4479         ret = -TARGET_EINVAL;
4480         unlock_user(argptr, guest_data, 0);
4481         goto out;
4482     }
4483     unlock_user(argptr, guest_data, 0);
4484 
4485     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4486     if (!is_error(ret)) {
4487         guest_data = arg + host_dm->data_start;
4488         guest_data_size = host_dm->data_size - host_dm->data_start;
4489         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4490         switch (ie->host_cmd) {
4491         case DM_REMOVE_ALL:
4492         case DM_DEV_CREATE:
4493         case DM_DEV_REMOVE:
4494         case DM_DEV_RENAME:
4495         case DM_DEV_SUSPEND:
4496         case DM_DEV_STATUS:
4497         case DM_TABLE_LOAD:
4498         case DM_TABLE_CLEAR:
4499         case DM_TARGET_MSG:
4500         case DM_DEV_SET_GEOMETRY:
4501             /* no return data */
4502             break;
4503         case DM_LIST_DEVICES:
4504         {
4505             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4506             uint32_t remaining_data = guest_data_size;
4507             void *cur_data = argptr;
4508             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4509             int nl_size = 12; /* can't use thunk_size due to alignment */
4510 
4511             while (1) {
4512                 uint32_t next = nl->next;
4513                 if (next) {
4514                     nl->next = nl_size + (strlen(nl->name) + 1);
4515                 }
4516                 if (remaining_data < nl->next) {
4517                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4518                     break;
4519                 }
4520                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4521                 strcpy(cur_data + nl_size, nl->name);
4522                 cur_data += nl->next;
4523                 remaining_data -= nl->next;
4524                 if (!next) {
4525                     break;
4526                 }
4527                 nl = (void*)nl + next;
4528             }
4529             break;
4530         }
4531         case DM_DEV_WAIT:
4532         case DM_TABLE_STATUS:
4533         {
4534             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4535             void *cur_data = argptr;
4536             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4537             int spec_size = thunk_type_size(arg_type, 0);
4538             int i;
4539 
4540             for (i = 0; i < host_dm->target_count; i++) {
4541                 uint32_t next = spec->next;
4542                 int slen = strlen((char*)&spec[1]) + 1;
4543                 spec->next = (cur_data - argptr) + spec_size + slen;
4544                 if (guest_data_size < spec->next) {
4545                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4546                     break;
4547                 }
4548                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4549                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4550                 cur_data = argptr + spec->next;
4551                 spec = (void*)host_dm + host_dm->data_start + next;
4552             }
4553             break;
4554         }
4555         case DM_TABLE_DEPS:
4556         {
4557             void *hdata = (void*)host_dm + host_dm->data_start;
4558             int count = *(uint32_t*)hdata;
4559             uint64_t *hdev = hdata + 8;
4560             uint64_t *gdev = argptr + 8;
4561             int i;
4562 
4563             *(uint32_t*)argptr = tswap32(count);
4564             for (i = 0; i < count; i++) {
4565                 *gdev = tswap64(*hdev);
4566                 gdev++;
4567                 hdev++;
4568             }
4569             break;
4570         }
4571         case DM_LIST_VERSIONS:
4572         {
4573             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4574             uint32_t remaining_data = guest_data_size;
4575             void *cur_data = argptr;
4576             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4577             int vers_size = thunk_type_size(arg_type, 0);
4578 
4579             while (1) {
4580                 uint32_t next = vers->next;
4581                 if (next) {
4582                     vers->next = vers_size + (strlen(vers->name) + 1);
4583                 }
4584                 if (remaining_data < vers->next) {
4585                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4586                     break;
4587                 }
4588                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4589                 strcpy(cur_data + vers_size, vers->name);
4590                 cur_data += vers->next;
4591                 remaining_data -= vers->next;
4592                 if (!next) {
4593                     break;
4594                 }
4595                 vers = (void*)vers + next;
4596             }
4597             break;
4598         }
4599         default:
4600             unlock_user(argptr, guest_data, 0);
4601             ret = -TARGET_EINVAL;
4602             goto out;
4603         }
4604         unlock_user(argptr, guest_data, guest_data_size);
4605 
4606         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4607         if (!argptr) {
4608             ret = -TARGET_EFAULT;
4609             goto out;
4610         }
4611         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4612         unlock_user(argptr, arg, target_size);
4613     }
4614 out:
4615     g_free(big_buf);
4616     return ret;
4617 }
4618 
4619 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4620                                int cmd, abi_long arg)
4621 {
4622     void *argptr;
4623     int target_size;
4624     const argtype *arg_type = ie->arg_type;
4625     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4626     abi_long ret;
4627 
4628     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4629     struct blkpg_partition host_part;
4630 
4631     /* Read and convert blkpg */
4632     arg_type++;
4633     target_size = thunk_type_size(arg_type, 0);
4634     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4635     if (!argptr) {
4636         ret = -TARGET_EFAULT;
4637         goto out;
4638     }
4639     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4640     unlock_user(argptr, arg, 0);
4641 
4642     switch (host_blkpg->op) {
4643     case BLKPG_ADD_PARTITION:
4644     case BLKPG_DEL_PARTITION:
4645         /* payload is struct blkpg_partition */
4646         break;
4647     default:
4648         /* Unknown opcode */
4649         ret = -TARGET_EINVAL;
4650         goto out;
4651     }
4652 
4653     /* Read and convert blkpg->data */
4654     arg = (abi_long)(uintptr_t)host_blkpg->data;
4655     target_size = thunk_type_size(part_arg_type, 0);
4656     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4657     if (!argptr) {
4658         ret = -TARGET_EFAULT;
4659         goto out;
4660     }
4661     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4662     unlock_user(argptr, arg, 0);
4663 
4664     /* Swizzle the data pointer to our local copy and call! */
4665     host_blkpg->data = &host_part;
4666     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4667 
4668 out:
4669     return ret;
4670 }
4671 
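/*
 * Editor's note on the handler below: this services routing-table ioctls
 * (registered via IOCTL_SPECIAL in ioctls.h, e.g. SIOCADDRT/SIOCDELRT;
 * an assumption, as the registrations are not visible here).  struct
 * rtentry embeds an rt_dev pointer to a device-name string in guest
 * memory, so the structure is converted field by field and the string is
 * locked with lock_user_string() instead of letting thunk_convert() copy
 * the raw guest pointer.
 */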
4672 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4673                                 int fd, int cmd, abi_long arg)
4674 {
4675     const argtype *arg_type = ie->arg_type;
4676     const StructEntry *se;
4677     const argtype *field_types;
4678     const int *dst_offsets, *src_offsets;
4679     int target_size;
4680     void *argptr;
4681     abi_ulong *target_rt_dev_ptr = NULL;
4682     unsigned long *host_rt_dev_ptr = NULL;
4683     abi_long ret;
4684     int i;
4685 
4686     assert(ie->access == IOC_W);
4687     assert(*arg_type == TYPE_PTR);
4688     arg_type++;
4689     assert(*arg_type == TYPE_STRUCT);
4690     target_size = thunk_type_size(arg_type, 0);
4691     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4692     if (!argptr) {
4693         return -TARGET_EFAULT;
4694     }
4695     arg_type++;
4696     assert(*arg_type == (int)STRUCT_rtentry);
4697     se = struct_entries + *arg_type++;
4698     assert(se->convert[0] == NULL);
4699     /* convert struct here to be able to catch rt_dev string */
4700     field_types = se->field_types;
4701     dst_offsets = se->field_offsets[THUNK_HOST];
4702     src_offsets = se->field_offsets[THUNK_TARGET];
4703     for (i = 0; i < se->nb_fields; i++) {
4704         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4705             assert(*field_types == TYPE_PTRVOID);
4706             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4707             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4708             if (*target_rt_dev_ptr != 0) {
4709                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4710                                                   tswapal(*target_rt_dev_ptr));
4711                 if (!*host_rt_dev_ptr) {
4712                     unlock_user(argptr, arg, 0);
4713                     return -TARGET_EFAULT;
4714                 }
4715             } else {
4716                 *host_rt_dev_ptr = 0;
4717             }
4718             field_types++;
4719             continue;
4720         }
4721         field_types = thunk_convert(buf_temp + dst_offsets[i],
4722                                     argptr + src_offsets[i],
4723                                     field_types, THUNK_HOST);
4724     }
4725     unlock_user(argptr, arg, 0);
4726 
4727     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4728     if (host_rt_dev_ptr && *host_rt_dev_ptr != 0) {
4729         unlock_user((void *)*host_rt_dev_ptr,
4730                     *target_rt_dev_ptr, 0);
4731     }
4732     return ret;
4733 }
4734 
4735 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4736                                      int fd, int cmd, abi_long arg)
4737 {
4738     int sig = target_to_host_signal(arg);
4739     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4740 }
4741 
4742 #ifdef TIOCGPTPEER
4743 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
4744                                      int fd, int cmd, abi_long arg)
4745 {
4746     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
4747     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
4748 }
4749 #endif
4750 
4751 static IOCTLEntry ioctl_entries[] = {
4752 #define IOCTL(cmd, access, ...) \
4753     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
4754 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
4755     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
4756 #define IOCTL_IGNORE(cmd) \
4757     { TARGET_ ## cmd, 0, #cmd },
4758 #include "ioctls.h"
4759     { 0, 0, },
4760 };
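/*
 * Illustrative expansion (not a literal line from ioctls.h): an entry such
 * as
 *     IOCTL(FIONREAD, IOC_R, MK_PTR(TYPE_INT))
 * becomes
 *     { TARGET_FIONREAD, FIONREAD, "FIONREAD", IOC_R, 0, { MK_PTR(TYPE_INT) } },
 * IOCTL_SPECIAL() additionally supplies a do_ioctl-style callback in the
 * fifth slot, and IOCTL_IGNORE() leaves host_cmd as 0 so that do_ioctl()
 * below reports -TARGET_ENOSYS for it.
 */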
4761 
4762 /* ??? Implement proper locking for ioctls.  */
4763 /* do_ioctl() Must return target values and target errnos. */
4764 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4765 {
4766     const IOCTLEntry *ie;
4767     const argtype *arg_type;
4768     abi_long ret;
4769     uint8_t buf_temp[MAX_STRUCT_SIZE];
4770     int target_size;
4771     void *argptr;
4772 
4773     ie = ioctl_entries;
4774     for(;;) {
4775         if (ie->target_cmd == 0) {
4776             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4777             return -TARGET_ENOSYS;
4778         }
4779         if (ie->target_cmd == cmd)
4780             break;
4781         ie++;
4782     }
4783     arg_type = ie->arg_type;
4784     if (ie->do_ioctl) {
4785         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4786     } else if (!ie->host_cmd) {
4787         /* Some architectures define BSD ioctls in their headers
4788            that are not implemented in Linux.  */
4789         return -TARGET_ENOSYS;
4790     }
4791 
4792     switch(arg_type[0]) {
4793     case TYPE_NULL:
4794         /* no argument */
4795         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
4796         break;
4797     case TYPE_PTRVOID:
4798     case TYPE_INT:
4799         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
4800         break;
4801     case TYPE_PTR:
4802         arg_type++;
4803         target_size = thunk_type_size(arg_type, 0);
4804         switch(ie->access) {
4805         case IOC_R:
4806             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4807             if (!is_error(ret)) {
4808                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4809                 if (!argptr)
4810                     return -TARGET_EFAULT;
4811                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4812                 unlock_user(argptr, arg, target_size);
4813             }
4814             break;
4815         case IOC_W:
4816             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4817             if (!argptr)
4818                 return -TARGET_EFAULT;
4819             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4820             unlock_user(argptr, arg, 0);
4821             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4822             break;
4823         default:
4824         case IOC_RW:
4825             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4826             if (!argptr)
4827                 return -TARGET_EFAULT;
4828             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4829             unlock_user(argptr, arg, 0);
4830             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4831             if (!is_error(ret)) {
4832                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4833                 if (!argptr)
4834                     return -TARGET_EFAULT;
4835                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4836                 unlock_user(argptr, arg, target_size);
4837             }
4838             break;
4839         }
4840         break;
4841     default:
4842         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4843                  (long)cmd, arg_type[0]);
4844         ret = -TARGET_ENOSYS;
4845         break;
4846     }
4847     return ret;
4848 }
4849 
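/* Each bitmask_transtbl row below is read as { target_mask, target_bits,
   host_mask, host_bits }: target_to_host_bitmask() ORs in host_bits when
   (flags & target_mask) == target_bits, and host_to_target_bitmask() does
   the reverse.  Single-bit flags repeat the same flag in all four columns;
   multi-bit fields such as NLDLY or CBAUD need one row per legal value. */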
4850 static const bitmask_transtbl iflag_tbl[] = {
4851         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4852         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4853         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4854         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4855         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4856         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4857         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4858         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4859         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4860         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4861         { TARGET_IXON, TARGET_IXON, IXON, IXON },
4862         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4863         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4864         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4865         { 0, 0, 0, 0 }
4866 };
4867 
4868 static const bitmask_transtbl oflag_tbl[] = {
4869 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4870 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4871 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4872 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4873 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4874 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4875 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4876 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4877 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4878 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4879 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4880 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4881 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4882 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4883 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4884 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4885 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4886 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4887 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4888 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4889 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4890 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4891 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4892 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4893 	{ 0, 0, 0, 0 }
4894 };
4895 
4896 static const bitmask_transtbl cflag_tbl[] = {
4897 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4898 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4899 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4900 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4901 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4902 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4903 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4904 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4905 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4906 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4907 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
4908 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
4909 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
4910 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4911 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4912 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4913 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4914 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4915 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4916 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4917 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4918 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4919 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4920 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4921 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4922 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4923 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4924 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4925 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4926 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4927 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
4928 	{ 0, 0, 0, 0 }
4929 };
4930 
4931 static const bitmask_transtbl lflag_tbl[] = {
4932 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4933 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4934 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4935 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4936 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4937 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4938 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4939 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4940 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4941 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4942 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4943 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4944 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
4945 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
4946 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
4947 	{ 0, 0, 0, 0 }
4948 };
4949 
4950 static void target_to_host_termios (void *dst, const void *src)
4951 {
4952     struct host_termios *host = dst;
4953     const struct target_termios *target = src;
4954 
4955     host->c_iflag =
4956         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
4957     host->c_oflag =
4958         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
4959     host->c_cflag =
4960         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
4961     host->c_lflag =
4962         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
4963     host->c_line = target->c_line;
4964 
4965     memset(host->c_cc, 0, sizeof(host->c_cc));
4966     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
4967     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
4968     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
4969     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
4970     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
4971     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
4972     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
4973     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
4974     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
4975     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
4976     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
4977     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
4978     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
4979     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
4980     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
4981     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
4982     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
4983 }
4984 
4985 static void host_to_target_termios (void *dst, const void *src)
4986 {
4987     struct target_termios *target = dst;
4988     const struct host_termios *host = src;
4989 
4990     target->c_iflag =
4991         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
4992     target->c_oflag =
4993         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
4994     target->c_cflag =
4995         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
4996     target->c_lflag =
4997         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
4998     target->c_line = host->c_line;
4999 
5000     memset(target->c_cc, 0, sizeof(target->c_cc));
5001     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5002     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5003     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5004     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5005     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5006     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5007     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5008     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5009     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5010     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5011     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5012     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5013     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5014     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5015     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5016     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5017     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5018 }
5019 
5020 static const StructEntry struct_termios_def = {
5021     .convert = { host_to_target_termios, target_to_host_termios },
5022     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5023     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5024 };
5025 
5026 static bitmask_transtbl mmap_flags_tbl[] = {
5027     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5028     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5029     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5030     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5031       MAP_ANONYMOUS, MAP_ANONYMOUS },
5032     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5033       MAP_GROWSDOWN, MAP_GROWSDOWN },
5034     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5035       MAP_DENYWRITE, MAP_DENYWRITE },
5036     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5037       MAP_EXECUTABLE, MAP_EXECUTABLE },
5038     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5039     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5040       MAP_NORESERVE, MAP_NORESERVE },
5041     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5042     /* MAP_STACK had been ignored by the kernel for quite some time.
5043        Recognize it for the target insofar as we do not want to pass
5044        it through to the host.  */
5045     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5046     { 0, 0, 0, 0 }
5047 };
5048 
5049 #if defined(TARGET_I386)
5050 
5051 /* NOTE: there is really one LDT for all the threads */
5052 static uint8_t *ldt_table;
5053 
5054 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5055 {
5056     int size;
5057     void *p;
5058 
5059     if (!ldt_table)
5060         return 0;
5061     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5062     if (size > bytecount)
5063         size = bytecount;
5064     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5065     if (!p)
5066         return -TARGET_EFAULT;
5067     /* ??? Should this be byteswapped?  */
5068     memcpy(p, ldt_table, size);
5069     unlock_user(p, ptr, size);
5070     return size;
5071 }
5072 
5073 /* XXX: add locking support */
5074 static abi_long write_ldt(CPUX86State *env,
5075                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5076 {
5077     struct target_modify_ldt_ldt_s ldt_info;
5078     struct target_modify_ldt_ldt_s *target_ldt_info;
5079     int seg_32bit, contents, read_exec_only, limit_in_pages;
5080     int seg_not_present, useable, lm;
5081     uint32_t *lp, entry_1, entry_2;
5082 
5083     if (bytecount != sizeof(ldt_info))
5084         return -TARGET_EINVAL;
5085     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5086         return -TARGET_EFAULT;
5087     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5088     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5089     ldt_info.limit = tswap32(target_ldt_info->limit);
5090     ldt_info.flags = tswap32(target_ldt_info->flags);
5091     unlock_user_struct(target_ldt_info, ptr, 0);
5092 
5093     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5094         return -TARGET_EINVAL;
5095     seg_32bit = ldt_info.flags & 1;
5096     contents = (ldt_info.flags >> 1) & 3;
5097     read_exec_only = (ldt_info.flags >> 3) & 1;
5098     limit_in_pages = (ldt_info.flags >> 4) & 1;
5099     seg_not_present = (ldt_info.flags >> 5) & 1;
5100     useable = (ldt_info.flags >> 6) & 1;
5101 #ifdef TARGET_ABI32
5102     lm = 0;
5103 #else
5104     lm = (ldt_info.flags >> 7) & 1;
5105 #endif
5106     if (contents == 3) {
5107         if (oldmode)
5108             return -TARGET_EINVAL;
5109         if (seg_not_present == 0)
5110             return -TARGET_EINVAL;
5111     }
5112     /* allocate the LDT */
5113     if (!ldt_table) {
5114         env->ldt.base = target_mmap(0,
5115                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5116                                     PROT_READ|PROT_WRITE,
5117                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5118         if (env->ldt.base == -1)
5119             return -TARGET_ENOMEM;
5120         memset(g2h(env->ldt.base), 0,
5121                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5122         env->ldt.limit = 0xffff;
5123         ldt_table = g2h(env->ldt.base);
5124     }
5125 
5126     /* NOTE: same code as Linux kernel */
5127     /* Allow LDTs to be cleared by the user. */
5128     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5129         if (oldmode ||
5130             (contents == 0		&&
5131              read_exec_only == 1	&&
5132              seg_32bit == 0		&&
5133              limit_in_pages == 0	&&
5134              seg_not_present == 1	&&
5135              useable == 0 )) {
5136             entry_1 = 0;
5137             entry_2 = 0;
5138             goto install;
5139         }
5140     }
5141 
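    /* Build the two 32-bit words of an x86 segment descriptor: entry_1
       carries base[15:0] and limit[15:0], entry_2 the remaining base and
       limit bits plus the access/flag bits derived from the modify_ldt
       flags above.  The constant 0x7000 sets the descriptor-type bit and
       DPL=3, i.e. a user-mode code/data segment. */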
5142     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5143         (ldt_info.limit & 0x0ffff);
5144     entry_2 = (ldt_info.base_addr & 0xff000000) |
5145         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5146         (ldt_info.limit & 0xf0000) |
5147         ((read_exec_only ^ 1) << 9) |
5148         (contents << 10) |
5149         ((seg_not_present ^ 1) << 15) |
5150         (seg_32bit << 22) |
5151         (limit_in_pages << 23) |
5152         (lm << 21) |
5153         0x7000;
5154     if (!oldmode)
5155         entry_2 |= (useable << 20);
5156 
5157     /* Install the new entry ...  */
5158 install:
5159     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5160     lp[0] = tswap32(entry_1);
5161     lp[1] = tswap32(entry_2);
5162     return 0;
5163 }
5164 
5165 /* specific and weird i386 syscalls */
5166 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5167                               unsigned long bytecount)
5168 {
5169     abi_long ret;
5170 
5171     switch (func) {
5172     case 0:
5173         ret = read_ldt(ptr, bytecount);
5174         break;
5175     case 1:
5176         ret = write_ldt(env, ptr, bytecount, 1);
5177         break;
5178     case 0x11:
5179         ret = write_ldt(env, ptr, bytecount, 0);
5180         break;
5181     default:
5182         ret = -TARGET_ENOSYS;
5183         break;
5184     }
5185     return ret;
5186 }
5187 
5188 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5189 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5190 {
5191     uint64_t *gdt_table = g2h(env->gdt.base);
5192     struct target_modify_ldt_ldt_s ldt_info;
5193     struct target_modify_ldt_ldt_s *target_ldt_info;
5194     int seg_32bit, contents, read_exec_only, limit_in_pages;
5195     int seg_not_present, useable, lm;
5196     uint32_t *lp, entry_1, entry_2;
5197     int i;
5198 
5199     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5200     if (!target_ldt_info)
5201         return -TARGET_EFAULT;
5202     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5203     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5204     ldt_info.limit = tswap32(target_ldt_info->limit);
5205     ldt_info.flags = tswap32(target_ldt_info->flags);
5206     if (ldt_info.entry_number == -1) {
5207         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5208             if (gdt_table[i] == 0) {
5209                 ldt_info.entry_number = i;
5210                 target_ldt_info->entry_number = tswap32(i);
5211                 break;
5212             }
5213         }
5214     }
5215     unlock_user_struct(target_ldt_info, ptr, 1);
5216 
5217     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5218         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5219            return -TARGET_EINVAL;
5220     seg_32bit = ldt_info.flags & 1;
5221     contents = (ldt_info.flags >> 1) & 3;
5222     read_exec_only = (ldt_info.flags >> 3) & 1;
5223     limit_in_pages = (ldt_info.flags >> 4) & 1;
5224     seg_not_present = (ldt_info.flags >> 5) & 1;
5225     useable = (ldt_info.flags >> 6) & 1;
5226 #ifdef TARGET_ABI32
5227     lm = 0;
5228 #else
5229     lm = (ldt_info.flags >> 7) & 1;
5230 #endif
5231 
5232     if (contents == 3) {
5233         if (seg_not_present == 0)
5234             return -TARGET_EINVAL;
5235     }
5236 
5237     /* NOTE: same code as Linux kernel */
5238     /* Allow LDTs to be cleared by the user. */
5239     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5240         if ((contents == 0             &&
5241              read_exec_only == 1       &&
5242              seg_32bit == 0            &&
5243              limit_in_pages == 0       &&
5244              seg_not_present == 1      &&
5245              useable == 0 )) {
5246             entry_1 = 0;
5247             entry_2 = 0;
5248             goto install;
5249         }
5250     }
5251 
5252     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5253         (ldt_info.limit & 0x0ffff);
5254     entry_2 = (ldt_info.base_addr & 0xff000000) |
5255         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5256         (ldt_info.limit & 0xf0000) |
5257         ((read_exec_only ^ 1) << 9) |
5258         (contents << 10) |
5259         ((seg_not_present ^ 1) << 15) |
5260         (seg_32bit << 22) |
5261         (limit_in_pages << 23) |
5262         (useable << 20) |
5263         (lm << 21) |
5264         0x7000;
5265 
5266     /* Install the new entry ...  */
5267 install:
5268     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5269     lp[0] = tswap32(entry_1);
5270     lp[1] = tswap32(entry_2);
5271     return 0;
5272 }
5273 
5274 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5275 {
5276     struct target_modify_ldt_ldt_s *target_ldt_info;
5277     uint64_t *gdt_table = g2h(env->gdt.base);
5278     uint32_t base_addr, limit, flags;
5279     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5280     int seg_not_present, useable, lm;
5281     uint32_t *lp, entry_1, entry_2;
5282 
5283     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5284     if (!target_ldt_info)
5285         return -TARGET_EFAULT;
5286     idx = tswap32(target_ldt_info->entry_number);
5287     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5288         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5289         unlock_user_struct(target_ldt_info, ptr, 1);
5290         return -TARGET_EINVAL;
5291     }
5292     lp = (uint32_t *)(gdt_table + idx);
5293     entry_1 = tswap32(lp[0]);
5294     entry_2 = tswap32(lp[1]);
5295 
5296     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5297     contents = (entry_2 >> 10) & 3;
5298     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5299     seg_32bit = (entry_2 >> 22) & 1;
5300     limit_in_pages = (entry_2 >> 23) & 1;
5301     useable = (entry_2 >> 20) & 1;
5302 #ifdef TARGET_ABI32
5303     lm = 0;
5304 #else
5305     lm = (entry_2 >> 21) & 1;
5306 #endif
5307     flags = (seg_32bit << 0) | (contents << 1) |
5308         (read_exec_only << 3) | (limit_in_pages << 4) |
5309         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5310     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5311     base_addr = (entry_1 >> 16) |
5312         (entry_2 & 0xff000000) |
5313         ((entry_2 & 0xff) << 16);
5314     target_ldt_info->base_addr = tswapal(base_addr);
5315     target_ldt_info->limit = tswap32(limit);
5316     target_ldt_info->flags = tswap32(flags);
5317     unlock_user_struct(target_ldt_info, ptr, 1);
5318     return 0;
5319 }
5320 #endif /* TARGET_I386 && TARGET_ABI32 */
5321 
5322 #ifndef TARGET_ABI32
5323 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5324 {
5325     abi_long ret = 0;
5326     abi_ulong val;
5327     int idx;
5328 
5329     switch(code) {
5330     case TARGET_ARCH_SET_GS:
5331     case TARGET_ARCH_SET_FS:
5332         if (code == TARGET_ARCH_SET_GS)
5333             idx = R_GS;
5334         else
5335             idx = R_FS;
5336         cpu_x86_load_seg(env, idx, 0);
5337         env->segs[idx].base = addr;
5338         break;
5339     case TARGET_ARCH_GET_GS:
5340     case TARGET_ARCH_GET_FS:
5341         if (code == TARGET_ARCH_GET_GS)
5342             idx = R_GS;
5343         else
5344             idx = R_FS;
5345         val = env->segs[idx].base;
5346         if (put_user(val, addr, abi_ulong))
5347             ret = -TARGET_EFAULT;
5348         break;
5349     default:
5350         ret = -TARGET_EINVAL;
5351         break;
5352     }
5353     return ret;
5354 }
5355 #endif
5356 
5357 #endif /* defined(TARGET_I386) */
5358 
5359 #define NEW_STACK_SIZE 0x40000
5360 
5361 
5362 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5363 typedef struct {
5364     CPUArchState *env;
5365     pthread_mutex_t mutex;
5366     pthread_cond_t cond;
5367     pthread_t thread;
5368     uint32_t tid;
5369     abi_ulong child_tidptr;
5370     abi_ulong parent_tidptr;
5371     sigset_t sigmask;
5372 } new_thread_info;
5373 
5374 static void *clone_func(void *arg)
5375 {
5376     new_thread_info *info = arg;
5377     CPUArchState *env;
5378     CPUState *cpu;
5379     TaskState *ts;
5380 
5381     rcu_register_thread();
5382     tcg_register_thread();
5383     env = info->env;
5384     cpu = ENV_GET_CPU(env);
5385     thread_cpu = cpu;
5386     ts = (TaskState *)cpu->opaque;
5387     info->tid = gettid();
5388     task_settid(ts);
5389     if (info->child_tidptr)
5390         put_user_u32(info->tid, info->child_tidptr);
5391     if (info->parent_tidptr)
5392         put_user_u32(info->tid, info->parent_tidptr);
5393     /* Enable signals.  */
5394     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5395     /* Signal to the parent that we're ready.  */
5396     pthread_mutex_lock(&info->mutex);
5397     pthread_cond_broadcast(&info->cond);
5398     pthread_mutex_unlock(&info->mutex);
5399     /* Wait until the parent has finished initializing the tls state.  */
5400     pthread_mutex_lock(&clone_lock);
5401     pthread_mutex_unlock(&clone_lock);
5402     cpu_loop(env);
5403     /* never exits */
5404     return NULL;
5405 }
5406 
5407 /* do_fork() Must return host values and target errnos (unlike most
5408    do_*() functions). */
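/* Overview (editor's note): when CLONE_VM is set the new task must share
   the address space, so it is created as a host pthread running
   clone_func() on a copy of the CPU state; parent and child hand-shake via
   info.mutex/info.cond and clone_lock so TID pointers and TLS are in place
   before the child enters cpu_loop().  Without CLONE_VM the request is
   treated as a (v)fork and implemented with a plain host fork(). */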
5409 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5410                    abi_ulong parent_tidptr, target_ulong newtls,
5411                    abi_ulong child_tidptr)
5412 {
5413     CPUState *cpu = ENV_GET_CPU(env);
5414     int ret;
5415     TaskState *ts;
5416     CPUState *new_cpu;
5417     CPUArchState *new_env;
5418     sigset_t sigmask;
5419 
5420     flags &= ~CLONE_IGNORED_FLAGS;
5421 
5422     /* Emulate vfork() with fork() */
5423     if (flags & CLONE_VFORK)
5424         flags &= ~(CLONE_VFORK | CLONE_VM);
5425 
5426     if (flags & CLONE_VM) {
5427         TaskState *parent_ts = (TaskState *)cpu->opaque;
5428         new_thread_info info;
5429         pthread_attr_t attr;
5430 
5431         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5432             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5433             return -TARGET_EINVAL;
5434         }
5435 
5436         ts = g_new0(TaskState, 1);
5437         init_task_state(ts);
5438 
5439         /* Grab a mutex so that thread setup appears atomic.  */
5440         pthread_mutex_lock(&clone_lock);
5441 
5442         /* we create a new CPU instance. */
5443         new_env = cpu_copy(env);
5444         /* Init regs that differ from the parent.  */
5445         cpu_clone_regs(new_env, newsp);
5446         new_cpu = ENV_GET_CPU(new_env);
5447         new_cpu->opaque = ts;
5448         ts->bprm = parent_ts->bprm;
5449         ts->info = parent_ts->info;
5450         ts->signal_mask = parent_ts->signal_mask;
5451 
5452         if (flags & CLONE_CHILD_CLEARTID) {
5453             ts->child_tidptr = child_tidptr;
5454         }
5455 
5456         if (flags & CLONE_SETTLS) {
5457             cpu_set_tls (new_env, newtls);
5458         }
5459 
5460         memset(&info, 0, sizeof(info));
5461         pthread_mutex_init(&info.mutex, NULL);
5462         pthread_mutex_lock(&info.mutex);
5463         pthread_cond_init(&info.cond, NULL);
5464         info.env = new_env;
5465         if (flags & CLONE_CHILD_SETTID) {
5466             info.child_tidptr = child_tidptr;
5467         }
5468         if (flags & CLONE_PARENT_SETTID) {
5469             info.parent_tidptr = parent_tidptr;
5470         }
5471 
5472         ret = pthread_attr_init(&attr);
5473         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5474         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5475         /* It is not safe to deliver signals until the child has finished
5476            initializing, so temporarily block all signals.  */
5477         sigfillset(&sigmask);
5478         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5479 
5480         /* If this is our first additional thread, we need to ensure we
5481          * generate code for parallel execution and flush old translations.
5482          */
5483         if (!parallel_cpus) {
5484             parallel_cpus = true;
5485             tb_flush(cpu);
5486         }
5487 
5488         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5489         /* TODO: Free new CPU state if thread creation failed.  */
5490 
5491         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5492         pthread_attr_destroy(&attr);
5493         if (ret == 0) {
5494             /* Wait for the child to initialize.  */
5495             pthread_cond_wait(&info.cond, &info.mutex);
5496             ret = info.tid;
5497         } else {
5498             ret = -1;
5499         }
5500         pthread_mutex_unlock(&info.mutex);
5501         pthread_cond_destroy(&info.cond);
5502         pthread_mutex_destroy(&info.mutex);
5503         pthread_mutex_unlock(&clone_lock);
5504     } else {
5505         /* if CLONE_VM is not set, we consider it a fork */
5506         if (flags & CLONE_INVALID_FORK_FLAGS) {
5507             return -TARGET_EINVAL;
5508         }
5509 
5510         /* We can't support custom termination signals */
5511         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5512             return -TARGET_EINVAL;
5513         }
5514 
5515         if (block_signals()) {
5516             return -TARGET_ERESTARTSYS;
5517         }
5518 
5519         fork_start();
5520         ret = fork();
5521         if (ret == 0) {
5522             /* Child Process.  */
5523             cpu_clone_regs(env, newsp);
5524             fork_end(1);
5525             /* There is a race condition here.  The parent process could
5526                theoretically read the TID in the child process before the child
5527                tid is set.  This would require using either ptrace
5528                (not implemented) or having *_tidptr point at a shared memory
5529                mapping.  We can't repeat the spinlock hack used above because
5530                the child process gets its own copy of the lock.  */
5531             if (flags & CLONE_CHILD_SETTID)
5532                 put_user_u32(gettid(), child_tidptr);
5533             if (flags & CLONE_PARENT_SETTID)
5534                 put_user_u32(gettid(), parent_tidptr);
5535             ts = (TaskState *)cpu->opaque;
5536             if (flags & CLONE_SETTLS)
5537                 cpu_set_tls (env, newtls);
5538             if (flags & CLONE_CHILD_CLEARTID)
5539                 ts->child_tidptr = child_tidptr;
5540         } else {
5541             fork_end(0);
5542         }
5543     }
5544     return ret;
5545 }
5546 
5547 /* warning: doesn't handle Linux-specific flags... */
5548 static int target_to_host_fcntl_cmd(int cmd)
5549 {
5550     int ret;
5551 
5552     switch(cmd) {
5553     case TARGET_F_DUPFD:
5554     case TARGET_F_GETFD:
5555     case TARGET_F_SETFD:
5556     case TARGET_F_GETFL:
5557     case TARGET_F_SETFL:
5558         ret = cmd;
5559         break;
5560     case TARGET_F_GETLK:
5561         ret = F_GETLK64;
5562         break;
5563     case TARGET_F_SETLK:
5564         ret = F_SETLK64;
5565         break;
5566     case TARGET_F_SETLKW:
5567         ret = F_SETLKW64;
5568         break;
5569     case TARGET_F_GETOWN:
5570         ret = F_GETOWN;
5571         break;
5572     case TARGET_F_SETOWN:
5573         ret = F_SETOWN;
5574         break;
5575     case TARGET_F_GETSIG:
5576         ret = F_GETSIG;
5577         break;
5578     case TARGET_F_SETSIG:
5579         ret = F_SETSIG;
5580         break;
5581 #if TARGET_ABI_BITS == 32
5582     case TARGET_F_GETLK64:
5583         ret = F_GETLK64;
5584         break;
5585     case TARGET_F_SETLK64:
5586         ret = F_SETLK64;
5587         break;
5588     case TARGET_F_SETLKW64:
5589         ret = F_SETLKW64;
5590         break;
5591 #endif
5592     case TARGET_F_SETLEASE:
5593         ret = F_SETLEASE;
5594         break;
5595     case TARGET_F_GETLEASE:
5596         ret = F_GETLEASE;
5597         break;
5598 #ifdef F_DUPFD_CLOEXEC
5599     case TARGET_F_DUPFD_CLOEXEC:
5600         ret = F_DUPFD_CLOEXEC;
5601         break;
5602 #endif
5603     case TARGET_F_NOTIFY:
5604         ret = F_NOTIFY;
5605         break;
5606 #ifdef F_GETOWN_EX
5607     case TARGET_F_GETOWN_EX:
5608         ret = F_GETOWN_EX;
5609         break;
5610 #endif
5611 #ifdef F_SETOWN_EX
5612     case TARGET_F_SETOWN_EX:
5613         ret = F_SETOWN_EX;
5614         break;
5615 #endif
5616 #ifdef F_SETPIPE_SZ
5617     case TARGET_F_SETPIPE_SZ:
5618         ret = F_SETPIPE_SZ;
5619         break;
5620     case TARGET_F_GETPIPE_SZ:
5621         ret = F_GETPIPE_SZ;
5622         break;
5623 #endif
5624     default:
5625         ret = -TARGET_EINVAL;
5626         break;
5627     }
5628 
5629 #if defined(__powerpc64__)
5630     /* On PPC64, the glibc headers define F_*LK* to 12, 13 and 14, which are
5631      * not supported by the kernel. The glibc fcntl call actually adjusts
5632      * them to 5, 6 and 7 before making the syscall(). Since we make the
5633      * syscall directly, adjust to what is supported by the kernel.
5634      */
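    /*
     * Worked example (derived from the comment above): with the glibc
     * values F_GETLK64 == 12, F_SETLK64 == 13 and F_SETLKW64 == 14,
     * subtracting (F_GETLK64 - 5) == 7 remaps them to the kernel's
     * 5, 6 and 7.
     */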
5635     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5636         ret -= F_GETLK64 - 5;
5637     }
5638 #endif
5639 
5640     return ret;
5641 }
5642 
5643 #define FLOCK_TRANSTBL \
5644     switch (type) { \
5645     TRANSTBL_CONVERT(F_RDLCK); \
5646     TRANSTBL_CONVERT(F_WRLCK); \
5647     TRANSTBL_CONVERT(F_UNLCK); \
5648     TRANSTBL_CONVERT(F_EXLCK); \
5649     TRANSTBL_CONVERT(F_SHLCK); \
5650     }
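/* The switch body above is expanded twice with different definitions of
   TRANSTBL_CONVERT (an X-macro pattern), yielding matching target-to-host
   and host-to-target lock-type converters from a single list. */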
5651 
5652 static int target_to_host_flock(int type)
5653 {
5654 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5655     FLOCK_TRANSTBL
5656 #undef  TRANSTBL_CONVERT
5657     return -TARGET_EINVAL;
5658 }
5659 
5660 static int host_to_target_flock(int type)
5661 {
5662 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5663     FLOCK_TRANSTBL
5664 #undef  TRANSTBL_CONVERT
5665     /* if we don't know how to convert the value coming
5666      * from the host, we copy it to the target as-is
5667      */
5668     return type;
5669 }
5670 
5671 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5672                                             abi_ulong target_flock_addr)
5673 {
5674     struct target_flock *target_fl;
5675     int l_type;
5676 
5677     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5678         return -TARGET_EFAULT;
5679     }
5680 
5681     __get_user(l_type, &target_fl->l_type);
5682     l_type = target_to_host_flock(l_type);
5683     if (l_type < 0) {
5684         return l_type;
5685     }
5686     fl->l_type = l_type;
5687     __get_user(fl->l_whence, &target_fl->l_whence);
5688     __get_user(fl->l_start, &target_fl->l_start);
5689     __get_user(fl->l_len, &target_fl->l_len);
5690     __get_user(fl->l_pid, &target_fl->l_pid);
5691     unlock_user_struct(target_fl, target_flock_addr, 0);
5692     return 0;
5693 }
5694 
5695 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5696                                           const struct flock64 *fl)
5697 {
5698     struct target_flock *target_fl;
5699     short l_type;
5700 
5701     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5702         return -TARGET_EFAULT;
5703     }
5704 
5705     l_type = host_to_target_flock(fl->l_type);
5706     __put_user(l_type, &target_fl->l_type);
5707     __put_user(fl->l_whence, &target_fl->l_whence);
5708     __put_user(fl->l_start, &target_fl->l_start);
5709     __put_user(fl->l_len, &target_fl->l_len);
5710     __put_user(fl->l_pid, &target_fl->l_pid);
5711     unlock_user_struct(target_fl, target_flock_addr, 1);
5712     return 0;
5713 }
5714 
5715 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5716 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5717 
5718 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5719 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5720                                                    abi_ulong target_flock_addr)
5721 {
5722     struct target_oabi_flock64 *target_fl;
5723     int l_type;
5724 
5725     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5726         return -TARGET_EFAULT;
5727     }
5728 
5729     __get_user(l_type, &target_fl->l_type);
5730     l_type = target_to_host_flock(l_type);
5731     if (l_type < 0) {
5732         return l_type;
5733     }
5734     fl->l_type = l_type;
5735     __get_user(fl->l_whence, &target_fl->l_whence);
5736     __get_user(fl->l_start, &target_fl->l_start);
5737     __get_user(fl->l_len, &target_fl->l_len);
5738     __get_user(fl->l_pid, &target_fl->l_pid);
5739     unlock_user_struct(target_fl, target_flock_addr, 0);
5740     return 0;
5741 }
5742 
5743 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
5744                                                  const struct flock64 *fl)
5745 {
5746     struct target_oabi_flock64 *target_fl;
5747     short l_type;
5748 
5749     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5750         return -TARGET_EFAULT;
5751     }
5752 
5753     l_type = host_to_target_flock(fl->l_type);
5754     __put_user(l_type, &target_fl->l_type);
5755     __put_user(fl->l_whence, &target_fl->l_whence);
5756     __put_user(fl->l_start, &target_fl->l_start);
5757     __put_user(fl->l_len, &target_fl->l_len);
5758     __put_user(fl->l_pid, &target_fl->l_pid);
5759     unlock_user_struct(target_fl, target_flock_addr, 1);
5760     return 0;
5761 }
5762 #endif
5763 
5764 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
5765                                               abi_ulong target_flock_addr)
5766 {
5767     struct target_flock64 *target_fl;
5768     int l_type;
5769 
5770     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5771         return -TARGET_EFAULT;
5772     }
5773 
5774     __get_user(l_type, &target_fl->l_type);
5775     l_type = target_to_host_flock(l_type);
5776     if (l_type < 0) {
5777         return l_type;
5778     }
5779     fl->l_type = l_type;
5780     __get_user(fl->l_whence, &target_fl->l_whence);
5781     __get_user(fl->l_start, &target_fl->l_start);
5782     __get_user(fl->l_len, &target_fl->l_len);
5783     __get_user(fl->l_pid, &target_fl->l_pid);
5784     unlock_user_struct(target_fl, target_flock_addr, 0);
5785     return 0;
5786 }
5787 
5788 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
5789                                             const struct flock64 *fl)
5790 {
5791     struct target_flock64 *target_fl;
5792     short l_type;
5793 
5794     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5795         return -TARGET_EFAULT;
5796     }
5797 
5798     l_type = host_to_target_flock(fl->l_type);
5799     __put_user(l_type, &target_fl->l_type);
5800     __put_user(fl->l_whence, &target_fl->l_whence);
5801     __put_user(fl->l_start, &target_fl->l_start);
5802     __put_user(fl->l_len, &target_fl->l_len);
5803     __put_user(fl->l_pid, &target_fl->l_pid);
5804     unlock_user_struct(target_fl, target_flock_addr, 1);
5805     return 0;
5806 }
5807 
5808 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
5809 {
5810     struct flock64 fl64;
5811 #ifdef F_GETOWN_EX
5812     struct f_owner_ex fox;
5813     struct target_f_owner_ex *target_fox;
5814 #endif
5815     abi_long ret;
5816     int host_cmd = target_to_host_fcntl_cmd(cmd);
5817 
5818     if (host_cmd == -TARGET_EINVAL)
5819         return host_cmd;
5820 
5821     switch(cmd) {
5822     case TARGET_F_GETLK:
5823         ret = copy_from_user_flock(&fl64, arg);
5824         if (ret) {
5825             return ret;
5826         }
5827         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5828         if (ret == 0) {
5829             ret = copy_to_user_flock(arg, &fl64);
5830         }
5831         break;
5832 
5833     case TARGET_F_SETLK:
5834     case TARGET_F_SETLKW:
5835         ret = copy_from_user_flock(&fl64, arg);
5836         if (ret) {
5837             return ret;
5838         }
5839         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5840         break;
5841 
5842     case TARGET_F_GETLK64:
5843         ret = copy_from_user_flock64(&fl64, arg);
5844         if (ret) {
5845             return ret;
5846         }
5847         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5848         if (ret == 0) {
5849             ret = copy_to_user_flock64(arg, &fl64);
5850         }
5851         break;
5852     case TARGET_F_SETLK64:
5853     case TARGET_F_SETLKW64:
5854         ret = copy_from_user_flock64(&fl64, arg);
5855         if (ret) {
5856             return ret;
5857         }
5858         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5859         break;
5860 
5861     case TARGET_F_GETFL:
5862         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5863         if (ret >= 0) {
5864             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
5865         }
5866         break;
5867 
5868     case TARGET_F_SETFL:
5869         ret = get_errno(safe_fcntl(fd, host_cmd,
5870                                    target_to_host_bitmask(arg,
5871                                                           fcntl_flags_tbl)));
5872         break;
5873 
5874 #ifdef F_GETOWN_EX
5875     case TARGET_F_GETOWN_EX:
5876         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5877         if (ret >= 0) {
5878             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
5879                 return -TARGET_EFAULT;
5880             target_fox->type = tswap32(fox.type);
5881             target_fox->pid = tswap32(fox.pid);
5882             unlock_user_struct(target_fox, arg, 1);
5883         }
5884         break;
5885 #endif
5886 
5887 #ifdef F_SETOWN_EX
5888     case TARGET_F_SETOWN_EX:
5889         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
5890             return -TARGET_EFAULT;
5891         fox.type = tswap32(target_fox->type);
5892         fox.pid = tswap32(target_fox->pid);
5893         unlock_user_struct(target_fox, arg, 0);
5894         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5895         break;
5896 #endif
5897 
5898     case TARGET_F_SETOWN:
5899     case TARGET_F_GETOWN:
5900     case TARGET_F_SETSIG:
5901     case TARGET_F_GETSIG:
5902     case TARGET_F_SETLEASE:
5903     case TARGET_F_GETLEASE:
5904     case TARGET_F_SETPIPE_SZ:
5905     case TARGET_F_GETPIPE_SZ:
5906         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5907         break;
5908 
5909     default:
5910         ret = get_errno(safe_fcntl(fd, cmd, arg));
5911         break;
5912     }
5913     return ret;
5914 }
5915 
5916 #ifdef USE_UID16
5917 
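/* With 16-bit UID/GID syscalls, IDs above 65535 cannot be represented and
   are reported back as 65534, matching the kernel's default
   overflowuid/overflowgid. */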
5918 static inline int high2lowuid(int uid)
5919 {
5920     if (uid > 65535)
5921         return 65534;
5922     else
5923         return uid;
5924 }
5925 
5926 static inline int high2lowgid(int gid)
5927 {
5928     if (gid > 65535)
5929         return 65534;
5930     else
5931         return gid;
5932 }
5933 
5934 static inline int low2highuid(int uid)
5935 {
5936     if ((int16_t)uid == -1)
5937         return -1;
5938     else
5939         return uid;
5940 }
5941 
5942 static inline int low2highgid(int gid)
5943 {
5944     if ((int16_t)gid == -1)
5945         return -1;
5946     else
5947         return gid;
5948 }
5949 static inline int tswapid(int id)
5950 {
5951     return tswap16(id);
5952 }
5953 
5954 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5955 
5956 #else /* !USE_UID16 */
5957 static inline int high2lowuid(int uid)
5958 {
5959     return uid;
5960 }
5961 static inline int high2lowgid(int gid)
5962 {
5963     return gid;
5964 }
5965 static inline int low2highuid(int uid)
5966 {
5967     return uid;
5968 }
5969 static inline int low2highgid(int gid)
5970 {
5971     return gid;
5972 }
5973 static inline int tswapid(int id)
5974 {
5975     return tswap32(id);
5976 }
5977 
5978 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5979 
5980 #endif /* USE_UID16 */
5981 
5982 /* We must do direct syscalls for setting UID/GID, because we want to
5983  * implement the Linux system call semantics of "change only for this thread",
5984  * not the libc/POSIX semantics of "change for all threads in process".
5985  * (See http://ewontfix.com/17/ for more details.)
5986  * We use the 32-bit version of the syscalls if present; if it is not
5987  * then either the host architecture supports 32-bit UIDs natively with
5988  * the standard syscall, or the 16-bit UID is the best we can do.
5989  */
5990 #ifdef __NR_setuid32
5991 #define __NR_sys_setuid __NR_setuid32
5992 #else
5993 #define __NR_sys_setuid __NR_setuid
5994 #endif
5995 #ifdef __NR_setgid32
5996 #define __NR_sys_setgid __NR_setgid32
5997 #else
5998 #define __NR_sys_setgid __NR_setgid
5999 #endif
6000 #ifdef __NR_setresuid32
6001 #define __NR_sys_setresuid __NR_setresuid32
6002 #else
6003 #define __NR_sys_setresuid __NR_setresuid
6004 #endif
6005 #ifdef __NR_setresgid32
6006 #define __NR_sys_setresgid __NR_setresgid32
6007 #else
6008 #define __NR_sys_setresgid __NR_setresgid
6009 #endif
6010 
6011 _syscall1(int, sys_setuid, uid_t, uid)
6012 _syscall1(int, sys_setgid, gid_t, gid)
6013 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6014 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
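/* These raw wrappers are used by the TARGET_NR_setuid/setgid/setresuid/
   setresgid handlers later in this file, typically combined with the
   low2highuid()/low2highgid() helpers above, for example (illustrative):
   get_errno(sys_setuid(low2highuid(arg1))). */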
6015 
6016 void syscall_init(void)
6017 {
6018     IOCTLEntry *ie;
6019     const argtype *arg_type;
6020     int size;
6021     int i;
6022 
6023     thunk_init(STRUCT_MAX);
6024 
6025 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6026 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6027 #include "syscall_types.h"
6028 #undef STRUCT
6029 #undef STRUCT_SPECIAL
6030 
6031     /* Build the target_to_host_errno_table[] from
6032      * host_to_target_errno_table[]. */
6033     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6034         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6035     }
6036 
6037     /* we patch the ioctl size if necessary. We rely on the fact that
6038        no ioctl has all the bits at '1' in the size field */
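    /*
     * Sketch of the patching below: a Linux ioctl number packs direction,
     * size, type and number fields.  Entries whose size field is all ones
     * (TARGET_IOC_SIZEMASK) act as a "fill in at run time" marker; the loop
     * replaces that field with thunk_type_size() of the pointed-to struct
     * so the registered TARGET_* command matches the real payload size.
     */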
6039     ie = ioctl_entries;
6040     while (ie->target_cmd != 0) {
6041         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6042             TARGET_IOC_SIZEMASK) {
6043             arg_type = ie->arg_type;
6044             if (arg_type[0] != TYPE_PTR) {
6045                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6046                         ie->target_cmd);
6047                 exit(1);
6048             }
6049             arg_type++;
6050             size = thunk_type_size(arg_type, 0);
6051             ie->target_cmd = (ie->target_cmd &
6052                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6053                 (size << TARGET_IOC_SIZESHIFT);
6054         }
6055 
6056         /* automatic consistency check if same arch */
6057 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6058     (defined(__x86_64__) && defined(TARGET_X86_64))
6059         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6060             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6061                     ie->name, ie->target_cmd, ie->host_cmd);
6062         }
6063 #endif
6064         ie++;
6065     }
6066 }
6067 
6068 #if TARGET_ABI_BITS == 32
6069 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6070 {
6071 #ifdef TARGET_WORDS_BIGENDIAN
6072     return ((uint64_t)word0 << 32) | word1;
6073 #else
6074     return ((uint64_t)word1 << 32) | word0;
6075 #endif
6076 }
6077 #else /* TARGET_ABI_BITS == 32 */
6078 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6079 {
6080     return word0;
6081 }
6082 #endif /* TARGET_ABI_BITS != 32 */
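/* Example (illustrative): a 32-bit guest passing the 64-bit offset
   0x0000000180000000 in a register pair supplies 0x00000001 in the
   high-half word and 0x80000000 in the low-half word; whether word0 or
   word1 is the high half depends on TARGET_WORDS_BIGENDIAN.  On 64-bit
   ABIs the offset arrives in a single register and word1 is ignored. */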
6083 
6084 #ifdef TARGET_NR_truncate64
6085 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6086                                          abi_long arg2,
6087                                          abi_long arg3,
6088                                          abi_long arg4)
6089 {
6090     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6091         arg2 = arg3;
6092         arg3 = arg4;
6093     }
6094     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6095 }
6096 #endif
6097 
6098 #ifdef TARGET_NR_ftruncate64
6099 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6100                                           abi_long arg2,
6101                                           abi_long arg3,
6102                                           abi_long arg4)
6103 {
6104     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6105         arg2 = arg3;
6106         arg3 = arg4;
6107     }
6108     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6109 }
6110 #endif
6111 
6112 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6113                                                abi_ulong target_addr)
6114 {
6115     struct target_timespec *target_ts;
6116 
6117     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6118         return -TARGET_EFAULT;
6119     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6120     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6121     unlock_user_struct(target_ts, target_addr, 0);
6122     return 0;
6123 }
6124 
6125 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6126                                                struct timespec *host_ts)
6127 {
6128     struct target_timespec *target_ts;
6129 
6130     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6131         return -TARGET_EFAULT;
6132     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6133     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6134     unlock_user_struct(target_ts, target_addr, 1);
6135     return 0;
6136 }
6137 
6138 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6139                                                  abi_ulong target_addr)
6140 {
6141     struct target_itimerspec *target_itspec;
6142 
6143     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6144         return -TARGET_EFAULT;
6145     }
6146 
6147     host_itspec->it_interval.tv_sec =
6148                             tswapal(target_itspec->it_interval.tv_sec);
6149     host_itspec->it_interval.tv_nsec =
6150                             tswapal(target_itspec->it_interval.tv_nsec);
6151     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6152     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6153 
6154     unlock_user_struct(target_itspec, target_addr, 0);
6155     return 0;
6156 }
6157 
6158 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6159                                                struct itimerspec *host_its)
6160 {
6161     struct target_itimerspec *target_itspec;
6162 
6163     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6164         return -TARGET_EFAULT;
6165     }
6166 
6167     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6168     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6169 
6170     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6171     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6172 
6173     unlock_user_struct(target_itspec, target_addr, 1);
6174     return 0;
6175 }
6176 
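/* Copy the adjtimex() struct timex field by field between guest memory and
 * the host representation; returns 0 or -TARGET_EFAULT.
 */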
6177 static inline abi_long target_to_host_timex(struct timex *host_tx,
6178                                             abi_long target_addr)
6179 {
6180     struct target_timex *target_tx;
6181 
6182     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6183         return -TARGET_EFAULT;
6184     }
6185 
6186     __get_user(host_tx->modes, &target_tx->modes);
6187     __get_user(host_tx->offset, &target_tx->offset);
6188     __get_user(host_tx->freq, &target_tx->freq);
6189     __get_user(host_tx->maxerror, &target_tx->maxerror);
6190     __get_user(host_tx->esterror, &target_tx->esterror);
6191     __get_user(host_tx->status, &target_tx->status);
6192     __get_user(host_tx->constant, &target_tx->constant);
6193     __get_user(host_tx->precision, &target_tx->precision);
6194     __get_user(host_tx->tolerance, &target_tx->tolerance);
6195     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6196     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6197     __get_user(host_tx->tick, &target_tx->tick);
6198     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6199     __get_user(host_tx->jitter, &target_tx->jitter);
6200     __get_user(host_tx->shift, &target_tx->shift);
6201     __get_user(host_tx->stabil, &target_tx->stabil);
6202     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6203     __get_user(host_tx->calcnt, &target_tx->calcnt);
6204     __get_user(host_tx->errcnt, &target_tx->errcnt);
6205     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6206     __get_user(host_tx->tai, &target_tx->tai);
6207 
6208     unlock_user_struct(target_tx, target_addr, 0);
6209     return 0;
6210 }
6211 
6212 static inline abi_long host_to_target_timex(abi_long target_addr,
6213                                             struct timex *host_tx)
6214 {
6215     struct target_timex *target_tx;
6216 
6217     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6218         return -TARGET_EFAULT;
6219     }
6220 
6221     __put_user(host_tx->modes, &target_tx->modes);
6222     __put_user(host_tx->offset, &target_tx->offset);
6223     __put_user(host_tx->freq, &target_tx->freq);
6224     __put_user(host_tx->maxerror, &target_tx->maxerror);
6225     __put_user(host_tx->esterror, &target_tx->esterror);
6226     __put_user(host_tx->status, &target_tx->status);
6227     __put_user(host_tx->constant, &target_tx->constant);
6228     __put_user(host_tx->precision, &target_tx->precision);
6229     __put_user(host_tx->tolerance, &target_tx->tolerance);
6230     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6231     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6232     __put_user(host_tx->tick, &target_tx->tick);
6233     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6234     __put_user(host_tx->jitter, &target_tx->jitter);
6235     __put_user(host_tx->shift, &target_tx->shift);
6236     __put_user(host_tx->stabil, &target_tx->stabil);
6237     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6238     __put_user(host_tx->calcnt, &target_tx->calcnt);
6239     __put_user(host_tx->errcnt, &target_tx->errcnt);
6240     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6241     __put_user(host_tx->tai, &target_tx->tai);
6242 
6243     unlock_user_struct(target_tx, target_addr, 1);
6244     return 0;
6245 }
6246 
6247 
6248 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6249                                                abi_ulong target_addr)
6250 {
6251     struct target_sigevent *target_sevp;
6252 
6253     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6254         return -TARGET_EFAULT;
6255     }
6256 
6257     /* This union is awkward on 64 bit systems because it has a 32 bit
6258      * integer and a pointer in it; we follow the conversion approach
6259      * used for handling sigval types in signal.c so the guest should get
6260      * the correct value back even if we did a 64 bit byteswap and it's
6261      * using the 32 bit integer.
6262      */
6263     host_sevp->sigev_value.sival_ptr =
6264         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6265     host_sevp->sigev_signo =
6266         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6267     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6268     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6269 
6270     unlock_user_struct(target_sevp, target_addr, 1);
6271     return 0;
6272 }
6273 
6274 #if defined(TARGET_NR_mlockall)
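/* Translate the guest's mlockall() flag bits into the host MCL_* values. */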
6275 static inline int target_to_host_mlockall_arg(int arg)
6276 {
6277     int result = 0;
6278 
6279     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6280         result |= MCL_CURRENT;
6281     }
6282     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6283         result |= MCL_FUTURE;
6284     }
6285     return result;
6286 }
6287 #endif
6288 
6289 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6290      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6291      defined(TARGET_NR_newfstatat))
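/* Write a host struct stat out to the guest in its stat64 layout.  On
 * 32-bit ARM EABI targets a separate target_eabi_stat64 layout is used;
 * everything else uses target_stat64 (or target_stat if the target has no
 * separate stat64 structure).
 */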
6292 static inline abi_long host_to_target_stat64(void *cpu_env,
6293                                              abi_ulong target_addr,
6294                                              struct stat *host_st)
6295 {
6296 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6297     if (((CPUARMState *)cpu_env)->eabi) {
6298         struct target_eabi_stat64 *target_st;
6299 
6300         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6301             return -TARGET_EFAULT;
6302         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6303         __put_user(host_st->st_dev, &target_st->st_dev);
6304         __put_user(host_st->st_ino, &target_st->st_ino);
6305 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6306         __put_user(host_st->st_ino, &target_st->__st_ino);
6307 #endif
6308         __put_user(host_st->st_mode, &target_st->st_mode);
6309         __put_user(host_st->st_nlink, &target_st->st_nlink);
6310         __put_user(host_st->st_uid, &target_st->st_uid);
6311         __put_user(host_st->st_gid, &target_st->st_gid);
6312         __put_user(host_st->st_rdev, &target_st->st_rdev);
6313         __put_user(host_st->st_size, &target_st->st_size);
6314         __put_user(host_st->st_blksize, &target_st->st_blksize);
6315         __put_user(host_st->st_blocks, &target_st->st_blocks);
6316         __put_user(host_st->st_atime, &target_st->target_st_atime);
6317         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6318         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6319         unlock_user_struct(target_st, target_addr, 1);
6320     } else
6321 #endif
6322     {
6323 #if defined(TARGET_HAS_STRUCT_STAT64)
6324         struct target_stat64 *target_st;
6325 #else
6326         struct target_stat *target_st;
6327 #endif
6328 
6329         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6330             return -TARGET_EFAULT;
6331         memset(target_st, 0, sizeof(*target_st));
6332         __put_user(host_st->st_dev, &target_st->st_dev);
6333         __put_user(host_st->st_ino, &target_st->st_ino);
6334 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6335         __put_user(host_st->st_ino, &target_st->__st_ino);
6336 #endif
6337         __put_user(host_st->st_mode, &target_st->st_mode);
6338         __put_user(host_st->st_nlink, &target_st->st_nlink);
6339         __put_user(host_st->st_uid, &target_st->st_uid);
6340         __put_user(host_st->st_gid, &target_st->st_gid);
6341         __put_user(host_st->st_rdev, &target_st->st_rdev);
6342         /* XXX: better use of kernel struct */
6343         __put_user(host_st->st_size, &target_st->st_size);
6344         __put_user(host_st->st_blksize, &target_st->st_blksize);
6345         __put_user(host_st->st_blocks, &target_st->st_blocks);
6346         __put_user(host_st->st_atime, &target_st->target_st_atime);
6347         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6348         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6349         unlock_user_struct(target_st, target_addr, 1);
6350     }
6351 
6352     return 0;
6353 }
6354 #endif
6355 
6356 /* ??? Using host futex calls even when target atomic operations
6357    are not really atomic probably breaks things.  However, implementing
6358    futexes locally would make futexes shared between multiple processes
6359    tricky.  They're probably useless anyway, because guest atomic
6360    operations won't work either.  */
6361 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6362                     target_ulong uaddr2, int val3)
6363 {
6364     struct timespec ts, *pts;
6365     int base_op;
6366 
6367     /* ??? We assume FUTEX_* constants are the same on both host
6368        and target.  */
6369 #ifdef FUTEX_CMD_MASK
6370     base_op = op & FUTEX_CMD_MASK;
6371 #else
6372     base_op = op;
6373 #endif
6374     switch (base_op) {
6375     case FUTEX_WAIT:
6376     case FUTEX_WAIT_BITSET:
6377         if (timeout) {
6378             pts = &ts;
6379             target_to_host_timespec(pts, timeout);
6380         } else {
6381             pts = NULL;
6382         }
6383         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6384                          pts, NULL, val3));
6385     case FUTEX_WAKE:
6386         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6387     case FUTEX_FD:
6388         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6389     case FUTEX_REQUEUE:
6390     case FUTEX_CMP_REQUEUE:
6391     case FUTEX_WAKE_OP:
6392         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6393            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6394            But the prototype takes a `struct timespec *'; insert casts
6395            to satisfy the compiler.  We do not need to tswap TIMEOUT
6396            since it's not compared to guest memory.  */
6397         pts = (struct timespec *)(uintptr_t) timeout;
6398         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6399                                     g2h(uaddr2),
6400                                     (base_op == FUTEX_CMP_REQUEUE
6401                                      ? tswap32(val3)
6402                                      : val3)));
6403     default:
6404         return -TARGET_ENOSYS;
6405     }
6406 }
6407 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
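/* Emulate name_to_handle_at(2): read handle_bytes from the guest handle,
 * call the host syscall, then copy the (otherwise opaque) handle back to
 * the guest with handle_bytes/handle_type byte-swapped and store the
 * mount ID.
 */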
6408 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6409                                      abi_long handle, abi_long mount_id,
6410                                      abi_long flags)
6411 {
6412     struct file_handle *target_fh;
6413     struct file_handle *fh;
6414     int mid = 0;
6415     abi_long ret;
6416     char *name;
6417     unsigned int size, total_size;
6418 
6419     if (get_user_s32(size, handle)) {
6420         return -TARGET_EFAULT;
6421     }
6422 
6423     name = lock_user_string(pathname);
6424     if (!name) {
6425         return -TARGET_EFAULT;
6426     }
6427 
6428     total_size = sizeof(struct file_handle) + size;
6429     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6430     if (!target_fh) {
6431         unlock_user(name, pathname, 0);
6432         return -TARGET_EFAULT;
6433     }
6434 
6435     fh = g_malloc0(total_size);
6436     fh->handle_bytes = size;
6437 
6438     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6439     unlock_user(name, pathname, 0);
6440 
6441     /* man name_to_handle_at(2):
6442      * Other than the use of the handle_bytes field, the caller should treat
6443      * the file_handle structure as an opaque data type
6444      */
6445 
6446     memcpy(target_fh, fh, total_size);
6447     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6448     target_fh->handle_type = tswap32(fh->handle_type);
6449     g_free(fh);
6450     unlock_user(target_fh, handle, total_size);
6451 
6452     if (put_user_s32(mid, mount_id)) {
6453         return -TARGET_EFAULT;
6454     }
6455 
6456     return ret;
6457 
6458 }
6459 #endif
6460 
6461 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
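/* Emulate open_by_handle_at(2): copy the guest file_handle, fix up the
 * byte order of handle_bytes/handle_type, translate the open flags and
 * call the host syscall.
 */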
6462 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6463                                      abi_long flags)
6464 {
6465     struct file_handle *target_fh;
6466     struct file_handle *fh;
6467     unsigned int size, total_size;
6468     abi_long ret;
6469 
6470     if (get_user_s32(size, handle)) {
6471         return -TARGET_EFAULT;
6472     }
6473 
6474     total_size = sizeof(struct file_handle) + size;
6475     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6476     if (!target_fh) {
6477         return -TARGET_EFAULT;
6478     }
6479 
6480     fh = g_memdup(target_fh, total_size);
6481     fh->handle_bytes = size;
6482     fh->handle_type = tswap32(target_fh->handle_type);
6483 
6484     ret = get_errno(open_by_handle_at(mount_fd, fh,
6485                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6486 
6487     g_free(fh);
6488 
6489     unlock_user(target_fh, handle, total_size);
6490 
6491     return ret;
6492 }
6493 #endif
6494 
6495 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6496 
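/* Common helper for signalfd() and signalfd4(): convert the guest signal
 * mask and flags (only O_NONBLOCK/O_CLOEXEC are accepted) to host values,
 * create the signalfd and register a read-side fd translator for the new
 * descriptor.
 */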
6497 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6498 {
6499     int host_flags;
6500     target_sigset_t *target_mask;
6501     sigset_t host_mask;
6502     abi_long ret;
6503 
6504     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6505         return -TARGET_EINVAL;
6506     }
6507     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6508         return -TARGET_EFAULT;
6509     }
6510 
6511     target_to_host_sigset(&host_mask, target_mask);
6512 
6513     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6514 
6515     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6516     if (ret >= 0) {
6517         fd_trans_register(ret, &target_signalfd_trans);
6518     }
6519 
6520     unlock_user_struct(target_mask, mask, 0);
6521 
6522     return ret;
6523 }
6524 #endif
6525 
6526 /* Map host to target signal numbers for the wait family of syscalls.
6527    Assume all other status bits are the same.  */
6528 int host_to_target_waitstatus(int status)
6529 {
6530     if (WIFSIGNALED(status)) {
6531         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6532     }
6533     if (WIFSTOPPED(status)) {
6534         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6535                | (status & 0xff);
6536     }
6537     return status;
6538 }
6539 
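/* Emulate /proc/self/cmdline: write the saved argv strings, each
 * terminated by a NUL byte, to the given fd.
 */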
6540 static int open_self_cmdline(void *cpu_env, int fd)
6541 {
6542     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6543     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6544     int i;
6545 
6546     for (i = 0; i < bprm->argc; i++) {
6547         size_t len = strlen(bprm->argv[i]) + 1;
6548 
6549         if (write(fd, bprm->argv[i], len) != len) {
6550             return -1;
6551         }
6552     }
6553 
6554     return 0;
6555 }
6556 
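/* Emulate /proc/self/maps: parse the host's own maps file and rewrite each
 * mapping that lies inside the guest address space using guest addresses,
 * labelling the guest stack as [stack].
 */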
6557 static int open_self_maps(void *cpu_env, int fd)
6558 {
6559     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6560     TaskState *ts = cpu->opaque;
6561     FILE *fp;
6562     char *line = NULL;
6563     size_t len = 0;
6564     ssize_t read;
6565 
6566     fp = fopen("/proc/self/maps", "r");
6567     if (fp == NULL) {
6568         return -1;
6569     }
6570 
6571     while ((read = getline(&line, &len, fp)) != -1) {
6572         int fields, dev_maj, dev_min, inode;
6573         uint64_t min, max, offset;
6574         char flag_r, flag_w, flag_x, flag_p;
6575         char path[512] = "";
6576         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6577                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6578                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6579 
6580         if ((fields < 10) || (fields > 11)) {
6581             continue;
6582         }
6583         if (h2g_valid(min)) {
6584             int flags = page_get_flags(h2g(min));
6585             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6586             if (page_check_range(h2g(min), max - min, flags) == -1) {
6587                 continue;
6588             }
6589             if (h2g(min) == ts->info->stack_limit) {
6590                 pstrcpy(path, sizeof(path), "      [stack]");
6591             }
6592             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6593                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6594                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6595                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6596                     path[0] ? "         " : "", path);
6597         }
6598     }
6599 
6600     free(line);
6601     fclose(fp);
6602 
6603     return 0;
6604 }
6605 
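/* Emulate /proc/self/stat: only the pid, command name and start-of-stack
 * fields are filled in; every other field is reported as 0.
 */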
6606 static int open_self_stat(void *cpu_env, int fd)
6607 {
6608     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6609     TaskState *ts = cpu->opaque;
6610     abi_ulong start_stack = ts->info->start_stack;
6611     int i;
6612 
6613     for (i = 0; i < 44; i++) {
6614       char buf[128];
6615       int len;
6616       uint64_t val = 0;
6617 
6618       if (i == 0) {
6619         /* pid */
6620         val = getpid();
6621         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6622       } else if (i == 1) {
6623         /* app name */
6624         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6625       } else if (i == 27) {
6626         /* stack bottom */
6627         val = start_stack;
6628         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6629       } else {
6630         /* for the rest, there is MasterCard */
6631         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6632       }
6633 
6634       len = strlen(buf);
6635       if (write(fd, buf, len) != len) {
6636           return -1;
6637       }
6638     }
6639 
6640     return 0;
6641 }
6642 
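/* Emulate /proc/self/auxv: copy the auxiliary vector saved at exec time
 * out of guest memory into the given fd.
 */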
6643 static int open_self_auxv(void *cpu_env, int fd)
6644 {
6645     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6646     TaskState *ts = cpu->opaque;
6647     abi_ulong auxv = ts->info->saved_auxv;
6648     abi_ulong len = ts->info->auxv_len;
6649     char *ptr;
6650 
6651     /*
6652      * The auxiliary vector is stored on the target process stack.
6653      * Read the whole auxv vector and copy it to the file.
6654      */
6655     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6656     if (ptr != NULL) {
6657         while (len > 0) {
6658             ssize_t r;
6659             r = write(fd, ptr, len);
6660             if (r <= 0) {
6661                 break;
6662             }
6663             len -= r;
6664             ptr += r;
6665         }
6666         lseek(fd, 0, SEEK_SET);
6667         unlock_user(ptr, auxv, len);
6668     }
6669 
6670     return 0;
6671 }
6672 
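/* Return 1 if filename names the given entry under /proc/self/ or under
 * /proc/<our pid>/, 0 otherwise.
 */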
6673 static int is_proc_myself(const char *filename, const char *entry)
6674 {
6675     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6676         filename += strlen("/proc/");
6677         if (!strncmp(filename, "self/", strlen("self/"))) {
6678             filename += strlen("self/");
6679         } else if (*filename >= '1' && *filename <= '9') {
6680             char myself[80];
6681             snprintf(myself, sizeof(myself), "%d/", getpid());
6682             if (!strncmp(filename, myself, strlen(myself))) {
6683                 filename += strlen(myself);
6684             } else {
6685                 return 0;
6686             }
6687         } else {
6688             return 0;
6689         }
6690         if (!strcmp(filename, entry)) {
6691             return 1;
6692         }
6693     }
6694     return 0;
6695 }
6696 
6697 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6698 static int is_proc(const char *filename, const char *entry)
6699 {
6700     return strcmp(filename, entry) == 0;
6701 }
6702 
6703 static int open_net_route(void *cpu_env, int fd)
6704 {
6705     FILE *fp;
6706     char *line = NULL;
6707     size_t len = 0;
6708     ssize_t read;
6709 
6710     fp = fopen("/proc/net/route", "r");
6711     if (fp == NULL) {
6712         return -1;
6713     }
6714 
6715     /* read header */
6716 
6717     read = getline(&line, &len, fp);
6718     dprintf(fd, "%s", line);
6719 
6720     /* read routes */
6721 
6722     while ((read = getline(&line, &len, fp)) != -1) {
6723         char iface[16];
6724         uint32_t dest, gw, mask;
6725         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6726         sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6727                      iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6728                      &mask, &mtu, &window, &irtt);
6729         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6730                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6731                 metric, tswap32(mask), mtu, window, irtt);
6732     }
6733 
6734     free(line);
6735     fclose(fp);
6736 
6737     return 0;
6738 }
6739 #endif
6740 
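/* openat() emulation: intercept opens of the /proc files faked above
 * (maps, stat, auxv, cmdline, and /proc/net/route when host and target
 * endianness differ) by filling an unlinked temporary file with the
 * synthesized contents, and resolve /proc/self/exe via AT_EXECFD or
 * exec_path.  Anything else is passed through to safe_openat().
 */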
6741 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
6742 {
6743     struct fake_open {
6744         const char *filename;
6745         int (*fill)(void *cpu_env, int fd);
6746         int (*cmp)(const char *s1, const char *s2);
6747     };
6748     const struct fake_open *fake_open;
6749     static const struct fake_open fakes[] = {
6750         { "maps", open_self_maps, is_proc_myself },
6751         { "stat", open_self_stat, is_proc_myself },
6752         { "auxv", open_self_auxv, is_proc_myself },
6753         { "cmdline", open_self_cmdline, is_proc_myself },
6754 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6755         { "/proc/net/route", open_net_route, is_proc },
6756 #endif
6757         { NULL, NULL, NULL }
6758     };
6759 
6760     if (is_proc_myself(pathname, "exe")) {
6761         int execfd = qemu_getauxval(AT_EXECFD);
6762         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
6763     }
6764 
6765     for (fake_open = fakes; fake_open->filename; fake_open++) {
6766         if (fake_open->cmp(pathname, fake_open->filename)) {
6767             break;
6768         }
6769     }
6770 
6771     if (fake_open->filename) {
6772         const char *tmpdir;
6773         char filename[PATH_MAX];
6774         int fd, r;
6775 
6776         /* create a temporary file to hold the emulated /proc contents */
6777         tmpdir = getenv("TMPDIR");
6778         if (!tmpdir)
6779             tmpdir = "/tmp";
6780         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
6781         fd = mkstemp(filename);
6782         if (fd < 0) {
6783             return fd;
6784         }
6785         unlink(filename);
6786 
6787         if ((r = fake_open->fill(cpu_env, fd))) {
6788             int e = errno;
6789             close(fd);
6790             errno = e;
6791             return r;
6792         }
6793         lseek(fd, 0, SEEK_SET);
6794 
6795         return fd;
6796     }
6797 
6798     return safe_openat(dirfd, path(pathname), flags, mode);
6799 }
6800 
6801 #define TIMER_MAGIC 0x0caf0000
6802 #define TIMER_MAGIC_MASK 0xffff0000
6803 
6804 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
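/* Guest-visible timer IDs therefore have the form TIMER_MAGIC | index,
 * e.g. 0x0caf0003 for internal timer slot 3.
 */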
6805 static target_timer_t get_timer_id(abi_long arg)
6806 {
6807     target_timer_t timerid = arg;
6808 
6809     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6810         return -TARGET_EINVAL;
6811     }
6812 
6813     timerid &= 0xffff;
6814 
6815     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6816         return -TARGET_EINVAL;
6817     }
6818 
6819     return timerid;
6820 }
6821 
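/* Convert a CPU affinity bitmap from the guest's abi_ulong layout into the
 * host's unsigned long layout, bit by bit.  host_to_target_cpu_mask()
 * below performs the reverse conversion.
 */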
6822 static int target_to_host_cpu_mask(unsigned long *host_mask,
6823                                    size_t host_size,
6824                                    abi_ulong target_addr,
6825                                    size_t target_size)
6826 {
6827     unsigned target_bits = sizeof(abi_ulong) * 8;
6828     unsigned host_bits = sizeof(*host_mask) * 8;
6829     abi_ulong *target_mask;
6830     unsigned i, j;
6831 
6832     assert(host_size >= target_size);
6833 
6834     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
6835     if (!target_mask) {
6836         return -TARGET_EFAULT;
6837     }
6838     memset(host_mask, 0, host_size);
6839 
6840     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6841         unsigned bit = i * target_bits;
6842         abi_ulong val;
6843 
6844         __get_user(val, &target_mask[i]);
6845         for (j = 0; j < target_bits; j++, bit++) {
6846             if (val & (1UL << j)) {
6847                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
6848             }
6849         }
6850     }
6851 
6852     unlock_user(target_mask, target_addr, 0);
6853     return 0;
6854 }
6855 
6856 static int host_to_target_cpu_mask(const unsigned long *host_mask,
6857                                    size_t host_size,
6858                                    abi_ulong target_addr,
6859                                    size_t target_size)
6860 {
6861     unsigned target_bits = sizeof(abi_ulong) * 8;
6862     unsigned host_bits = sizeof(*host_mask) * 8;
6863     abi_ulong *target_mask;
6864     unsigned i, j;
6865 
6866     assert(host_size >= target_size);
6867 
6868     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
6869     if (!target_mask) {
6870         return -TARGET_EFAULT;
6871     }
6872 
6873     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6874         unsigned bit = i * target_bits;
6875         abi_ulong val = 0;
6876 
6877         for (j = 0; j < target_bits; j++, bit++) {
6878             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
6879                 val |= 1UL << j;
6880             }
6881         }
6882         __put_user(val, &target_mask[i]);
6883     }
6884 
6885     unlock_user(target_mask, target_addr, target_size);
6886     return 0;
6887 }
6888 
6889 /* This is an internal helper for do_syscall so that it has a single
6890  * return point, which makes it easier to perform actions such as
6891  * logging of syscall results.
6892  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
6893  */
6894 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
6895                             abi_long arg2, abi_long arg3, abi_long arg4,
6896                             abi_long arg5, abi_long arg6, abi_long arg7,
6897                             abi_long arg8)
6898 {
6899     CPUState *cpu = ENV_GET_CPU(cpu_env);
6900     abi_long ret;
6901 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
6902     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
6903     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
6904     struct stat st;
6905 #endif
6906 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
6907     || defined(TARGET_NR_fstatfs)
6908     struct statfs stfs;
6909 #endif
6910     void *p;
6911 
6912     switch(num) {
6913     case TARGET_NR_exit:
6914         /* In old applications this may be used to implement _exit(2).
6915            However, in threaded applications it is used for thread termination,
6916            and _exit_group is used for application termination.
6917            Do thread termination if we have more than one thread.  */
6918 
6919         if (block_signals()) {
6920             return -TARGET_ERESTARTSYS;
6921         }
6922 
6923         cpu_list_lock();
6924 
6925         if (CPU_NEXT(first_cpu)) {
6926             TaskState *ts;
6927 
6928             /* Remove the CPU from the list.  */
6929             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
6930 
6931             cpu_list_unlock();
6932 
6933             ts = cpu->opaque;
6934             if (ts->child_tidptr) {
6935                 put_user_u32(0, ts->child_tidptr);
6936                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
6937                           NULL, NULL, 0);
6938             }
6939             thread_cpu = NULL;
6940             object_unref(OBJECT(cpu));
6941             g_free(ts);
6942             rcu_unregister_thread();
6943             pthread_exit(NULL);
6944         }
6945 
6946         cpu_list_unlock();
6947         preexit_cleanup(cpu_env, arg1);
6948         _exit(arg1);
6949         return 0; /* avoid warning */
6950     case TARGET_NR_read:
6951         if (arg3 == 0) {
6952             return 0;
6953         } else {
6954             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6955                 return -TARGET_EFAULT;
6956             ret = get_errno(safe_read(arg1, p, arg3));
6957             if (ret >= 0 &&
6958                 fd_trans_host_to_target_data(arg1)) {
6959                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
6960             }
6961             unlock_user(p, arg2, ret);
6962         }
6963         return ret;
6964     case TARGET_NR_write:
6965         if (arg2 == 0 && arg3 == 0) {
6966             return get_errno(safe_write(arg1, 0, 0));
6967         }
6968         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6969             return -TARGET_EFAULT;
6970         if (fd_trans_target_to_host_data(arg1)) {
6971             void *copy = g_malloc(arg3);
6972             memcpy(copy, p, arg3);
6973             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
6974             if (ret >= 0) {
6975                 ret = get_errno(safe_write(arg1, copy, ret));
6976             }
6977             g_free(copy);
6978         } else {
6979             ret = get_errno(safe_write(arg1, p, arg3));
6980         }
6981         unlock_user(p, arg2, 0);
6982         return ret;
6983 
6984 #ifdef TARGET_NR_open
6985     case TARGET_NR_open:
6986         if (!(p = lock_user_string(arg1)))
6987             return -TARGET_EFAULT;
6988         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
6989                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
6990                                   arg3));
6991         fd_trans_unregister(ret);
6992         unlock_user(p, arg1, 0);
6993         return ret;
6994 #endif
6995     case TARGET_NR_openat:
6996         if (!(p = lock_user_string(arg2)))
6997             return -TARGET_EFAULT;
6998         ret = get_errno(do_openat(cpu_env, arg1, p,
6999                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7000                                   arg4));
7001         fd_trans_unregister(ret);
7002         unlock_user(p, arg2, 0);
7003         return ret;
7004 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7005     case TARGET_NR_name_to_handle_at:
7006         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7007         return ret;
7008 #endif
7009 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7010     case TARGET_NR_open_by_handle_at:
7011         ret = do_open_by_handle_at(arg1, arg2, arg3);
7012         fd_trans_unregister(ret);
7013         return ret;
7014 #endif
7015     case TARGET_NR_close:
7016         fd_trans_unregister(arg1);
7017         return get_errno(close(arg1));
7018 
7019     case TARGET_NR_brk:
7020         return do_brk(arg1);
7021 #ifdef TARGET_NR_fork
7022     case TARGET_NR_fork:
7023         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7024 #endif
7025 #ifdef TARGET_NR_waitpid
7026     case TARGET_NR_waitpid:
7027         {
7028             int status;
7029             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7030             if (!is_error(ret) && arg2 && ret
7031                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7032                 return -TARGET_EFAULT;
7033         }
7034         return ret;
7035 #endif
7036 #ifdef TARGET_NR_waitid
7037     case TARGET_NR_waitid:
7038         {
7039             siginfo_t info;
7040             info.si_pid = 0;
7041             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7042             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7043                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7044                     return -TARGET_EFAULT;
7045                 host_to_target_siginfo(p, &info);
7046                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7047             }
7048         }
7049         return ret;
7050 #endif
7051 #ifdef TARGET_NR_creat /* not on alpha */
7052     case TARGET_NR_creat:
7053         if (!(p = lock_user_string(arg1)))
7054             return -TARGET_EFAULT;
7055         ret = get_errno(creat(p, arg2));
7056         fd_trans_unregister(ret);
7057         unlock_user(p, arg1, 0);
7058         return ret;
7059 #endif
7060 #ifdef TARGET_NR_link
7061     case TARGET_NR_link:
7062         {
7063             void * p2;
7064             p = lock_user_string(arg1);
7065             p2 = lock_user_string(arg2);
7066             if (!p || !p2)
7067                 ret = -TARGET_EFAULT;
7068             else
7069                 ret = get_errno(link(p, p2));
7070             unlock_user(p2, arg2, 0);
7071             unlock_user(p, arg1, 0);
7072         }
7073         return ret;
7074 #endif
7075 #if defined(TARGET_NR_linkat)
7076     case TARGET_NR_linkat:
7077         {
7078             void * p2 = NULL;
7079             if (!arg2 || !arg4)
7080                 return -TARGET_EFAULT;
7081             p  = lock_user_string(arg2);
7082             p2 = lock_user_string(arg4);
7083             if (!p || !p2)
7084                 ret = -TARGET_EFAULT;
7085             else
7086                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7087             unlock_user(p, arg2, 0);
7088             unlock_user(p2, arg4, 0);
7089         }
7090         return ret;
7091 #endif
7092 #ifdef TARGET_NR_unlink
7093     case TARGET_NR_unlink:
7094         if (!(p = lock_user_string(arg1)))
7095             return -TARGET_EFAULT;
7096         ret = get_errno(unlink(p));
7097         unlock_user(p, arg1, 0);
7098         return ret;
7099 #endif
7100 #if defined(TARGET_NR_unlinkat)
7101     case TARGET_NR_unlinkat:
7102         if (!(p = lock_user_string(arg2)))
7103             return -TARGET_EFAULT;
7104         ret = get_errno(unlinkat(arg1, p, arg3));
7105         unlock_user(p, arg2, 0);
7106         return ret;
7107 #endif
7108     case TARGET_NR_execve:
7109         {
7110             char **argp, **envp;
7111             int argc, envc;
7112             abi_ulong gp;
7113             abi_ulong guest_argp;
7114             abi_ulong guest_envp;
7115             abi_ulong addr;
7116             char **q;
7117             int total_size = 0;
7118 
7119             argc = 0;
7120             guest_argp = arg2;
7121             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7122                 if (get_user_ual(addr, gp))
7123                     return -TARGET_EFAULT;
7124                 if (!addr)
7125                     break;
7126                 argc++;
7127             }
7128             envc = 0;
7129             guest_envp = arg3;
7130             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7131                 if (get_user_ual(addr, gp))
7132                     return -TARGET_EFAULT;
7133                 if (!addr)
7134                     break;
7135                 envc++;
7136             }
7137 
7138             argp = g_new0(char *, argc + 1);
7139             envp = g_new0(char *, envc + 1);
7140 
7141             for (gp = guest_argp, q = argp; gp;
7142                   gp += sizeof(abi_ulong), q++) {
7143                 if (get_user_ual(addr, gp))
7144                     goto execve_efault;
7145                 if (!addr)
7146                     break;
7147                 if (!(*q = lock_user_string(addr)))
7148                     goto execve_efault;
7149                 total_size += strlen(*q) + 1;
7150             }
7151             *q = NULL;
7152 
7153             for (gp = guest_envp, q = envp; gp;
7154                   gp += sizeof(abi_ulong), q++) {
7155                 if (get_user_ual(addr, gp))
7156                     goto execve_efault;
7157                 if (!addr)
7158                     break;
7159                 if (!(*q = lock_user_string(addr)))
7160                     goto execve_efault;
7161                 total_size += strlen(*q) + 1;
7162             }
7163             *q = NULL;
7164 
7165             if (!(p = lock_user_string(arg1)))
7166                 goto execve_efault;
7167             /* Although execve() is not an interruptible syscall it is
7168              * a special case where we must use the safe_syscall wrapper:
7169              * if we allow a signal to happen before we make the host
7170              * syscall then we will 'lose' it, because at the point of
7171              * execve the process leaves QEMU's control. So we use the
7172              * safe syscall wrapper to ensure that we either take the
7173              * signal as a guest signal, or else it does not happen
7174              * before the execve completes and makes it the other
7175              * program's problem.
7176              */
7177             ret = get_errno(safe_execve(p, argp, envp));
7178             unlock_user(p, arg1, 0);
7179 
7180             goto execve_end;
7181 
7182         execve_efault:
7183             ret = -TARGET_EFAULT;
7184 
7185         execve_end:
7186             for (gp = guest_argp, q = argp; *q;
7187                   gp += sizeof(abi_ulong), q++) {
7188                 if (get_user_ual(addr, gp)
7189                     || !addr)
7190                     break;
7191                 unlock_user(*q, addr, 0);
7192             }
7193             for (gp = guest_envp, q = envp; *q;
7194                   gp += sizeof(abi_ulong), q++) {
7195                 if (get_user_ual(addr, gp)
7196                     || !addr)
7197                     break;
7198                 unlock_user(*q, addr, 0);
7199             }
7200 
7201             g_free(argp);
7202             g_free(envp);
7203         }
7204         return ret;
7205     case TARGET_NR_chdir:
7206         if (!(p = lock_user_string(arg1)))
7207             return -TARGET_EFAULT;
7208         ret = get_errno(chdir(p));
7209         unlock_user(p, arg1, 0);
7210         return ret;
7211 #ifdef TARGET_NR_time
7212     case TARGET_NR_time:
7213         {
7214             time_t host_time;
7215             ret = get_errno(time(&host_time));
7216             if (!is_error(ret)
7217                 && arg1
7218                 && put_user_sal(host_time, arg1))
7219                 return -TARGET_EFAULT;
7220         }
7221         return ret;
7222 #endif
7223 #ifdef TARGET_NR_mknod
7224     case TARGET_NR_mknod:
7225         if (!(p = lock_user_string(arg1)))
7226             return -TARGET_EFAULT;
7227         ret = get_errno(mknod(p, arg2, arg3));
7228         unlock_user(p, arg1, 0);
7229         return ret;
7230 #endif
7231 #if defined(TARGET_NR_mknodat)
7232     case TARGET_NR_mknodat:
7233         if (!(p = lock_user_string(arg2)))
7234             return -TARGET_EFAULT;
7235         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7236         unlock_user(p, arg2, 0);
7237         return ret;
7238 #endif
7239 #ifdef TARGET_NR_chmod
7240     case TARGET_NR_chmod:
7241         if (!(p = lock_user_string(arg1)))
7242             return -TARGET_EFAULT;
7243         ret = get_errno(chmod(p, arg2));
7244         unlock_user(p, arg1, 0);
7245         return ret;
7246 #endif
7247 #ifdef TARGET_NR_lseek
7248     case TARGET_NR_lseek:
7249         return get_errno(lseek(arg1, arg2, arg3));
7250 #endif
7251 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7252     /* Alpha specific */
7253     case TARGET_NR_getxpid:
7254         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7255         return get_errno(getpid());
7256 #endif
7257 #ifdef TARGET_NR_getpid
7258     case TARGET_NR_getpid:
7259         return get_errno(getpid());
7260 #endif
7261     case TARGET_NR_mount:
7262         {
7263             /* need to look at the data field */
7264             void *p2, *p3;
7265 
7266             if (arg1) {
7267                 p = lock_user_string(arg1);
7268                 if (!p) {
7269                     return -TARGET_EFAULT;
7270                 }
7271             } else {
7272                 p = NULL;
7273             }
7274 
7275             p2 = lock_user_string(arg2);
7276             if (!p2) {
7277                 if (arg1) {
7278                     unlock_user(p, arg1, 0);
7279                 }
7280                 return -TARGET_EFAULT;
7281             }
7282 
7283             if (arg3) {
7284                 p3 = lock_user_string(arg3);
7285                 if (!p3) {
7286                     if (arg1) {
7287                         unlock_user(p, arg1, 0);
7288                     }
7289                     unlock_user(p2, arg2, 0);
7290                     return -TARGET_EFAULT;
7291                 }
7292             } else {
7293                 p3 = NULL;
7294             }
7295 
7296             /* FIXME - arg5 should be locked, but it isn't clear how to
7297              * do that since it's not guaranteed to be a NULL-terminated
7298              * string.
7299              */
7300             if (!arg5) {
7301                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7302             } else {
7303                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7304             }
7305             ret = get_errno(ret);
7306 
7307             if (arg1) {
7308                 unlock_user(p, arg1, 0);
7309             }
7310             unlock_user(p2, arg2, 0);
7311             if (arg3) {
7312                 unlock_user(p3, arg3, 0);
7313             }
7314         }
7315         return ret;
7316 #ifdef TARGET_NR_umount
7317     case TARGET_NR_umount:
7318         if (!(p = lock_user_string(arg1)))
7319             return -TARGET_EFAULT;
7320         ret = get_errno(umount(p));
7321         unlock_user(p, arg1, 0);
7322         return ret;
7323 #endif
7324 #ifdef TARGET_NR_stime /* not on alpha */
7325     case TARGET_NR_stime:
7326         {
7327             time_t host_time;
7328             if (get_user_sal(host_time, arg1))
7329                 return -TARGET_EFAULT;
7330             return get_errno(stime(&host_time));
7331         }
7332 #endif
7333 #ifdef TARGET_NR_alarm /* not on alpha */
7334     case TARGET_NR_alarm:
7335         return alarm(arg1);
7336 #endif
7337 #ifdef TARGET_NR_pause /* not on alpha */
7338     case TARGET_NR_pause:
7339         if (!block_signals()) {
7340             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7341         }
7342         return -TARGET_EINTR;
7343 #endif
7344 #ifdef TARGET_NR_utime
7345     case TARGET_NR_utime:
7346         {
7347             struct utimbuf tbuf, *host_tbuf;
7348             struct target_utimbuf *target_tbuf;
7349             if (arg2) {
7350                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7351                     return -TARGET_EFAULT;
7352                 tbuf.actime = tswapal(target_tbuf->actime);
7353                 tbuf.modtime = tswapal(target_tbuf->modtime);
7354                 unlock_user_struct(target_tbuf, arg2, 0);
7355                 host_tbuf = &tbuf;
7356             } else {
7357                 host_tbuf = NULL;
7358             }
7359             if (!(p = lock_user_string(arg1)))
7360                 return -TARGET_EFAULT;
7361             ret = get_errno(utime(p, host_tbuf));
7362             unlock_user(p, arg1, 0);
7363         }
7364         return ret;
7365 #endif
7366 #ifdef TARGET_NR_utimes
7367     case TARGET_NR_utimes:
7368         {
7369             struct timeval *tvp, tv[2];
7370             if (arg2) {
7371                 if (copy_from_user_timeval(&tv[0], arg2)
7372                     || copy_from_user_timeval(&tv[1],
7373                                               arg2 + sizeof(struct target_timeval)))
7374                     return -TARGET_EFAULT;
7375                 tvp = tv;
7376             } else {
7377                 tvp = NULL;
7378             }
7379             if (!(p = lock_user_string(arg1)))
7380                 return -TARGET_EFAULT;
7381             ret = get_errno(utimes(p, tvp));
7382             unlock_user(p, arg1, 0);
7383         }
7384         return ret;
7385 #endif
7386 #if defined(TARGET_NR_futimesat)
7387     case TARGET_NR_futimesat:
7388         {
7389             struct timeval *tvp, tv[2];
7390             if (arg3) {
7391                 if (copy_from_user_timeval(&tv[0], arg3)
7392                     || copy_from_user_timeval(&tv[1],
7393                                               arg3 + sizeof(struct target_timeval)))
7394                     return -TARGET_EFAULT;
7395                 tvp = tv;
7396             } else {
7397                 tvp = NULL;
7398             }
7399             if (!(p = lock_user_string(arg2))) {
7400                 return -TARGET_EFAULT;
7401             }
7402             ret = get_errno(futimesat(arg1, path(p), tvp));
7403             unlock_user(p, arg2, 0);
7404         }
7405         return ret;
7406 #endif
7407 #ifdef TARGET_NR_access
7408     case TARGET_NR_access:
7409         if (!(p = lock_user_string(arg1))) {
7410             return -TARGET_EFAULT;
7411         }
7412         ret = get_errno(access(path(p), arg2));
7413         unlock_user(p, arg1, 0);
7414         return ret;
7415 #endif
7416 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7417     case TARGET_NR_faccessat:
7418         if (!(p = lock_user_string(arg2))) {
7419             return -TARGET_EFAULT;
7420         }
7421         ret = get_errno(faccessat(arg1, p, arg3, 0));
7422         unlock_user(p, arg2, 0);
7423         return ret;
7424 #endif
7425 #ifdef TARGET_NR_nice /* not on alpha */
7426     case TARGET_NR_nice:
7427         return get_errno(nice(arg1));
7428 #endif
7429     case TARGET_NR_sync:
7430         sync();
7431         return 0;
7432 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7433     case TARGET_NR_syncfs:
7434         return get_errno(syncfs(arg1));
7435 #endif
7436     case TARGET_NR_kill:
7437         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7438 #ifdef TARGET_NR_rename
7439     case TARGET_NR_rename:
7440         {
7441             void *p2;
7442             p = lock_user_string(arg1);
7443             p2 = lock_user_string(arg2);
7444             if (!p || !p2)
7445                 ret = -TARGET_EFAULT;
7446             else
7447                 ret = get_errno(rename(p, p2));
7448             unlock_user(p2, arg2, 0);
7449             unlock_user(p, arg1, 0);
7450         }
7451         return ret;
7452 #endif
7453 #if defined(TARGET_NR_renameat)
7454     case TARGET_NR_renameat:
7455         {
7456             void *p2;
7457             p  = lock_user_string(arg2);
7458             p2 = lock_user_string(arg4);
7459             if (!p || !p2)
7460                 ret = -TARGET_EFAULT;
7461             else
7462                 ret = get_errno(renameat(arg1, p, arg3, p2));
7463             unlock_user(p2, arg4, 0);
7464             unlock_user(p, arg2, 0);
7465         }
7466         return ret;
7467 #endif
7468 #if defined(TARGET_NR_renameat2)
7469     case TARGET_NR_renameat2:
7470         {
7471             void *p2;
7472             p  = lock_user_string(arg2);
7473             p2 = lock_user_string(arg4);
7474             if (!p || !p2) {
7475                 ret = -TARGET_EFAULT;
7476             } else {
7477                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7478             }
7479             unlock_user(p2, arg4, 0);
7480             unlock_user(p, arg2, 0);
7481         }
7482         return ret;
7483 #endif
7484 #ifdef TARGET_NR_mkdir
7485     case TARGET_NR_mkdir:
7486         if (!(p = lock_user_string(arg1)))
7487             return -TARGET_EFAULT;
7488         ret = get_errno(mkdir(p, arg2));
7489         unlock_user(p, arg1, 0);
7490         return ret;
7491 #endif
7492 #if defined(TARGET_NR_mkdirat)
7493     case TARGET_NR_mkdirat:
7494         if (!(p = lock_user_string(arg2)))
7495             return -TARGET_EFAULT;
7496         ret = get_errno(mkdirat(arg1, p, arg3));
7497         unlock_user(p, arg2, 0);
7498         return ret;
7499 #endif
7500 #ifdef TARGET_NR_rmdir
7501     case TARGET_NR_rmdir:
7502         if (!(p = lock_user_string(arg1)))
7503             return -TARGET_EFAULT;
7504         ret = get_errno(rmdir(p));
7505         unlock_user(p, arg1, 0);
7506         return ret;
7507 #endif
7508     case TARGET_NR_dup:
7509         ret = get_errno(dup(arg1));
7510         if (ret >= 0) {
7511             fd_trans_dup(arg1, ret);
7512         }
7513         return ret;
7514 #ifdef TARGET_NR_pipe
7515     case TARGET_NR_pipe:
7516         return do_pipe(cpu_env, arg1, 0, 0);
7517 #endif
7518 #ifdef TARGET_NR_pipe2
7519     case TARGET_NR_pipe2:
7520         return do_pipe(cpu_env, arg1,
7521                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7522 #endif
7523     case TARGET_NR_times:
7524         {
7525             struct target_tms *tmsp;
7526             struct tms tms;
7527             ret = get_errno(times(&tms));
7528             if (arg1) {
7529                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7530                 if (!tmsp)
7531                     return -TARGET_EFAULT;
7532                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7533                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7534                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7535                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7536             }
7537             if (!is_error(ret))
7538                 ret = host_to_target_clock_t(ret);
7539         }
7540         return ret;
7541     case TARGET_NR_acct:
7542         if (arg1 == 0) {
7543             ret = get_errno(acct(NULL));
7544         } else {
7545             if (!(p = lock_user_string(arg1))) {
7546                 return -TARGET_EFAULT;
7547             }
7548             ret = get_errno(acct(path(p)));
7549             unlock_user(p, arg1, 0);
7550         }
7551         return ret;
7552 #ifdef TARGET_NR_umount2
7553     case TARGET_NR_umount2:
7554         if (!(p = lock_user_string(arg1)))
7555             return -TARGET_EFAULT;
7556         ret = get_errno(umount2(p, arg2));
7557         unlock_user(p, arg1, 0);
7558         return ret;
7559 #endif
7560     case TARGET_NR_ioctl:
7561         return do_ioctl(arg1, arg2, arg3);
7562 #ifdef TARGET_NR_fcntl
7563     case TARGET_NR_fcntl:
7564         return do_fcntl(arg1, arg2, arg3);
7565 #endif
7566     case TARGET_NR_setpgid:
7567         return get_errno(setpgid(arg1, arg2));
7568     case TARGET_NR_umask:
7569         return get_errno(umask(arg1));
7570     case TARGET_NR_chroot:
7571         if (!(p = lock_user_string(arg1)))
7572             return -TARGET_EFAULT;
7573         ret = get_errno(chroot(p));
7574         unlock_user(p, arg1, 0);
7575         return ret;
7576 #ifdef TARGET_NR_dup2
7577     case TARGET_NR_dup2:
7578         ret = get_errno(dup2(arg1, arg2));
7579         if (ret >= 0) {
7580             fd_trans_dup(arg1, arg2);
7581         }
7582         return ret;
7583 #endif
7584 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7585     case TARGET_NR_dup3:
7586     {
7587         int host_flags;
7588 
7589         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7590             return -EINVAL;
7591         }
7592         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7593         ret = get_errno(dup3(arg1, arg2, host_flags));
7594         if (ret >= 0) {
7595             fd_trans_dup(arg1, arg2);
7596         }
7597         return ret;
7598     }
7599 #endif
7600 #ifdef TARGET_NR_getppid /* not on alpha */
7601     case TARGET_NR_getppid:
7602         return get_errno(getppid());
7603 #endif
7604 #ifdef TARGET_NR_getpgrp
7605     case TARGET_NR_getpgrp:
7606         return get_errno(getpgrp());
7607 #endif
7608     case TARGET_NR_setsid:
7609         return get_errno(setsid());
7610 #ifdef TARGET_NR_sigaction
7611     case TARGET_NR_sigaction:
7612         {
7613 #if defined(TARGET_ALPHA)
7614             struct target_sigaction act, oact, *pact = 0;
7615             struct target_old_sigaction *old_act;
7616             if (arg2) {
7617                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7618                     return -TARGET_EFAULT;
7619                 act._sa_handler = old_act->_sa_handler;
7620                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7621                 act.sa_flags = old_act->sa_flags;
7622                 act.sa_restorer = 0;
7623                 unlock_user_struct(old_act, arg2, 0);
7624                 pact = &act;
7625             }
7626             ret = get_errno(do_sigaction(arg1, pact, &oact));
7627             if (!is_error(ret) && arg3) {
7628                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7629                     return -TARGET_EFAULT;
7630                 old_act->_sa_handler = oact._sa_handler;
7631                 old_act->sa_mask = oact.sa_mask.sig[0];
7632                 old_act->sa_flags = oact.sa_flags;
7633                 unlock_user_struct(old_act, arg3, 1);
7634             }
7635 #elif defined(TARGET_MIPS)
7636 	    struct target_sigaction act, oact, *pact, *old_act;
7637 
7638 	    if (arg2) {
7639                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7640                     return -TARGET_EFAULT;
7641 		act._sa_handler = old_act->_sa_handler;
7642 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7643 		act.sa_flags = old_act->sa_flags;
7644 		unlock_user_struct(old_act, arg2, 0);
7645 		pact = &act;
7646 	    } else {
7647 		pact = NULL;
7648 	    }
7649 
7650 	    ret = get_errno(do_sigaction(arg1, pact, &oact));
7651 
7652 	    if (!is_error(ret) && arg3) {
7653                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7654                     return -TARGET_EFAULT;
7655 		old_act->_sa_handler = oact._sa_handler;
7656 		old_act->sa_flags = oact.sa_flags;
7657 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7658 		old_act->sa_mask.sig[1] = 0;
7659 		old_act->sa_mask.sig[2] = 0;
7660 		old_act->sa_mask.sig[3] = 0;
7661 		unlock_user_struct(old_act, arg3, 1);
7662 	    }
7663 #else
7664             struct target_old_sigaction *old_act;
7665             struct target_sigaction act, oact, *pact;
7666             if (arg2) {
7667                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7668                     return -TARGET_EFAULT;
7669                 act._sa_handler = old_act->_sa_handler;
7670                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7671                 act.sa_flags = old_act->sa_flags;
7672                 act.sa_restorer = old_act->sa_restorer;
7673 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7674                 act.ka_restorer = 0;
7675 #endif
7676                 unlock_user_struct(old_act, arg2, 0);
7677                 pact = &act;
7678             } else {
7679                 pact = NULL;
7680             }
7681             ret = get_errno(do_sigaction(arg1, pact, &oact));
7682             if (!is_error(ret) && arg3) {
7683                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7684                     return -TARGET_EFAULT;
7685                 old_act->_sa_handler = oact._sa_handler;
7686                 old_act->sa_mask = oact.sa_mask.sig[0];
7687                 old_act->sa_flags = oact.sa_flags;
7688                 old_act->sa_restorer = oact.sa_restorer;
7689                 unlock_user_struct(old_act, arg3, 1);
7690             }
7691 #endif
7692         }
7693         return ret;
7694 #endif
7695     case TARGET_NR_rt_sigaction:
7696         {
7697 #if defined(TARGET_ALPHA)
7698             /* For Alpha and SPARC this is a 5 argument syscall, with
7699              * a 'restorer' parameter which must be copied into the
7700              * sa_restorer field of the sigaction struct.
7701              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7702              * and arg5 is the sigsetsize.
7703              * Alpha also has a separate rt_sigaction struct that it uses
7704              * here; SPARC uses the usual sigaction struct.
7705              */
7706             struct target_rt_sigaction *rt_act;
7707             struct target_sigaction act, oact, *pact = 0;
7708 
7709             if (arg4 != sizeof(target_sigset_t)) {
7710                 return -TARGET_EINVAL;
7711             }
7712             if (arg2) {
7713                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7714                     return -TARGET_EFAULT;
7715                 act._sa_handler = rt_act->_sa_handler;
7716                 act.sa_mask = rt_act->sa_mask;
7717                 act.sa_flags = rt_act->sa_flags;
7718                 act.sa_restorer = arg5;
7719                 unlock_user_struct(rt_act, arg2, 0);
7720                 pact = &act;
7721             }
7722             ret = get_errno(do_sigaction(arg1, pact, &oact));
7723             if (!is_error(ret) && arg3) {
7724                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7725                     return -TARGET_EFAULT;
7726                 rt_act->_sa_handler = oact._sa_handler;
7727                 rt_act->sa_mask = oact.sa_mask;
7728                 rt_act->sa_flags = oact.sa_flags;
7729                 unlock_user_struct(rt_act, arg3, 1);
7730             }
7731 #else
7732 #ifdef TARGET_SPARC
7733             target_ulong restorer = arg4;
7734             target_ulong sigsetsize = arg5;
7735 #else
7736             target_ulong sigsetsize = arg4;
7737 #endif
7738             struct target_sigaction *act;
7739             struct target_sigaction *oact;
7740 
7741             if (sigsetsize != sizeof(target_sigset_t)) {
7742                 return -TARGET_EINVAL;
7743             }
7744             if (arg2) {
7745                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
7746                     return -TARGET_EFAULT;
7747                 }
7748 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7749                 act->ka_restorer = restorer;
7750 #endif
7751             } else {
7752                 act = NULL;
7753             }
7754             if (arg3) {
7755                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7756                     ret = -TARGET_EFAULT;
7757                     goto rt_sigaction_fail;
7758                 }
7759             } else
7760                 oact = NULL;
7761             ret = get_errno(do_sigaction(arg1, act, oact));
7762         rt_sigaction_fail:
7763             if (act)
7764                 unlock_user_struct(act, arg2, 0);
7765             if (oact)
7766                 unlock_user_struct(oact, arg3, 1);
7767 #endif
7768         }
7769         return ret;
7770 #ifdef TARGET_NR_sgetmask /* not on alpha */
7771     case TARGET_NR_sgetmask:
7772         {
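                 /*
                  * Old-style sgetmask: the current blocked-signal mask is
                  * returned directly in the syscall return value, in the
                  * old single-word sigset format.
                  */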
7773             sigset_t cur_set;
7774             abi_ulong target_set;
7775             ret = do_sigprocmask(0, NULL, &cur_set);
7776             if (!ret) {
7777                 host_to_target_old_sigset(&target_set, &cur_set);
7778                 ret = target_set;
7779             }
7780         }
7781         return ret;
7782 #endif
7783 #ifdef TARGET_NR_ssetmask /* not on alpha */
7784     case TARGET_NR_ssetmask:
7785         {
7786             sigset_t set, oset;
7787             abi_ulong target_set = arg1;
7788             target_to_host_old_sigset(&set, &target_set);
7789             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7790             if (!ret) {
7791                 host_to_target_old_sigset(&target_set, &oset);
7792                 ret = target_set;
7793             }
7794         }
7795         return ret;
7796 #endif
7797 #ifdef TARGET_NR_sigprocmask
7798     case TARGET_NR_sigprocmask:
7799         {
7800 #if defined(TARGET_ALPHA)
7801             sigset_t set, oldset;
7802             abi_ulong mask;
7803             int how;
7804 
7805             switch (arg1) {
7806             case TARGET_SIG_BLOCK:
7807                 how = SIG_BLOCK;
7808                 break;
7809             case TARGET_SIG_UNBLOCK:
7810                 how = SIG_UNBLOCK;
7811                 break;
7812             case TARGET_SIG_SETMASK:
7813                 how = SIG_SETMASK;
7814                 break;
7815             default:
7816                 return -TARGET_EINVAL;
7817             }
7818             mask = arg2;
7819             target_to_host_old_sigset(&set, &mask);
7820 
7821             ret = do_sigprocmask(how, &set, &oldset);
7822             if (!is_error(ret)) {
7823                 host_to_target_old_sigset(&mask, &oldset);
7824                 ret = mask;
7825                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7826             }
7827 #else
7828             sigset_t set, oldset, *set_ptr;
7829             int how;
7830 
7831             if (arg2) {
7832                 switch (arg1) {
7833                 case TARGET_SIG_BLOCK:
7834                     how = SIG_BLOCK;
7835                     break;
7836                 case TARGET_SIG_UNBLOCK:
7837                     how = SIG_UNBLOCK;
7838                     break;
7839                 case TARGET_SIG_SETMASK:
7840                     how = SIG_SETMASK;
7841                     break;
7842                 default:
7843                     return -TARGET_EINVAL;
7844                 }
7845                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7846                     return -TARGET_EFAULT;
7847                 target_to_host_old_sigset(&set, p);
7848                 unlock_user(p, arg2, 0);
7849                 set_ptr = &set;
7850             } else {
7851                 how = 0;
7852                 set_ptr = NULL;
7853             }
7854             ret = do_sigprocmask(how, set_ptr, &oldset);
7855             if (!is_error(ret) && arg3) {
7856                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7857                     return -TARGET_EFAULT;
7858                 host_to_target_old_sigset(p, &oldset);
7859                 unlock_user(p, arg3, sizeof(target_sigset_t));
7860             }
7861 #endif
7862         }
7863         return ret;
7864 #endif
7865     case TARGET_NR_rt_sigprocmask:
7866         {
7867             int how = arg1;
7868             sigset_t set, oldset, *set_ptr;
7869 
7870             if (arg4 != sizeof(target_sigset_t)) {
7871                 return -TARGET_EINVAL;
7872             }
7873 
7874             if (arg2) {
7875                 switch(how) {
7876                 case TARGET_SIG_BLOCK:
7877                     how = SIG_BLOCK;
7878                     break;
7879                 case TARGET_SIG_UNBLOCK:
7880                     how = SIG_UNBLOCK;
7881                     break;
7882                 case TARGET_SIG_SETMASK:
7883                     how = SIG_SETMASK;
7884                     break;
7885                 default:
7886                     return -TARGET_EINVAL;
7887                 }
7888                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7889                     return -TARGET_EFAULT;
7890                 target_to_host_sigset(&set, p);
7891                 unlock_user(p, arg2, 0);
7892                 set_ptr = &set;
7893             } else {
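                     /* No new set was supplied, so 'how' is ignored; we only
                      * query the current mask. */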
7894                 how = 0;
7895                 set_ptr = NULL;
7896             }
7897             ret = do_sigprocmask(how, set_ptr, &oldset);
7898             if (!is_error(ret) && arg3) {
7899                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7900                     return -TARGET_EFAULT;
7901                 host_to_target_sigset(p, &oldset);
7902                 unlock_user(p, arg3, sizeof(target_sigset_t));
7903             }
7904         }
7905         return ret;
7906 #ifdef TARGET_NR_sigpending
7907     case TARGET_NR_sigpending:
7908         {
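                 /* Report the pending signals back in the old single-word
                  * sigset format. */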
7909             sigset_t set;
7910             ret = get_errno(sigpending(&set));
7911             if (!is_error(ret)) {
7912                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7913                     return -TARGET_EFAULT;
7914                 host_to_target_old_sigset(p, &set);
7915                 unlock_user(p, arg1, sizeof(target_sigset_t));
7916             }
7917         }
7918         return ret;
7919 #endif
7920     case TARGET_NR_rt_sigpending:
7921         {
7922             sigset_t set;
7923 
7924             /* Yes, this check is >, not != like most. We follow the kernel's
7925              * logic and it does it like this because it implements
7926              * NR_sigpending through the same code path, and in that case
7927              * the old_sigset_t is smaller in size.
7928              */
7929             if (arg2 > sizeof(target_sigset_t)) {
7930                 return -TARGET_EINVAL;
7931             }
7932 
7933             ret = get_errno(sigpending(&set));
7934             if (!is_error(ret)) {
7935                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7936                     return -TARGET_EFAULT;
7937                 host_to_target_sigset(p, &set);
7938                 unlock_user(p, arg1, sizeof(target_sigset_t));
7939             }
7940         }
7941         return ret;
7942 #ifdef TARGET_NR_sigsuspend
7943     case TARGET_NR_sigsuspend:
7944         {
7945             TaskState *ts = cpu->opaque;
7946 #if defined(TARGET_ALPHA)
7947             abi_ulong mask = arg1;
7948             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
7949 #else
7950             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7951                 return -TARGET_EFAULT;
7952             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
7953             unlock_user(p, arg1, 0);
7954 #endif
7955             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7956                                                SIGSET_T_SIZE));
7957             if (ret != -TARGET_ERESTARTSYS) {
7958                 ts->in_sigsuspend = 1;
7959             }
7960         }
7961         return ret;
7962 #endif
7963     case TARGET_NR_rt_sigsuspend:
7964         {
7965             TaskState *ts = cpu->opaque;
7966 
7967             if (arg2 != sizeof(target_sigset_t)) {
7968                 return -TARGET_EINVAL;
7969             }
7970             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7971                 return -TARGET_EFAULT;
7972             target_to_host_sigset(&ts->sigsuspend_mask, p);
7973             unlock_user(p, arg1, 0);
7974             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7975                                                SIGSET_T_SIZE));
7976             if (ret != -TARGET_ERESTARTSYS) {
7977                 ts->in_sigsuspend = 1;
7978             }
7979         }
7980         return ret;
7981     case TARGET_NR_rt_sigtimedwait:
7982         {
7983             sigset_t set;
7984             struct timespec uts, *puts;
7985             siginfo_t uinfo;
7986 
7987             if (arg4 != sizeof(target_sigset_t)) {
7988                 return -TARGET_EINVAL;
7989             }
7990 
7991             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7992                 return -TARGET_EFAULT;
7993             target_to_host_sigset(&set, p);
7994             unlock_user(p, arg1, 0);
7995             if (arg3) {
7996                 puts = &uts;
7997                 target_to_host_timespec(puts, arg3);
7998             } else {
7999                 puts = NULL;
8000             }
8001             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8002                                                  SIGSET_T_SIZE));
8003             if (!is_error(ret)) {
8004                 if (arg2) {
8005                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8006                                   0);
8007                     if (!p) {
8008                         return -TARGET_EFAULT;
8009                     }
8010                     host_to_target_siginfo(p, &uinfo);
8011                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8012                 }
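                     /* On success the host returns the signal number, which
                      * must be mapped to the target's signal numbering. */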
8013                 ret = host_to_target_signal(ret);
8014             }
8015         }
8016         return ret;
8017     case TARGET_NR_rt_sigqueueinfo:
8018         {
8019             siginfo_t uinfo;
8020 
8021             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8022             if (!p) {
8023                 return -TARGET_EFAULT;
8024             }
8025             target_to_host_siginfo(&uinfo, p);
8026             unlock_user(p, arg3, 0);
8027             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8028         }
8029         return ret;
8030     case TARGET_NR_rt_tgsigqueueinfo:
8031         {
8032             siginfo_t uinfo;
8033 
8034             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8035             if (!p) {
8036                 return -TARGET_EFAULT;
8037             }
8038             target_to_host_siginfo(&uinfo, p);
8039             unlock_user(p, arg4, 0);
8040             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8041         }
8042         return ret;
8043 #ifdef TARGET_NR_sigreturn
8044     case TARGET_NR_sigreturn:
8045         if (block_signals()) {
8046             return -TARGET_ERESTARTSYS;
8047         }
8048         return do_sigreturn(cpu_env);
8049 #endif
8050     case TARGET_NR_rt_sigreturn:
8051         if (block_signals()) {
8052             return -TARGET_ERESTARTSYS;
8053         }
8054         return do_rt_sigreturn(cpu_env);
8055     case TARGET_NR_sethostname:
8056         if (!(p = lock_user_string(arg1)))
8057             return -TARGET_EFAULT;
8058         ret = get_errno(sethostname(p, arg2));
8059         unlock_user(p, arg1, 0);
8060         return ret;
8061 #ifdef TARGET_NR_setrlimit
8062     case TARGET_NR_setrlimit:
8063         {
8064             int resource = target_to_host_resource(arg1);
8065             struct target_rlimit *target_rlim;
8066             struct rlimit rlim;
8067             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8068                 return -TARGET_EFAULT;
8069             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8070             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8071             unlock_user_struct(target_rlim, arg2, 0);
8072             /*
8073              * If we just passed through resource limit settings for memory then
8074              * they would also apply to QEMU's own allocations, and QEMU will
8075              * crash or hang or die if its allocations fail. Ideally we would
8076              * track the guest allocations in QEMU and apply the limits ourselves.
8077              * For now, just tell the guest the call succeeded but don't actually
8078              * limit anything.
8079              */
8080             if (resource != RLIMIT_AS &&
8081                 resource != RLIMIT_DATA &&
8082                 resource != RLIMIT_STACK) {
8083                 return get_errno(setrlimit(resource, &rlim));
8084             } else {
8085                 return 0;
8086             }
8087         }
8088 #endif
8089 #ifdef TARGET_NR_getrlimit
8090     case TARGET_NR_getrlimit:
8091         {
8092             int resource = target_to_host_resource(arg1);
8093             struct target_rlimit *target_rlim;
8094             struct rlimit rlim;
8095 
8096             ret = get_errno(getrlimit(resource, &rlim));
8097             if (!is_error(ret)) {
8098                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8099                     return -TARGET_EFAULT;
8100                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8101                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8102                 unlock_user_struct(target_rlim, arg2, 1);
8103             }
8104         }
8105         return ret;
8106 #endif
8107     case TARGET_NR_getrusage:
8108         {
8109             struct rusage rusage;
8110             ret = get_errno(getrusage(arg1, &rusage));
8111             if (!is_error(ret)) {
8112                 ret = host_to_target_rusage(arg2, &rusage);
8113             }
8114         }
8115         return ret;
8116     case TARGET_NR_gettimeofday:
8117         {
8118             struct timeval tv;
8119             ret = get_errno(gettimeofday(&tv, NULL));
8120             if (!is_error(ret)) {
8121                 if (copy_to_user_timeval(arg1, &tv))
8122                     return -TARGET_EFAULT;
8123             }
8124         }
8125         return ret;
8126     case TARGET_NR_settimeofday:
8127         {
8128             struct timeval tv, *ptv = NULL;
8129             struct timezone tz, *ptz = NULL;
8130 
8131             if (arg1) {
8132                 if (copy_from_user_timeval(&tv, arg1)) {
8133                     return -TARGET_EFAULT;
8134                 }
8135                 ptv = &tv;
8136             }
8137 
8138             if (arg2) {
8139                 if (copy_from_user_timezone(&tz, arg2)) {
8140                     return -TARGET_EFAULT;
8141                 }
8142                 ptz = &tz;
8143             }
8144 
8145             return get_errno(settimeofday(ptv, ptz));
8146         }
8147 #if defined(TARGET_NR_select)
8148     case TARGET_NR_select:
8149 #if defined(TARGET_WANT_NI_OLD_SELECT)
8150         /* Some architectures used to implement old_select here
8151          * but now return ENOSYS for it.
8152          */
8153         ret = -TARGET_ENOSYS;
8154 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8155         ret = do_old_select(arg1);
8156 #else
8157         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8158 #endif
8159         return ret;
8160 #endif
8161 #ifdef TARGET_NR_pselect6
8162     case TARGET_NR_pselect6:
8163         {
8164             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8165             fd_set rfds, wfds, efds;
8166             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8167             struct timespec ts, *ts_ptr;
8168 
8169             /*
8170              * The 6th arg is actually two args smashed together,
8171              * so we cannot use the C library.
8172              */
8173             sigset_t set;
8174             struct {
8175                 sigset_t *set;
8176                 size_t size;
8177             } sig, *sig_ptr;
8178 
8179             abi_ulong arg_sigset, arg_sigsize, *arg7;
8180             target_sigset_t *target_sigset;
8181 
8182             n = arg1;
8183             rfd_addr = arg2;
8184             wfd_addr = arg3;
8185             efd_addr = arg4;
8186             ts_addr = arg5;
8187 
8188             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8189             if (ret) {
8190                 return ret;
8191             }
8192             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8193             if (ret) {
8194                 return ret;
8195             }
8196             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8197             if (ret) {
8198                 return ret;
8199             }
8200 
8201             /*
8202              * This takes a timespec, and not a timeval, so we cannot
8203              * use the do_select() helper ...
8204              */
8205             if (ts_addr) {
8206                 if (target_to_host_timespec(&ts, ts_addr)) {
8207                     return -TARGET_EFAULT;
8208                 }
8209                 ts_ptr = &ts;
8210             } else {
8211                 ts_ptr = NULL;
8212             }
8213 
8214             /* Extract the two packed args for the sigset */
8215             if (arg6) {
8216                 sig_ptr = &sig;
8217                 sig.size = SIGSET_T_SIZE;
8218 
8219                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8220                 if (!arg7) {
8221                     return -TARGET_EFAULT;
8222                 }
8223                 arg_sigset = tswapal(arg7[0]);
8224                 arg_sigsize = tswapal(arg7[1]);
8225                 unlock_user(arg7, arg6, 0);
8226 
8227                 if (arg_sigset) {
8228                     sig.set = &set;
8229                     if (arg_sigsize != sizeof(*target_sigset)) {
8230                         /* Like the kernel, we enforce correct size sigsets */
8231                         return -TARGET_EINVAL;
8232                     }
8233                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8234                                               sizeof(*target_sigset), 1);
8235                     if (!target_sigset) {
8236                         return -TARGET_EFAULT;
8237                     }
8238                     target_to_host_sigset(&set, target_sigset);
8239                     unlock_user(target_sigset, arg_sigset, 0);
8240                 } else {
8241                     sig.set = NULL;
8242                 }
8243             } else {
8244                 sig_ptr = NULL;
8245             }
8246 
8247             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8248                                           ts_ptr, sig_ptr));
8249 
8250             if (!is_error(ret)) {
8251                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8252                     return -TARGET_EFAULT;
8253                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8254                     return -TARGET_EFAULT;
8255                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8256                     return -TARGET_EFAULT;
8257 
8258                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8259                     return -TARGET_EFAULT;
8260             }
8261         }
8262         return ret;
8263 #endif
8264 #ifdef TARGET_NR_symlink
8265     case TARGET_NR_symlink:
8266         {
8267             void *p2;
8268             p = lock_user_string(arg1);
8269             p2 = lock_user_string(arg2);
8270             if (!p || !p2)
8271                 ret = -TARGET_EFAULT;
8272             else
8273                 ret = get_errno(symlink(p, p2));
8274             unlock_user(p2, arg2, 0);
8275             unlock_user(p, arg1, 0);
8276         }
8277         return ret;
8278 #endif
8279 #if defined(TARGET_NR_symlinkat)
8280     case TARGET_NR_symlinkat:
8281         {
8282             void *p2;
8283             p  = lock_user_string(arg1);
8284             p2 = lock_user_string(arg3);
8285             if (!p || !p2)
8286                 ret = -TARGET_EFAULT;
8287             else
8288                 ret = get_errno(symlinkat(p, arg2, p2));
8289             unlock_user(p2, arg3, 0);
8290             unlock_user(p, arg1, 0);
8291         }
8292         return ret;
8293 #endif
8294 #ifdef TARGET_NR_readlink
8295     case TARGET_NR_readlink:
8296         {
8297             void *p2;
8298             p = lock_user_string(arg1);
8299             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8300             if (!p || !p2) {
8301                 ret = -TARGET_EFAULT;
8302             } else if (!arg3) {
8303                 /* Zero-length buffer: fail with EINVAL before the magic exe check. */
8304                 ret = -TARGET_EINVAL;
8305             } else if (is_proc_myself((const char *)p, "exe")) {
8306                 char real[PATH_MAX], *temp;
8307                 temp = realpath(exec_path, real);
8308                 /* Return value is # of bytes that we wrote to the buffer. */
8309                 if (temp == NULL) {
8310                     ret = get_errno(-1);
8311                 } else {
8312                     /* Don't worry about sign mismatch as earlier mapping
8313                      * logic would have thrown a bad address error. */
8314                     ret = MIN(strlen(real), arg3);
8315                     /* We cannot NUL terminate the string. */
8316                     memcpy(p2, real, ret);
8317                 }
8318             } else {
8319                 ret = get_errno(readlink(path(p), p2, arg3));
8320             }
8321             unlock_user(p2, arg2, ret);
8322             unlock_user(p, arg1, 0);
8323         }
8324         return ret;
8325 #endif
8326 #if defined(TARGET_NR_readlinkat)
8327     case TARGET_NR_readlinkat:
8328         {
8329             void *p2;
8330             p  = lock_user_string(arg2);
8331             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8332             if (!p || !p2) {
8333                 ret = -TARGET_EFAULT;
8334             } else if (is_proc_myself((const char *)p, "exe")) {
8335                 char real[PATH_MAX], *temp;
8336                 temp = realpath(exec_path, real);
8337                 ret = temp == NULL ? get_errno(-1) : strlen(real);
8338                 snprintf((char *)p2, arg4, "%s", real);
8339             } else {
8340                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8341             }
8342             unlock_user(p2, arg3, ret);
8343             unlock_user(p, arg2, 0);
8344         }
8345         return ret;
8346 #endif
8347 #ifdef TARGET_NR_swapon
8348     case TARGET_NR_swapon:
8349         if (!(p = lock_user_string(arg1)))
8350             return -TARGET_EFAULT;
8351         ret = get_errno(swapon(p, arg2));
8352         unlock_user(p, arg1, 0);
8353         return ret;
8354 #endif
8355     case TARGET_NR_reboot:
8356         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8357             /* arg4 is only used for RESTART2; ignored in all other cases */
8358             p = lock_user_string(arg4);
8359             if (!p) {
8360                 return -TARGET_EFAULT;
8361             }
8362             ret = get_errno(reboot(arg1, arg2, arg3, p));
8363             unlock_user(p, arg4, 0);
8364         } else {
8365             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8366         }
8367         return ret;
8368 #ifdef TARGET_NR_mmap
8369     case TARGET_NR_mmap:
8370 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8371     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8372     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8373     || defined(TARGET_S390X)
8374         {
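                 /*
                  * On these targets the old mmap syscall passes a pointer to
                  * a block of six arguments in guest memory instead of
                  * passing them in registers, so unpack them first.
                  */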
8375             abi_ulong *v;
8376             abi_ulong v1, v2, v3, v4, v5, v6;
8377             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8378                 return -TARGET_EFAULT;
8379             v1 = tswapal(v[0]);
8380             v2 = tswapal(v[1]);
8381             v3 = tswapal(v[2]);
8382             v4 = tswapal(v[3]);
8383             v5 = tswapal(v[4]);
8384             v6 = tswapal(v[5]);
8385             unlock_user(v, arg1, 0);
8386             ret = get_errno(target_mmap(v1, v2, v3,
8387                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8388                                         v5, v6));
8389         }
8390 #else
8391         ret = get_errno(target_mmap(arg1, arg2, arg3,
8392                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8393                                     arg5,
8394                                     arg6));
8395 #endif
8396         return ret;
8397 #endif
8398 #ifdef TARGET_NR_mmap2
8399     case TARGET_NR_mmap2:
8400 #ifndef MMAP_SHIFT
8401 #define MMAP_SHIFT 12
8402 #endif
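             /*
              * The mmap2 offset argument is in units of 1 << MMAP_SHIFT
              * (4096 bytes by default), so convert it back to a byte offset.
              */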
8403         ret = target_mmap(arg1, arg2, arg3,
8404                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8405                           arg5, arg6 << MMAP_SHIFT);
8406         return get_errno(ret);
8407 #endif
8408     case TARGET_NR_munmap:
8409         return get_errno(target_munmap(arg1, arg2));
8410     case TARGET_NR_mprotect:
8411         {
8412             TaskState *ts = cpu->opaque;
8413             /* Special hack to detect libc making the stack executable.  */
8414             if ((arg3 & PROT_GROWSDOWN)
8415                 && arg1 >= ts->info->stack_limit
8416                 && arg1 <= ts->info->start_stack) {
8417                 arg3 &= ~PROT_GROWSDOWN;
8418                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8419                 arg1 = ts->info->stack_limit;
8420             }
8421         }
8422         return get_errno(target_mprotect(arg1, arg2, arg3));
8423 #ifdef TARGET_NR_mremap
8424     case TARGET_NR_mremap:
8425         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8426 #endif
8427         /* ??? msync/mlock/munlock are broken for softmmu.  */
8428 #ifdef TARGET_NR_msync
8429     case TARGET_NR_msync:
8430         return get_errno(msync(g2h(arg1), arg2, arg3));
8431 #endif
8432 #ifdef TARGET_NR_mlock
8433     case TARGET_NR_mlock:
8434         return get_errno(mlock(g2h(arg1), arg2));
8435 #endif
8436 #ifdef TARGET_NR_munlock
8437     case TARGET_NR_munlock:
8438         return get_errno(munlock(g2h(arg1), arg2));
8439 #endif
8440 #ifdef TARGET_NR_mlockall
8441     case TARGET_NR_mlockall:
8442         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8443 #endif
8444 #ifdef TARGET_NR_munlockall
8445     case TARGET_NR_munlockall:
8446         return get_errno(munlockall());
8447 #endif
8448 #ifdef TARGET_NR_truncate
8449     case TARGET_NR_truncate:
8450         if (!(p = lock_user_string(arg1)))
8451             return -TARGET_EFAULT;
8452         ret = get_errno(truncate(p, arg2));
8453         unlock_user(p, arg1, 0);
8454         return ret;
8455 #endif
8456 #ifdef TARGET_NR_ftruncate
8457     case TARGET_NR_ftruncate:
8458         return get_errno(ftruncate(arg1, arg2));
8459 #endif
8460     case TARGET_NR_fchmod:
8461         return get_errno(fchmod(arg1, arg2));
8462 #if defined(TARGET_NR_fchmodat)
8463     case TARGET_NR_fchmodat:
8464         if (!(p = lock_user_string(arg2)))
8465             return -TARGET_EFAULT;
8466         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8467         unlock_user(p, arg2, 0);
8468         return ret;
8469 #endif
8470     case TARGET_NR_getpriority:
8471         /* Note that negative values are valid for getpriority, so we must
8472            differentiate based on errno settings.  */
8473         errno = 0;
8474         ret = getpriority(arg1, arg2);
8475         if (ret == -1 && errno != 0) {
8476             return -host_to_target_errno(errno);
8477         }
8478 #ifdef TARGET_ALPHA
8479         /* Return value is the unbiased priority.  Signal no error.  */
8480         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8481 #else
8482         /* Return value is a biased priority to avoid negative numbers.  */
8483         ret = 20 - ret;
8484 #endif
8485         return ret;
8486     case TARGET_NR_setpriority:
8487         return get_errno(setpriority(arg1, arg2, arg3));
8488 #ifdef TARGET_NR_statfs
8489     case TARGET_NR_statfs:
8490         if (!(p = lock_user_string(arg1))) {
8491             return -TARGET_EFAULT;
8492         }
8493         ret = get_errno(statfs(path(p), &stfs));
8494         unlock_user(p, arg1, 0);
8495     convert_statfs:
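             /* TARGET_NR_fstatfs jumps here to share the statfs conversion. */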
8496         if (!is_error(ret)) {
8497             struct target_statfs *target_stfs;
8498 
8499             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8500                 return -TARGET_EFAULT;
8501             __put_user(stfs.f_type, &target_stfs->f_type);
8502             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8503             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8504             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8505             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8506             __put_user(stfs.f_files, &target_stfs->f_files);
8507             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8508             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8509             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8510             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8511             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8512 #ifdef _STATFS_F_FLAGS
8513             __put_user(stfs.f_flags, &target_stfs->f_flags);
8514 #else
8515             __put_user(0, &target_stfs->f_flags);
8516 #endif
8517             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8518             unlock_user_struct(target_stfs, arg2, 1);
8519         }
8520         return ret;
8521 #endif
8522 #ifdef TARGET_NR_fstatfs
8523     case TARGET_NR_fstatfs:
8524         ret = get_errno(fstatfs(arg1, &stfs));
8525         goto convert_statfs;
8526 #endif
8527 #ifdef TARGET_NR_statfs64
8528     case TARGET_NR_statfs64:
8529         if (!(p = lock_user_string(arg1))) {
8530             return -TARGET_EFAULT;
8531         }
8532         ret = get_errno(statfs(path(p), &stfs));
8533         unlock_user(p, arg1, 0);
8534     convert_statfs64:
8535         if (!is_error(ret)) {
8536             struct target_statfs64 *target_stfs;
8537 
8538             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8539                 return -TARGET_EFAULT;
8540             __put_user(stfs.f_type, &target_stfs->f_type);
8541             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8542             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8543             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8544             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8545             __put_user(stfs.f_files, &target_stfs->f_files);
8546             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8547             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8548             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8549             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8550             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8551             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8552             unlock_user_struct(target_stfs, arg3, 1);
8553         }
8554         return ret;
8555     case TARGET_NR_fstatfs64:
8556         ret = get_errno(fstatfs(arg1, &stfs));
8557         goto convert_statfs64;
8558 #endif
8559 #ifdef TARGET_NR_socketcall
8560     case TARGET_NR_socketcall:
8561         return do_socketcall(arg1, arg2);
8562 #endif
8563 #ifdef TARGET_NR_accept
8564     case TARGET_NR_accept:
8565         return do_accept4(arg1, arg2, arg3, 0);
8566 #endif
8567 #ifdef TARGET_NR_accept4
8568     case TARGET_NR_accept4:
8569         return do_accept4(arg1, arg2, arg3, arg4);
8570 #endif
8571 #ifdef TARGET_NR_bind
8572     case TARGET_NR_bind:
8573         return do_bind(arg1, arg2, arg3);
8574 #endif
8575 #ifdef TARGET_NR_connect
8576     case TARGET_NR_connect:
8577         return do_connect(arg1, arg2, arg3);
8578 #endif
8579 #ifdef TARGET_NR_getpeername
8580     case TARGET_NR_getpeername:
8581         return do_getpeername(arg1, arg2, arg3);
8582 #endif
8583 #ifdef TARGET_NR_getsockname
8584     case TARGET_NR_getsockname:
8585         return do_getsockname(arg1, arg2, arg3);
8586 #endif
8587 #ifdef TARGET_NR_getsockopt
8588     case TARGET_NR_getsockopt:
8589         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8590 #endif
8591 #ifdef TARGET_NR_listen
8592     case TARGET_NR_listen:
8593         return get_errno(listen(arg1, arg2));
8594 #endif
8595 #ifdef TARGET_NR_recv
8596     case TARGET_NR_recv:
8597         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8598 #endif
8599 #ifdef TARGET_NR_recvfrom
8600     case TARGET_NR_recvfrom:
8601         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8602 #endif
8603 #ifdef TARGET_NR_recvmsg
8604     case TARGET_NR_recvmsg:
8605         return do_sendrecvmsg(arg1, arg2, arg3, 0);
8606 #endif
8607 #ifdef TARGET_NR_send
8608     case TARGET_NR_send:
8609         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8610 #endif
8611 #ifdef TARGET_NR_sendmsg
8612     case TARGET_NR_sendmsg:
8613         return do_sendrecvmsg(arg1, arg2, arg3, 1);
8614 #endif
8615 #ifdef TARGET_NR_sendmmsg
8616     case TARGET_NR_sendmmsg:
8617         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8618     case TARGET_NR_recvmmsg:
8619         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8620 #endif
8621 #ifdef TARGET_NR_sendto
8622     case TARGET_NR_sendto:
8623         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8624 #endif
8625 #ifdef TARGET_NR_shutdown
8626     case TARGET_NR_shutdown:
8627         return get_errno(shutdown(arg1, arg2));
8628 #endif
8629 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8630     case TARGET_NR_getrandom:
8631         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8632         if (!p) {
8633             return -TARGET_EFAULT;
8634         }
8635         ret = get_errno(getrandom(p, arg2, arg3));
8636         unlock_user(p, arg1, ret);
8637         return ret;
8638 #endif
8639 #ifdef TARGET_NR_socket
8640     case TARGET_NR_socket:
8641         return do_socket(arg1, arg2, arg3);
8642 #endif
8643 #ifdef TARGET_NR_socketpair
8644     case TARGET_NR_socketpair:
8645         return do_socketpair(arg1, arg2, arg3, arg4);
8646 #endif
8647 #ifdef TARGET_NR_setsockopt
8648     case TARGET_NR_setsockopt:
8649         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8650 #endif
8651 #if defined(TARGET_NR_syslog)
8652     case TARGET_NR_syslog:
8653         {
8654             int len = arg2;
8655 
8656             switch (arg1) {
8657             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
8658             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
8659             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
8660             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
8661             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
8662             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8663             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
8664             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
8665                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8666             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
8667             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
8668             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
8669                 {
8670                     if (len < 0) {
8671                         return -TARGET_EINVAL;
8672                     }
8673                     if (len == 0) {
8674                         return 0;
8675                     }
8676                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8677                     if (!p) {
8678                         return -TARGET_EFAULT;
8679                     }
8680                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8681                     unlock_user(p, arg2, arg3);
8682                 }
8683                 return ret;
8684             default:
8685                 return -TARGET_EINVAL;
8686             }
8687         }
8688         break;
8689 #endif
8690     case TARGET_NR_setitimer:
8691         {
8692             struct itimerval value, ovalue, *pvalue;
8693 
8694             if (arg2) {
8695                 pvalue = &value;
8696                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8697                     || copy_from_user_timeval(&pvalue->it_value,
8698                                               arg2 + sizeof(struct target_timeval)))
8699                     return -TARGET_EFAULT;
8700             } else {
8701                 pvalue = NULL;
8702             }
8703             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8704             if (!is_error(ret) && arg3) {
8705                 if (copy_to_user_timeval(arg3,
8706                                          &ovalue.it_interval)
8707                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8708                                             &ovalue.it_value))
8709                     return -TARGET_EFAULT;
8710             }
8711         }
8712         return ret;
8713     case TARGET_NR_getitimer:
8714         {
8715             struct itimerval value;
8716 
8717             ret = get_errno(getitimer(arg1, &value));
8718             if (!is_error(ret) && arg2) {
8719                 if (copy_to_user_timeval(arg2,
8720                                          &value.it_interval)
8721                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8722                                             &value.it_value))
8723                     return -TARGET_EFAULT;
8724             }
8725         }
8726         return ret;
8727 #ifdef TARGET_NR_stat
8728     case TARGET_NR_stat:
8729         if (!(p = lock_user_string(arg1))) {
8730             return -TARGET_EFAULT;
8731         }
8732         ret = get_errno(stat(path(p), &st));
8733         unlock_user(p, arg1, 0);
8734         goto do_stat;
8735 #endif
8736 #ifdef TARGET_NR_lstat
8737     case TARGET_NR_lstat:
8738         if (!(p = lock_user_string(arg1))) {
8739             return -TARGET_EFAULT;
8740         }
8741         ret = get_errno(lstat(path(p), &st));
8742         unlock_user(p, arg1, 0);
8743         goto do_stat;
8744 #endif
8745 #ifdef TARGET_NR_fstat
8746     case TARGET_NR_fstat:
8747         {
8748             ret = get_errno(fstat(arg1, &st));
8749 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8750         do_stat:
8751 #endif
8752             if (!is_error(ret)) {
8753                 struct target_stat *target_st;
8754 
8755                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8756                     return -TARGET_EFAULT;
8757                 memset(target_st, 0, sizeof(*target_st));
8758                 __put_user(st.st_dev, &target_st->st_dev);
8759                 __put_user(st.st_ino, &target_st->st_ino);
8760                 __put_user(st.st_mode, &target_st->st_mode);
8761                 __put_user(st.st_uid, &target_st->st_uid);
8762                 __put_user(st.st_gid, &target_st->st_gid);
8763                 __put_user(st.st_nlink, &target_st->st_nlink);
8764                 __put_user(st.st_rdev, &target_st->st_rdev);
8765                 __put_user(st.st_size, &target_st->st_size);
8766                 __put_user(st.st_blksize, &target_st->st_blksize);
8767                 __put_user(st.st_blocks, &target_st->st_blocks);
8768                 __put_user(st.st_atime, &target_st->target_st_atime);
8769                 __put_user(st.st_mtime, &target_st->target_st_mtime);
8770                 __put_user(st.st_ctime, &target_st->target_st_ctime);
8771                 unlock_user_struct(target_st, arg2, 1);
8772             }
8773         }
8774         return ret;
8775 #endif
8776     case TARGET_NR_vhangup:
8777         return get_errno(vhangup());
8778 #ifdef TARGET_NR_syscall
8779     case TARGET_NR_syscall:
8780         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8781                           arg6, arg7, arg8, 0);
8782 #endif
8783     case TARGET_NR_wait4:
8784         {
8785             int status;
8786             abi_long status_ptr = arg2;
8787             struct rusage rusage, *rusage_ptr;
8788             abi_ulong target_rusage = arg4;
8789             abi_long rusage_err;
8790             if (target_rusage)
8791                 rusage_ptr = &rusage;
8792             else
8793                 rusage_ptr = NULL;
8794             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8795             if (!is_error(ret)) {
8796                 if (status_ptr && ret) {
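                         /* Convert the host wait-status encoding to the
                          * target's layout before writing it back. */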
8797                     status = host_to_target_waitstatus(status);
8798                     if (put_user_s32(status, status_ptr))
8799                         return -TARGET_EFAULT;
8800                 }
8801                 if (target_rusage) {
8802                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
8803                     if (rusage_err) {
8804                         ret = rusage_err;
8805                     }
8806                 }
8807             }
8808         }
8809         return ret;
8810 #ifdef TARGET_NR_swapoff
8811     case TARGET_NR_swapoff:
8812         if (!(p = lock_user_string(arg1)))
8813             return -TARGET_EFAULT;
8814         ret = get_errno(swapoff(p));
8815         unlock_user(p, arg1, 0);
8816         return ret;
8817 #endif
8818     case TARGET_NR_sysinfo:
8819         {
8820             struct target_sysinfo *target_value;
8821             struct sysinfo value;
8822             ret = get_errno(sysinfo(&value));
8823             if (!is_error(ret) && arg1)
8824             {
8825                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8826                     return -TARGET_EFAULT;
8827                 __put_user(value.uptime, &target_value->uptime);
8828                 __put_user(value.loads[0], &target_value->loads[0]);
8829                 __put_user(value.loads[1], &target_value->loads[1]);
8830                 __put_user(value.loads[2], &target_value->loads[2]);
8831                 __put_user(value.totalram, &target_value->totalram);
8832                 __put_user(value.freeram, &target_value->freeram);
8833                 __put_user(value.sharedram, &target_value->sharedram);
8834                 __put_user(value.bufferram, &target_value->bufferram);
8835                 __put_user(value.totalswap, &target_value->totalswap);
8836                 __put_user(value.freeswap, &target_value->freeswap);
8837                 __put_user(value.procs, &target_value->procs);
8838                 __put_user(value.totalhigh, &target_value->totalhigh);
8839                 __put_user(value.freehigh, &target_value->freehigh);
8840                 __put_user(value.mem_unit, &target_value->mem_unit);
8841                 unlock_user_struct(target_value, arg1, 1);
8842             }
8843         }
8844         return ret;
8845 #ifdef TARGET_NR_ipc
8846     case TARGET_NR_ipc:
8847         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
8848 #endif
8849 #ifdef TARGET_NR_semget
8850     case TARGET_NR_semget:
8851         return get_errno(semget(arg1, arg2, arg3));
8852 #endif
8853 #ifdef TARGET_NR_semop
8854     case TARGET_NR_semop:
8855         return do_semop(arg1, arg2, arg3);
8856 #endif
8857 #ifdef TARGET_NR_semctl
8858     case TARGET_NR_semctl:
8859         return do_semctl(arg1, arg2, arg3, arg4);
8860 #endif
8861 #ifdef TARGET_NR_msgctl
8862     case TARGET_NR_msgctl:
8863         return do_msgctl(arg1, arg2, arg3);
8864 #endif
8865 #ifdef TARGET_NR_msgget
8866     case TARGET_NR_msgget:
8867         return get_errno(msgget(arg1, arg2));
8868 #endif
8869 #ifdef TARGET_NR_msgrcv
8870     case TARGET_NR_msgrcv:
8871         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8872 #endif
8873 #ifdef TARGET_NR_msgsnd
8874     case TARGET_NR_msgsnd:
8875         return do_msgsnd(arg1, arg2, arg3, arg4);
8876 #endif
8877 #ifdef TARGET_NR_shmget
8878     case TARGET_NR_shmget:
8879         return get_errno(shmget(arg1, arg2, arg3));
8880 #endif
8881 #ifdef TARGET_NR_shmctl
8882     case TARGET_NR_shmctl:
8883         return do_shmctl(arg1, arg2, arg3);
8884 #endif
8885 #ifdef TARGET_NR_shmat
8886     case TARGET_NR_shmat:
8887         return do_shmat(cpu_env, arg1, arg2, arg3);
8888 #endif
8889 #ifdef TARGET_NR_shmdt
8890     case TARGET_NR_shmdt:
8891         return do_shmdt(arg1);
8892 #endif
8893     case TARGET_NR_fsync:
8894         return get_errno(fsync(arg1));
8895     case TARGET_NR_clone:
8896         /* Linux manages to have three different orderings for its
8897          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8898          * match the kernel's CONFIG_CLONE_* settings.
8899          * Microblaze is further special in that it uses a sixth
8900          * implicit argument to clone for the TLS pointer.
8901          */
8902 #if defined(TARGET_MICROBLAZE)
8903         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
8904 #elif defined(TARGET_CLONE_BACKWARDS)
8905         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
8906 #elif defined(TARGET_CLONE_BACKWARDS2)
8907         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
8908 #else
8909         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
8910 #endif
8911         return ret;
8912 #ifdef __NR_exit_group
8913         /* new thread calls */
8914     case TARGET_NR_exit_group:
8915         preexit_cleanup(cpu_env, arg1);
8916         return get_errno(exit_group(arg1));
8917 #endif
8918     case TARGET_NR_setdomainname:
8919         if (!(p = lock_user_string(arg1)))
8920             return -TARGET_EFAULT;
8921         ret = get_errno(setdomainname(p, arg2));
8922         unlock_user(p, arg1, 0);
8923         return ret;
8924     case TARGET_NR_uname:
8925         /* no need to transcode because we use the linux syscall */
8926         {
8927             struct new_utsname * buf;
8928 
8929             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
8930                 return -TARGET_EFAULT;
8931             ret = get_errno(sys_uname(buf));
8932             if (!is_error(ret)) {
8933                 /* Overwrite the native machine name with whatever is being
8934                    emulated. */
8935                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
8936                           sizeof(buf->machine));
8937                 /* Allow the user to override the reported release.  */
8938                 if (qemu_uname_release && *qemu_uname_release) {
8939                     g_strlcpy(buf->release, qemu_uname_release,
8940                               sizeof(buf->release));
8941                 }
8942             }
8943             unlock_user_struct(buf, arg1, 1);
8944         }
8945         return ret;
8946 #ifdef TARGET_I386
8947     case TARGET_NR_modify_ldt:
8948         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
8949 #if !defined(TARGET_X86_64)
8950     case TARGET_NR_vm86:
8951         return do_vm86(cpu_env, arg1, arg2);
8952 #endif
8953 #endif
8954     case TARGET_NR_adjtimex:
8955         {
8956             struct timex host_buf;
8957 
8958             if (target_to_host_timex(&host_buf, arg1) != 0) {
8959                 return -TARGET_EFAULT;
8960             }
8961             ret = get_errno(adjtimex(&host_buf));
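                 /* adjtimex() also returns updated data in the struct, so
                  * copy it back to the guest on success. */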
8962             if (!is_error(ret)) {
8963                 if (host_to_target_timex(arg1, &host_buf) != 0) {
8964                     return -TARGET_EFAULT;
8965                 }
8966             }
8967         }
8968         return ret;
8969 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
8970     case TARGET_NR_clock_adjtime:
8971         {
8972             struct timex htx, *phtx = &htx;
8973 
8974             if (target_to_host_timex(phtx, arg2) != 0) {
8975                 return -TARGET_EFAULT;
8976             }
8977             ret = get_errno(clock_adjtime(arg1, phtx));
8978             if (!is_error(ret) && phtx) {
8979                 if (host_to_target_timex(arg2, phtx) != 0) {
8980                     return -TARGET_EFAULT;
8981                 }
8982             }
8983         }
8984         return ret;
8985 #endif
8986     case TARGET_NR_getpgid:
8987         return get_errno(getpgid(arg1));
8988     case TARGET_NR_fchdir:
8989         return get_errno(fchdir(arg1));
8990     case TARGET_NR_personality:
8991         return get_errno(personality(arg1));
8992 #ifdef TARGET_NR__llseek /* Not on alpha */
8993     case TARGET_NR__llseek:
8994         {
8995             int64_t res;
8996 #if !defined(__NR_llseek)
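                 /* Hosts without __NR_llseek are 64-bit, so a plain lseek()
                  * on the combined 64-bit offset is sufficient. */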
8997             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
8998             if (res == -1) {
8999                 ret = get_errno(res);
9000             } else {
9001                 ret = 0;
9002             }
9003 #else
9004             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9005 #endif
9006             if ((ret == 0) && put_user_s64(res, arg4)) {
9007                 return -TARGET_EFAULT;
9008             }
9009         }
9010         return ret;
9011 #endif
9012 #ifdef TARGET_NR_getdents
9013     case TARGET_NR_getdents:
9014 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9015 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9016         {
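                 /*
                  * The host linux_dirent has long-sized d_ino/d_off fields,
                  * which are wider than the 32-bit target's, so each record
                  * must be repacked via a bounce buffer rather than
                  * converted in place.
                  */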
9017             struct target_dirent *target_dirp;
9018             struct linux_dirent *dirp;
9019             abi_long count = arg3;
9020 
9021             dirp = g_try_malloc(count);
9022             if (!dirp) {
9023                 return -TARGET_ENOMEM;
9024             }
9025 
9026             ret = get_errno(sys_getdents(arg1, dirp, count));
9027             if (!is_error(ret)) {
9028                 struct linux_dirent *de;
9029                 struct target_dirent *tde;
9030                 int len = ret;
9031                 int reclen, treclen;
9032                 int count1, tnamelen;
9033 
9034                 count1 = 0;
9035                 de = dirp;
9036                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9037                     return -TARGET_EFAULT;
9038                 tde = target_dirp;
9039                 while (len > 0) {
9040                     reclen = de->d_reclen;
9041                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9042                     assert(tnamelen >= 0);
9043                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9044                     assert(count1 + treclen <= count);
9045                     tde->d_reclen = tswap16(treclen);
9046                     tde->d_ino = tswapal(de->d_ino);
9047                     tde->d_off = tswapal(de->d_off);
9048                     memcpy(tde->d_name, de->d_name, tnamelen);
9049                     de = (struct linux_dirent *)((char *)de + reclen);
9050                     len -= reclen;
9051                     tde = (struct target_dirent *)((char *)tde + treclen);
9052                     count1 += treclen;
9053                 }
9054                 ret = count1;
9055                 unlock_user(target_dirp, arg2, ret);
9056             }
9057             g_free(dirp);
9058         }
9059 #else
9060         {
9061             struct linux_dirent *dirp;
9062             abi_long count = arg3;
9063 
9064             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9065                 return -TARGET_EFAULT;
9066             ret = get_errno(sys_getdents(arg1, dirp, count));
9067             if (!is_error(ret)) {
9068                 struct linux_dirent *de;
9069                 int len = ret;
9070                 int reclen;
9071                 de = dirp;
9072                 while (len > 0) {
9073                     reclen = de->d_reclen;
9074                     if (reclen > len)
9075                         break;
9076                     de->d_reclen = tswap16(reclen);
9077                     tswapls(&de->d_ino);
9078                     tswapls(&de->d_off);
9079                     de = (struct linux_dirent *)((char *)de + reclen);
9080                     len -= reclen;
9081                 }
9082             }
9083             unlock_user(dirp, arg2, ret);
9084         }
9085 #endif
9086 #else
9087         /* Implement getdents in terms of getdents64 */
9088         {
9089             struct linux_dirent64 *dirp;
9090             abi_long count = arg3;
9091 
9092             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9093             if (!dirp) {
9094                 return -TARGET_EFAULT;
9095             }
9096             ret = get_errno(sys_getdents64(arg1, dirp, count));
9097             if (!is_error(ret)) {
9098                 /* Convert the dirent64 structs to target dirent.  We do this
9099                  * in-place, since we can guarantee that a target_dirent is no
9100                  * larger than a dirent64; however this means we have to be
9101                  * careful to read everything before writing in the new format.
9102                  */
9103                 struct linux_dirent64 *de;
9104                 struct target_dirent *tde;
9105                 int len = ret;
9106                 int tlen = 0;
9107 
9108                 de = dirp;
9109                 tde = (struct target_dirent *)dirp;
9110                 while (len > 0) {
9111                     int namelen, treclen;
9112                     int reclen = de->d_reclen;
9113                     uint64_t ino = de->d_ino;
9114                     int64_t off = de->d_off;
9115                     uint8_t type = de->d_type;
9116 
9117                     namelen = strlen(de->d_name);
9118                     treclen = offsetof(struct target_dirent, d_name)
9119                         + namelen + 2;
9120                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9121 
9122                     memmove(tde->d_name, de->d_name, namelen + 1);
9123                     tde->d_ino = tswapal(ino);
9124                     tde->d_off = tswapal(off);
9125                     tde->d_reclen = tswap16(treclen);
9126                     /* The target_dirent type is in what was formerly a padding
9127                      * byte at the end of the structure:
9128                      */
9129                     *(((char *)tde) + treclen - 1) = type;
9130 
9131                     de = (struct linux_dirent64 *)((char *)de + reclen);
9132                     tde = (struct target_dirent *)((char *)tde + treclen);
9133                     len -= reclen;
9134                     tlen += treclen;
9135                 }
9136                 ret = tlen;
9137             }
9138             unlock_user(dirp, arg2, ret);
9139         }
9140 #endif
9141         return ret;
9142 #endif /* TARGET_NR_getdents */
9143 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9144     case TARGET_NR_getdents64:
9145         {
9146             struct linux_dirent64 *dirp;
9147             abi_long count = arg3;
9148             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9149                 return -TARGET_EFAULT;
9150             ret = get_errno(sys_getdents64(arg1, dirp, count));
9151             if (!is_error(ret)) {
9152                 struct linux_dirent64 *de;
9153                 int len = ret;
9154                 int reclen;
9155                 de = dirp;
9156                 while (len > 0) {
9157                     reclen = de->d_reclen;
9158                     if (reclen > len)
9159                         break;
9160                     de->d_reclen = tswap16(reclen);
9161                     tswap64s((uint64_t *)&de->d_ino);
9162                     tswap64s((uint64_t *)&de->d_off);
9163                     de = (struct linux_dirent64 *)((char *)de + reclen);
9164                     len -= reclen;
9165                 }
9166             }
9167             unlock_user(dirp, arg2, ret);
9168         }
9169         return ret;
9170 #endif /* TARGET_NR_getdents64 */
9171 #if defined(TARGET_NR__newselect)
9172     case TARGET_NR__newselect:
9173         return do_select(arg1, arg2, arg3, arg4, arg5);
9174 #endif
9175 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9176 # ifdef TARGET_NR_poll
9177     case TARGET_NR_poll:
9178 # endif
9179 # ifdef TARGET_NR_ppoll
9180     case TARGET_NR_ppoll:
9181 # endif
9182         {
9183             struct target_pollfd *target_pfd;
9184             unsigned int nfds = arg2;
9185             struct pollfd *pfd;
9186             unsigned int i;
9187 
9188             pfd = NULL;
9189             target_pfd = NULL;
9190             if (nfds) {
9191                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9192                     return -TARGET_EINVAL;
9193                 }
9194 
9195                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9196                                        sizeof(struct target_pollfd) * nfds, 1);
9197                 if (!target_pfd) {
9198                     return -TARGET_EFAULT;
9199                 }
9200 
9201                 pfd = alloca(sizeof(struct pollfd) * nfds);
9202                 for (i = 0; i < nfds; i++) {
9203                     pfd[i].fd = tswap32(target_pfd[i].fd);
9204                     pfd[i].events = tswap16(target_pfd[i].events);
9205                 }
9206             }
9207 
9208             switch (num) {
9209 # ifdef TARGET_NR_ppoll
9210             case TARGET_NR_ppoll:
9211             {
9212                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9213                 target_sigset_t *target_set;
9214                 sigset_t _set, *set = &_set;
9215 
9216                 if (arg3) {
9217                     if (target_to_host_timespec(timeout_ts, arg3)) {
9218                         unlock_user(target_pfd, arg1, 0);
9219                         return -TARGET_EFAULT;
9220                     }
9221                 } else {
9222                     timeout_ts = NULL;
9223                 }
9224 
9225                 if (arg4) {
9226                     if (arg5 != sizeof(target_sigset_t)) {
9227                         unlock_user(target_pfd, arg1, 0);
9228                         return -TARGET_EINVAL;
9229                     }
9230 
9231                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9232                     if (!target_set) {
9233                         unlock_user(target_pfd, arg1, 0);
9234                         return -TARGET_EFAULT;
9235                     }
9236                     target_to_host_sigset(set, target_set);
9237                 } else {
9238                     set = NULL;
9239                 }
9240 
9241                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9242                                            set, SIGSET_T_SIZE));
9243 
9244                 if (!is_error(ret) && arg3) {
9245                     host_to_target_timespec(arg3, timeout_ts);
9246                 }
9247                 if (arg4) {
9248                     unlock_user(target_set, arg4, 0);
9249                 }
9250                 break;
9251             }
9252 # endif
9253 # ifdef TARGET_NR_poll
9254             case TARGET_NR_poll:
9255             {
9256                 struct timespec ts, *pts;
9257 
9258                 if (arg3 >= 0) {
9259                     /* Convert ms to secs, ns */
9260                     ts.tv_sec = arg3 / 1000;
9261                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
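                    /* e.g. arg3 == 2500 ms becomes tv_sec = 2, tv_nsec = 500000000 */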
9262                     pts = &ts;
9263                 } else {
9264                     /* A negative poll() timeout means "infinite" */
9265                     pts = NULL;
9266                 }
9267                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9268                 break;
9269             }
9270 # endif
9271             default:
9272                 g_assert_not_reached();
9273             }
9274 
9275             if (!is_error(ret)) {
9276                 for(i = 0; i < nfds; i++) {
9277                     target_pfd[i].revents = tswap16(pfd[i].revents);
9278                 }
9279             }
9280             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9281         }
9282         return ret;
9283 #endif
9284     case TARGET_NR_flock:
9285         /* NOTE: the flock constants are the same on every
9286            Linux platform */
9287         return get_errno(safe_flock(arg1, arg2));
9288     case TARGET_NR_readv:
9289         {
9290             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9291             if (vec != NULL) {
9292                 ret = get_errno(safe_readv(arg1, vec, arg3));
9293                 unlock_iovec(vec, arg2, arg3, 1);
9294             } else {
9295                 ret = -host_to_target_errno(errno);
9296             }
9297         }
9298         return ret;
9299     case TARGET_NR_writev:
9300         {
9301             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9302             if (vec != NULL) {
9303                 ret = get_errno(safe_writev(arg1, vec, arg3));
9304                 unlock_iovec(vec, arg2, arg3, 0);
9305             } else {
9306                 ret = -host_to_target_errno(errno);
9307             }
9308         }
9309         return ret;
9310 #if defined(TARGET_NR_preadv)
9311     case TARGET_NR_preadv:
9312         {
9313             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9314             if (vec != NULL) {
9315                 unsigned long low, high;
9316 
9317                 target_to_host_low_high(arg4, arg5, &low, &high);
9318                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9319                 unlock_iovec(vec, arg2, arg3, 1);
9320             } else {
9321                 ret = -host_to_target_errno(errno);
9322            }
9323         }
9324         return ret;
9325 #endif
9326 #if defined(TARGET_NR_pwritev)
9327     case TARGET_NR_pwritev:
9328         {
9329             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9330             if (vec != NULL) {
9331                 unsigned long low, high;
9332 
9333                 target_to_host_low_high(arg4, arg5, &low, &high);
9334                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9335                 unlock_iovec(vec, arg2, arg3, 0);
9336             } else {
9337                 ret = -host_to_target_errno(errno);
9338            }
9339         }
9340         return ret;
9341 #endif
9342     case TARGET_NR_getsid:
9343         return get_errno(getsid(arg1));
9344 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9345     case TARGET_NR_fdatasync:
9346         return get_errno(fdatasync(arg1));
9347 #endif
9348 #ifdef TARGET_NR__sysctl
9349     case TARGET_NR__sysctl:
9350         /* We don't implement this, but ENOTDIR is always a safe
9351            return value. */
9352         return -TARGET_ENOTDIR;
9353 #endif
9354     case TARGET_NR_sched_getaffinity:
9355         {
9356             unsigned int mask_size;
9357             unsigned long *mask;
9358 
9359             /*
9360              * sched_getaffinity needs multiples of ulong, so need to take
9361              * care of mismatches between target ulong and host ulong sizes.
9362              */
9363             if (arg2 & (sizeof(abi_ulong) - 1)) {
9364                 return -TARGET_EINVAL;
9365             }
9366             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
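            /*
             * For example, a 32-bit guest passing arg2 == 12 (three 4-byte
             * abi_ulongs) on a host with 8-byte longs gets mask_size == 16.
             */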
9367 
9368             mask = alloca(mask_size);
9369             memset(mask, 0, mask_size);
9370             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9371 
9372             if (!is_error(ret)) {
9373                 if (ret > arg2) {
9374                     /* More data was returned than the caller's buffer can hold.
9375                      * This only happens if sizeof(abi_long) < sizeof(long)
9376                      * and the caller passed us a buffer holding an odd number
9377                      * of abi_longs. If the host kernel is actually using the
9378                      * extra 4 bytes then fail EINVAL; otherwise we can just
9379                      * ignore them and only copy the interesting part.
9380                      */
9381                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9382                     if (numcpus > arg2 * 8) {
9383                         return -TARGET_EINVAL;
9384                     }
9385                     ret = arg2;
9386                 }
9387 
9388                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9389                     return -TARGET_EFAULT;
9390                 }
9391             }
9392         }
9393         return ret;
9394     case TARGET_NR_sched_setaffinity:
9395         {
9396             unsigned int mask_size;
9397             unsigned long *mask;
9398 
9399             /*
9400              * sched_setaffinity needs multiples of ulong, so need to take
9401              * care of mismatches between target ulong and host ulong sizes.
9402              */
9403             if (arg2 & (sizeof(abi_ulong) - 1)) {
9404                 return -TARGET_EINVAL;
9405             }
9406             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9407             mask = alloca(mask_size);
9408 
9409             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9410             if (ret) {
9411                 return ret;
9412             }
9413 
9414             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9415         }
9416     case TARGET_NR_getcpu:
9417         {
9418             unsigned cpu, node;
9419             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9420                                        arg2 ? &node : NULL,
9421                                        NULL));
9422             if (is_error(ret)) {
9423                 return ret;
9424             }
9425             if (arg1 && put_user_u32(cpu, arg1)) {
9426                 return -TARGET_EFAULT;
9427             }
9428             if (arg2 && put_user_u32(node, arg2)) {
9429                 return -TARGET_EFAULT;
9430             }
9431         }
9432         return ret;
9433     case TARGET_NR_sched_setparam:
9434         {
9435             struct sched_param *target_schp;
9436             struct sched_param schp;
9437 
9438             if (arg2 == 0) {
9439                 return -TARGET_EINVAL;
9440             }
9441             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9442                 return -TARGET_EFAULT;
9443             schp.sched_priority = tswap32(target_schp->sched_priority);
9444             unlock_user_struct(target_schp, arg2, 0);
9445             return get_errno(sched_setparam(arg1, &schp));
9446         }
9447     case TARGET_NR_sched_getparam:
9448         {
9449             struct sched_param *target_schp;
9450             struct sched_param schp;
9451 
9452             if (arg2 == 0) {
9453                 return -TARGET_EINVAL;
9454             }
9455             ret = get_errno(sched_getparam(arg1, &schp));
9456             if (!is_error(ret)) {
9457                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9458                     return -TARGET_EFAULT;
9459                 target_schp->sched_priority = tswap32(schp.sched_priority);
9460                 unlock_user_struct(target_schp, arg2, 1);
9461             }
9462         }
9463         return ret;
9464     case TARGET_NR_sched_setscheduler:
9465         {
9466             struct sched_param *target_schp;
9467             struct sched_param schp;
9468             if (arg3 == 0) {
9469                 return -TARGET_EINVAL;
9470             }
9471             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9472                 return -TARGET_EFAULT;
9473             schp.sched_priority = tswap32(target_schp->sched_priority);
9474             unlock_user_struct(target_schp, arg3, 0);
9475             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9476         }
9477     case TARGET_NR_sched_getscheduler:
9478         return get_errno(sched_getscheduler(arg1));
9479     case TARGET_NR_sched_yield:
9480         return get_errno(sched_yield());
9481     case TARGET_NR_sched_get_priority_max:
9482         return get_errno(sched_get_priority_max(arg1));
9483     case TARGET_NR_sched_get_priority_min:
9484         return get_errno(sched_get_priority_min(arg1));
9485     case TARGET_NR_sched_rr_get_interval:
9486         {
9487             struct timespec ts;
9488             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9489             if (!is_error(ret)) {
9490                 ret = host_to_target_timespec(arg2, &ts);
9491             }
9492         }
9493         return ret;
9494     case TARGET_NR_nanosleep:
9495         {
9496             struct timespec req, rem;
9497             if (target_to_host_timespec(&req, arg1)) {
                     return -TARGET_EFAULT;
                 }
9498             ret = get_errno(safe_nanosleep(&req, &rem));
9499             if (is_error(ret) && arg2) {
9500                 host_to_target_timespec(arg2, &rem);
9501             }
9502         }
9503         return ret;
9504     case TARGET_NR_prctl:
9505         switch (arg1) {
9506         case PR_GET_PDEATHSIG:
9507         {
9508             int deathsig;
9509             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9510             if (!is_error(ret) && arg2
9511                 && put_user_ual(deathsig, arg2)) {
9512                 return -TARGET_EFAULT;
9513             }
9514             return ret;
9515         }
9516 #ifdef PR_GET_NAME
9517         case PR_GET_NAME:
9518         {
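            /* The kernel's task comm name is TASK_COMM_LEN (16) bytes long,
             * including the trailing NUL, hence the fixed length used here
             * and in PR_SET_NAME below. */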
9519             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9520             if (!name) {
9521                 return -TARGET_EFAULT;
9522             }
9523             ret = get_errno(prctl(arg1, (unsigned long)name,
9524                                   arg3, arg4, arg5));
9525             unlock_user(name, arg2, 16);
9526             return ret;
9527         }
9528         case PR_SET_NAME:
9529         {
9530             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9531             if (!name) {
9532                 return -TARGET_EFAULT;
9533             }
9534             ret = get_errno(prctl(arg1, (unsigned long)name,
9535                                   arg3, arg4, arg5));
9536             unlock_user(name, arg2, 0);
9537             return ret;
9538         }
9539 #endif
9540 #ifdef TARGET_MIPS
9541         case TARGET_PR_GET_FP_MODE:
9542         {
9543             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9544             ret = 0;
9545             if (env->CP0_Status & (1 << CP0St_FR)) {
9546                 ret |= TARGET_PR_FP_MODE_FR;
9547             }
9548             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9549                 ret |= TARGET_PR_FP_MODE_FRE;
9550             }
9551             return ret;
9552         }
9553         case TARGET_PR_SET_FP_MODE:
9554         {
9555             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9556             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9557             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
9558             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9559             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9560 
9561             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
9562                                             TARGET_PR_FP_MODE_FRE;
9563 
9564             /* If there is nothing to change, return immediately with success.  */
9565             if (old_fr == new_fr && old_fre == new_fre) {
9566                 return 0;
9567             }
9568             /* Check the value is valid */
9569             if (arg2 & ~known_bits) {
9570                 return -TARGET_EOPNOTSUPP;
9571             }
9572             /* Setting FRE without FR is not supported.  */
9573             if (new_fre && !new_fr) {
9574                 return -TARGET_EOPNOTSUPP;
9575             }
9576             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9577                 /* FR1 is not supported */
9578                 return -TARGET_EOPNOTSUPP;
9579             }
9580             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9581                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9582                 /* cannot set FR=0 */
9583                 return -TARGET_EOPNOTSUPP;
9584             }
9585             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9586                 /* Cannot set FRE=1 */
9587                 return -TARGET_EOPNOTSUPP;
9588             }
9589 
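            /* Repack the FPU register file for the new mode: with FR=0 a
             * 64-bit value is split across an even/odd register pair, with
             * FR=1 it lives in the two words of the even register alone. */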
9590             int i;
9591             fpr_t *fpr = env->active_fpu.fpr;
9592             for (i = 0; i < 32 ; i += 2) {
9593                 if (!old_fr && new_fr) {
9594                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9595                 } else if (old_fr && !new_fr) {
9596                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
9597                 }
9598             }
9599 
9600             if (new_fr) {
9601                 env->CP0_Status |= (1 << CP0St_FR);
9602                 env->hflags |= MIPS_HFLAG_F64;
9603             } else {
9604                 env->CP0_Status &= ~(1 << CP0St_FR);
9605                 env->hflags &= ~MIPS_HFLAG_F64;
9606             }
9607             if (new_fre) {
9608                 env->CP0_Config5 |= (1 << CP0C5_FRE);
9609                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
9610                     env->hflags |= MIPS_HFLAG_FRE;
9611                 }
9612             } else {
9613                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
9614                 env->hflags &= ~MIPS_HFLAG_FRE;
9615             }
9616 
9617             return 0;
9618         }
9619 #endif /* MIPS */
9620 #ifdef TARGET_AARCH64
9621         case TARGET_PR_SVE_SET_VL:
9622             /*
9623              * We cannot support either PR_SVE_SET_VL_ONEXEC or
9624              * PR_SVE_VL_INHERIT.  Note the kernel definition
9625              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9626              * even though the current architectural maximum is VQ=16.
9627              */
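            /* For example, arg2 == 32 requests a 32-byte vector length
             * (VQ = 2); the result is then clamped to the CPU's sve_max_vq. */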
9628             ret = -TARGET_EINVAL;
9629             if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env))
9630                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9631                 CPUARMState *env = cpu_env;
9632                 ARMCPU *cpu = arm_env_get_cpu(env);
9633                 uint32_t vq, old_vq;
9634 
9635                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9636                 vq = MAX(arg2 / 16, 1);
9637                 vq = MIN(vq, cpu->sve_max_vq);
9638 
9639                 if (vq < old_vq) {
9640                     aarch64_sve_narrow_vq(env, vq);
9641                 }
9642                 env->vfp.zcr_el[1] = vq - 1;
9643                 ret = vq * 16;
9644             }
9645             return ret;
9646         case TARGET_PR_SVE_GET_VL:
9647             ret = -TARGET_EINVAL;
9648             {
9649                 ARMCPU *cpu = arm_env_get_cpu(cpu_env);
9650                 if (cpu_isar_feature(aa64_sve, cpu)) {
9651                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9652                 }
9653             }
9654             return ret;
9655 #endif /* AARCH64 */
9656         case PR_GET_SECCOMP:
9657         case PR_SET_SECCOMP:
9658             /* Refuse these so the target cannot use seccomp to disable
9659              * syscalls that we ourselves need. */
9660             return -TARGET_EINVAL;
9661         default:
9662             /* Most prctl options have no pointer arguments */
9663             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9664         }
9665         break;
9666 #ifdef TARGET_NR_arch_prctl
9667     case TARGET_NR_arch_prctl:
9668 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9669         return do_arch_prctl(cpu_env, arg1, arg2);
9670 #else
9671 #error unreachable
9672 #endif
9673 #endif
9674 #ifdef TARGET_NR_pread64
9675     case TARGET_NR_pread64:
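        /* Some 32-bit ABIs pass 64-bit values in aligned register pairs,
         * which inserts a padding register before the offset; shift the
         * offset halves back down in that case (pwrite64 below does the
         * same). */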
9676         if (regpairs_aligned(cpu_env, num)) {
9677             arg4 = arg5;
9678             arg5 = arg6;
9679         }
9680         if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9681             return -TARGET_EFAULT;
9682         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9683         unlock_user(p, arg2, ret);
9684         return ret;
9685     case TARGET_NR_pwrite64:
9686         if (regpairs_aligned(cpu_env, num)) {
9687             arg4 = arg5;
9688             arg5 = arg6;
9689         }
9690         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9691             return -TARGET_EFAULT;
9692         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9693         unlock_user(p, arg2, 0);
9694         return ret;
9695 #endif
9696     case TARGET_NR_getcwd:
9697         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9698             return -TARGET_EFAULT;
9699         ret = get_errno(sys_getcwd1(p, arg2));
9700         unlock_user(p, arg1, ret);
9701         return ret;
9702     case TARGET_NR_capget:
9703     case TARGET_NR_capset:
9704     {
9705         struct target_user_cap_header *target_header;
9706         struct target_user_cap_data *target_data = NULL;
9707         struct __user_cap_header_struct header;
9708         struct __user_cap_data_struct data[2];
9709         struct __user_cap_data_struct *dataptr = NULL;
9710         int i, target_datalen;
9711         int data_items = 1;
9712 
9713         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9714             return -TARGET_EFAULT;
9715         }
9716         header.version = tswap32(target_header->version);
9717         header.pid = tswap32(target_header->pid);
9718 
9719         if (header.version != _LINUX_CAPABILITY_VERSION) {
9720             /* Version 2 and up takes pointer to two user_data structs */
9721             data_items = 2;
9722         }
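        /* i.e. capability version 2 and 3 callers supply two
         * __user_cap_data_struct entries, covering 64 capability bits. */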
9723 
9724         target_datalen = sizeof(*target_data) * data_items;
9725 
9726         if (arg2) {
9727             if (num == TARGET_NR_capget) {
9728                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9729             } else {
9730                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9731             }
9732             if (!target_data) {
9733                 unlock_user_struct(target_header, arg1, 0);
9734                 return -TARGET_EFAULT;
9735             }
9736 
9737             if (num == TARGET_NR_capset) {
9738                 for (i = 0; i < data_items; i++) {
9739                     data[i].effective = tswap32(target_data[i].effective);
9740                     data[i].permitted = tswap32(target_data[i].permitted);
9741                     data[i].inheritable = tswap32(target_data[i].inheritable);
9742                 }
9743             }
9744 
9745             dataptr = data;
9746         }
9747 
9748         if (num == TARGET_NR_capget) {
9749             ret = get_errno(capget(&header, dataptr));
9750         } else {
9751             ret = get_errno(capset(&header, dataptr));
9752         }
9753 
9754         /* The kernel always updates version for both capget and capset */
9755         target_header->version = tswap32(header.version);
9756         unlock_user_struct(target_header, arg1, 1);
9757 
9758         if (arg2) {
9759             if (num == TARGET_NR_capget) {
9760                 for (i = 0; i < data_items; i++) {
9761                     target_data[i].effective = tswap32(data[i].effective);
9762                     target_data[i].permitted = tswap32(data[i].permitted);
9763                     target_data[i].inheritable = tswap32(data[i].inheritable);
9764                 }
9765                 unlock_user(target_data, arg2, target_datalen);
9766             } else {
9767                 unlock_user(target_data, arg2, 0);
9768             }
9769         }
9770         return ret;
9771     }
9772     case TARGET_NR_sigaltstack:
9773         return do_sigaltstack(arg1, arg2,
9774                               get_sp_from_cpustate((CPUArchState *)cpu_env));
9775 
9776 #ifdef CONFIG_SENDFILE
9777 #ifdef TARGET_NR_sendfile
9778     case TARGET_NR_sendfile:
9779     {
9780         off_t *offp = NULL;
9781         off_t off;
9782         if (arg3) {
9783             ret = get_user_sal(off, arg3);
9784             if (is_error(ret)) {
9785                 return ret;
9786             }
9787             offp = &off;
9788         }
9789         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9790         if (!is_error(ret) && arg3) {
9791             abi_long ret2 = put_user_sal(off, arg3);
9792             if (is_error(ret2)) {
9793                 ret = ret2;
9794             }
9795         }
9796         return ret;
9797     }
9798 #endif
9799 #ifdef TARGET_NR_sendfile64
9800     case TARGET_NR_sendfile64:
9801     {
9802         off_t *offp = NULL;
9803         off_t off;
9804         if (arg3) {
9805             ret = get_user_s64(off, arg3);
9806             if (is_error(ret)) {
9807                 return ret;
9808             }
9809             offp = &off;
9810         }
9811         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9812         if (!is_error(ret) && arg3) {
9813             abi_long ret2 = put_user_s64(off, arg3);
9814             if (is_error(ret2)) {
9815                 ret = ret2;
9816             }
9817         }
9818         return ret;
9819     }
9820 #endif
9821 #endif
9822 #ifdef TARGET_NR_vfork
9823     case TARGET_NR_vfork:
9824         return get_errno(do_fork(cpu_env,
9825                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
9826                          0, 0, 0, 0));
9827 #endif
9828 #ifdef TARGET_NR_ugetrlimit
9829     case TARGET_NR_ugetrlimit:
9830     {
9831         struct rlimit rlim;
9832         int resource = target_to_host_resource(arg1);
9833         ret = get_errno(getrlimit(resource, &rlim));
9834         if (!is_error(ret)) {
9835             struct target_rlimit *target_rlim;
9836             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9837                 return -TARGET_EFAULT;
9838             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9839             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9840             unlock_user_struct(target_rlim, arg2, 1);
9841         }
9842         return ret;
9843     }
9844 #endif
9845 #ifdef TARGET_NR_truncate64
9846     case TARGET_NR_truncate64:
9847         if (!(p = lock_user_string(arg1)))
9848             return -TARGET_EFAULT;
9849         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
9850         unlock_user(p, arg1, 0);
9851         return ret;
9852 #endif
9853 #ifdef TARGET_NR_ftruncate64
9854     case TARGET_NR_ftruncate64:
9855         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
9856 #endif
9857 #ifdef TARGET_NR_stat64
9858     case TARGET_NR_stat64:
9859         if (!(p = lock_user_string(arg1))) {
9860             return -TARGET_EFAULT;
9861         }
9862         ret = get_errno(stat(path(p), &st));
9863         unlock_user(p, arg1, 0);
9864         if (!is_error(ret))
9865             ret = host_to_target_stat64(cpu_env, arg2, &st);
9866         return ret;
9867 #endif
9868 #ifdef TARGET_NR_lstat64
9869     case TARGET_NR_lstat64:
9870         if (!(p = lock_user_string(arg1))) {
9871             return -TARGET_EFAULT;
9872         }
9873         ret = get_errno(lstat(path(p), &st));
9874         unlock_user(p, arg1, 0);
9875         if (!is_error(ret))
9876             ret = host_to_target_stat64(cpu_env, arg2, &st);
9877         return ret;
9878 #endif
9879 #ifdef TARGET_NR_fstat64
9880     case TARGET_NR_fstat64:
9881         ret = get_errno(fstat(arg1, &st));
9882         if (!is_error(ret))
9883             ret = host_to_target_stat64(cpu_env, arg2, &st);
9884         return ret;
9885 #endif
9886 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9887 #ifdef TARGET_NR_fstatat64
9888     case TARGET_NR_fstatat64:
9889 #endif
9890 #ifdef TARGET_NR_newfstatat
9891     case TARGET_NR_newfstatat:
9892 #endif
9893         if (!(p = lock_user_string(arg2))) {
9894             return -TARGET_EFAULT;
9895         }
9896         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
9897         unlock_user(p, arg2, 0);
9898         if (!is_error(ret))
9899             ret = host_to_target_stat64(cpu_env, arg3, &st);
9900         return ret;
9901 #endif
9902 #ifdef TARGET_NR_lchown
9903     case TARGET_NR_lchown:
9904         if (!(p = lock_user_string(arg1)))
9905             return -TARGET_EFAULT;
9906         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
9907         unlock_user(p, arg1, 0);
9908         return ret;
9909 #endif
9910 #ifdef TARGET_NR_getuid
9911     case TARGET_NR_getuid:
9912         return get_errno(high2lowuid(getuid()));
9913 #endif
9914 #ifdef TARGET_NR_getgid
9915     case TARGET_NR_getgid:
9916         return get_errno(high2lowgid(getgid()));
9917 #endif
9918 #ifdef TARGET_NR_geteuid
9919     case TARGET_NR_geteuid:
9920         return get_errno(high2lowuid(geteuid()));
9921 #endif
9922 #ifdef TARGET_NR_getegid
9923     case TARGET_NR_getegid:
9924         return get_errno(high2lowgid(getegid()));
9925 #endif
9926     case TARGET_NR_setreuid:
9927         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
9928     case TARGET_NR_setregid:
9929         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
9930     case TARGET_NR_getgroups:
9931         {
9932             int gidsetsize = arg1;
9933             target_id *target_grouplist;
9934             gid_t *grouplist;
9935             int i;
9936 
9937             grouplist = alloca(gidsetsize * sizeof(gid_t));
9938             ret = get_errno(getgroups(gidsetsize, grouplist));
9939             if (gidsetsize == 0)
9940                 return ret;
9941             if (!is_error(ret)) {
9942                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
9943                 if (!target_grouplist)
9944                     return -TARGET_EFAULT;
9945                 for (i = 0; i < ret; i++)
9946                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
9947                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
9948             }
9949         }
9950         return ret;
9951     case TARGET_NR_setgroups:
9952         {
9953             int gidsetsize = arg1;
9954             target_id *target_grouplist;
9955             gid_t *grouplist = NULL;
9956             int i;
9957             if (gidsetsize) {
9958                 grouplist = alloca(gidsetsize * sizeof(gid_t));
9959                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
9960                 if (!target_grouplist) {
9961                     return -TARGET_EFAULT;
9962                 }
9963                 for (i = 0; i < gidsetsize; i++) {
9964                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
9965                 }
9966                 unlock_user(target_grouplist, arg2, 0);
9967             }
9968             return get_errno(setgroups(gidsetsize, grouplist));
9969         }
9970     case TARGET_NR_fchown:
9971         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
9972 #if defined(TARGET_NR_fchownat)
9973     case TARGET_NR_fchownat:
9974         if (!(p = lock_user_string(arg2)))
9975             return -TARGET_EFAULT;
9976         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
9977                                  low2highgid(arg4), arg5));
9978         unlock_user(p, arg2, 0);
9979         return ret;
9980 #endif
9981 #ifdef TARGET_NR_setresuid
9982     case TARGET_NR_setresuid:
9983         return get_errno(sys_setresuid(low2highuid(arg1),
9984                                        low2highuid(arg2),
9985                                        low2highuid(arg3)));
9986 #endif
9987 #ifdef TARGET_NR_getresuid
9988     case TARGET_NR_getresuid:
9989         {
9990             uid_t ruid, euid, suid;
9991             ret = get_errno(getresuid(&ruid, &euid, &suid));
9992             if (!is_error(ret)) {
9993                 if (put_user_id(high2lowuid(ruid), arg1)
9994                     || put_user_id(high2lowuid(euid), arg2)
9995                     || put_user_id(high2lowuid(suid), arg3))
9996                     return -TARGET_EFAULT;
9997             }
9998         }
9999         return ret;
10000 #endif
10001 #ifdef TARGET_NR_getresgid
10002     case TARGET_NR_setresgid:
10003         return get_errno(sys_setresgid(low2highgid(arg1),
10004                                        low2highgid(arg2),
10005                                        low2highgid(arg3)));
10006 #endif
10007 #ifdef TARGET_NR_getresgid
10008     case TARGET_NR_getresgid:
10009         {
10010             gid_t rgid, egid, sgid;
10011             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10012             if (!is_error(ret)) {
10013                 if (put_user_id(high2lowgid(rgid), arg1)
10014                     || put_user_id(high2lowgid(egid), arg2)
10015                     || put_user_id(high2lowgid(sgid), arg3))
10016                     return -TARGET_EFAULT;
10017             }
10018         }
10019         return ret;
10020 #endif
10021 #ifdef TARGET_NR_chown
10022     case TARGET_NR_chown:
10023         if (!(p = lock_user_string(arg1)))
10024             return -TARGET_EFAULT;
10025         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10026         unlock_user(p, arg1, 0);
10027         return ret;
10028 #endif
10029     case TARGET_NR_setuid:
10030         return get_errno(sys_setuid(low2highuid(arg1)));
10031     case TARGET_NR_setgid:
10032         return get_errno(sys_setgid(low2highgid(arg1)));
10033     case TARGET_NR_setfsuid:
10034         return get_errno(setfsuid(arg1));
10035     case TARGET_NR_setfsgid:
10036         return get_errno(setfsgid(arg1));
10037 
10038 #ifdef TARGET_NR_lchown32
10039     case TARGET_NR_lchown32:
10040         if (!(p = lock_user_string(arg1)))
10041             return -TARGET_EFAULT;
10042         ret = get_errno(lchown(p, arg2, arg3));
10043         unlock_user(p, arg1, 0);
10044         return ret;
10045 #endif
10046 #ifdef TARGET_NR_getuid32
10047     case TARGET_NR_getuid32:
10048         return get_errno(getuid());
10049 #endif
10050 
10051 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10052    /* Alpha specific */
10053     case TARGET_NR_getxuid:
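        /* OSF/1 getxuid returns both IDs at once: the real uid is the
         * normal return value (v0) and the effective uid goes to a4,
         * which is why IR_A4 is set here; getxgid below is analogous. */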
10054          {
10055             uid_t euid;
10056             euid = geteuid();
10057             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10058          }
10059         return get_errno(getuid());
10060 #endif
10061 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10062    /* Alpha specific */
10063     case TARGET_NR_getxgid:
10064          {
10065             gid_t egid;
10066             egid = getegid();
10067             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10068          }
10069         return get_errno(getgid());
10070 #endif
10071 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10072     /* Alpha specific */
10073     case TARGET_NR_osf_getsysinfo:
10074         ret = -TARGET_EOPNOTSUPP;
10075         switch (arg1) {
10076           case TARGET_GSI_IEEE_FP_CONTROL:
10077             {
10078                 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
10079 
10080                 /* Copied from linux ieee_fpcr_to_swcr.  */
10081                 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10082                 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10083                 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10084                                         | SWCR_TRAP_ENABLE_DZE
10085                                         | SWCR_TRAP_ENABLE_OVF);
10086                 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10087                                         | SWCR_TRAP_ENABLE_INE);
10088                 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10089                 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10090 
10091                 if (put_user_u64 (swcr, arg2))
10092                         return -TARGET_EFAULT;
10093                 ret = 0;
10094             }
10095             break;
10096 
10097           /* case GSI_IEEE_STATE_AT_SIGNAL:
10098              -- Not implemented in linux kernel.
10099              case GSI_UACPROC:
10100              -- Retrieves current unaligned access state; not much used.
10101              case GSI_PROC_TYPE:
10102              -- Retrieves implver information; surely not used.
10103              case GSI_GET_HWRPB:
10104              -- Grabs a copy of the HWRPB; surely not used.
10105           */
10106         }
10107         return ret;
10108 #endif
10109 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10110     /* Alpha specific */
10111     case TARGET_NR_osf_setsysinfo:
10112         ret = -TARGET_EOPNOTSUPP;
10113         switch (arg1) {
10114           case TARGET_SSI_IEEE_FP_CONTROL:
10115             {
10116                 uint64_t swcr, fpcr, orig_fpcr;
10117 
10118                 if (get_user_u64 (swcr, arg2)) {
10119                     return -TARGET_EFAULT;
10120                 }
10121                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10122                 fpcr = orig_fpcr & FPCR_DYN_MASK;
10123 
10124                 /* Copied from linux ieee_swcr_to_fpcr.  */
10125                 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10126                 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10127                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10128                                   | SWCR_TRAP_ENABLE_DZE
10129                                   | SWCR_TRAP_ENABLE_OVF)) << 48;
10130                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10131                                   | SWCR_TRAP_ENABLE_INE)) << 57;
10132                 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10133                 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10134 
10135                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10136                 ret = 0;
10137             }
10138             break;
10139 
10140           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10141             {
10142                 uint64_t exc, fpcr, orig_fpcr;
10143                 int si_code;
10144 
10145                 if (get_user_u64(exc, arg2)) {
10146                     return -TARGET_EFAULT;
10147                 }
10148 
10149                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10150 
10151                 /* We only add to the exception status here.  */
10152                 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
10153 
10154                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10155                 ret = 0;
10156 
10157                 /* Old exceptions are not signaled.  */
10158                 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
10159 
10160                 /* If any exceptions were set by this call
10161                    and are unmasked, send a signal.  */
10162                 si_code = 0;
10163                 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
10164                     si_code = TARGET_FPE_FLTRES;
10165                 }
10166                 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
10167                     si_code = TARGET_FPE_FLTUND;
10168                 }
10169                 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
10170                     si_code = TARGET_FPE_FLTOVF;
10171                 }
10172                 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
10173                     si_code = TARGET_FPE_FLTDIV;
10174                 }
10175                 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
10176                     si_code = TARGET_FPE_FLTINV;
10177                 }
10178                 if (si_code != 0) {
10179                     target_siginfo_t info;
10180                     info.si_signo = SIGFPE;
10181                     info.si_errno = 0;
10182                     info.si_code = si_code;
10183                     info._sifields._sigfault._addr
10184                         = ((CPUArchState *)cpu_env)->pc;
10185                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10186                                  QEMU_SI_FAULT, &info);
10187                 }
10188             }
10189             break;
10190 
10191           /* case SSI_NVPAIRS:
10192              -- Used with SSIN_UACPROC to enable unaligned accesses.
10193              case SSI_IEEE_STATE_AT_SIGNAL:
10194              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10195              -- Not implemented in linux kernel
10196           */
10197         }
10198         return ret;
10199 #endif
10200 #ifdef TARGET_NR_osf_sigprocmask
10201     /* Alpha specific.  */
10202     case TARGET_NR_osf_sigprocmask:
10203         {
10204             abi_ulong mask;
10205             int how;
10206             sigset_t set, oldset;
10207 
10208             switch(arg1) {
10209             case TARGET_SIG_BLOCK:
10210                 how = SIG_BLOCK;
10211                 break;
10212             case TARGET_SIG_UNBLOCK:
10213                 how = SIG_UNBLOCK;
10214                 break;
10215             case TARGET_SIG_SETMASK:
10216                 how = SIG_SETMASK;
10217                 break;
10218             default:
10219                 return -TARGET_EINVAL;
10220             }
10221             mask = arg2;
10222             target_to_host_old_sigset(&set, &mask);
10223             ret = do_sigprocmask(how, &set, &oldset);
10224             if (!ret) {
10225                 host_to_target_old_sigset(&mask, &oldset);
10226                 ret = mask;
10227             }
10228         }
10229         return ret;
10230 #endif
10231 
10232 #ifdef TARGET_NR_getgid32
10233     case TARGET_NR_getgid32:
10234         return get_errno(getgid());
10235 #endif
10236 #ifdef TARGET_NR_geteuid32
10237     case TARGET_NR_geteuid32:
10238         return get_errno(geteuid());
10239 #endif
10240 #ifdef TARGET_NR_getegid32
10241     case TARGET_NR_getegid32:
10242         return get_errno(getegid());
10243 #endif
10244 #ifdef TARGET_NR_setreuid32
10245     case TARGET_NR_setreuid32:
10246         return get_errno(setreuid(arg1, arg2));
10247 #endif
10248 #ifdef TARGET_NR_setregid32
10249     case TARGET_NR_setregid32:
10250         return get_errno(setregid(arg1, arg2));
10251 #endif
10252 #ifdef TARGET_NR_getgroups32
10253     case TARGET_NR_getgroups32:
10254         {
10255             int gidsetsize = arg1;
10256             uint32_t *target_grouplist;
10257             gid_t *grouplist;
10258             int i;
10259 
10260             grouplist = alloca(gidsetsize * sizeof(gid_t));
10261             ret = get_errno(getgroups(gidsetsize, grouplist));
10262             if (gidsetsize == 0)
10263                 return ret;
10264             if (!is_error(ret)) {
10265                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10266                 if (!target_grouplist) {
10267                     return -TARGET_EFAULT;
10268                 }
10269                 for (i = 0; i < ret; i++)
10270                     target_grouplist[i] = tswap32(grouplist[i]);
10271                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10272             }
10273         }
10274         return ret;
10275 #endif
10276 #ifdef TARGET_NR_setgroups32
10277     case TARGET_NR_setgroups32:
10278         {
10279             int gidsetsize = arg1;
10280             uint32_t *target_grouplist;
10281             gid_t *grouplist;
10282             int i;
10283 
10284             grouplist = alloca(gidsetsize * sizeof(gid_t));
10285             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10286             if (!target_grouplist) {
10287                 return -TARGET_EFAULT;
10288             }
10289             for (i = 0; i < gidsetsize; i++)
10290                 grouplist[i] = tswap32(target_grouplist[i]);
10291             unlock_user(target_grouplist, arg2, 0);
10292             return get_errno(setgroups(gidsetsize, grouplist));
10293         }
10294 #endif
10295 #ifdef TARGET_NR_fchown32
10296     case TARGET_NR_fchown32:
10297         return get_errno(fchown(arg1, arg2, arg3));
10298 #endif
10299 #ifdef TARGET_NR_setresuid32
10300     case TARGET_NR_setresuid32:
10301         return get_errno(sys_setresuid(arg1, arg2, arg3));
10302 #endif
10303 #ifdef TARGET_NR_getresuid32
10304     case TARGET_NR_getresuid32:
10305         {
10306             uid_t ruid, euid, suid;
10307             ret = get_errno(getresuid(&ruid, &euid, &suid));
10308             if (!is_error(ret)) {
10309                 if (put_user_u32(ruid, arg1)
10310                     || put_user_u32(euid, arg2)
10311                     || put_user_u32(suid, arg3))
10312                     return -TARGET_EFAULT;
10313             }
10314         }
10315         return ret;
10316 #endif
10317 #ifdef TARGET_NR_setresgid32
10318     case TARGET_NR_setresgid32:
10319         return get_errno(sys_setresgid(arg1, arg2, arg3));
10320 #endif
10321 #ifdef TARGET_NR_getresgid32
10322     case TARGET_NR_getresgid32:
10323         {
10324             gid_t rgid, egid, sgid;
10325             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10326             if (!is_error(ret)) {
10327                 if (put_user_u32(rgid, arg1)
10328                     || put_user_u32(egid, arg2)
10329                     || put_user_u32(sgid, arg3))
10330                     return -TARGET_EFAULT;
10331             }
10332         }
10333         return ret;
10334 #endif
10335 #ifdef TARGET_NR_chown32
10336     case TARGET_NR_chown32:
10337         if (!(p = lock_user_string(arg1)))
10338             return -TARGET_EFAULT;
10339         ret = get_errno(chown(p, arg2, arg3));
10340         unlock_user(p, arg1, 0);
10341         return ret;
10342 #endif
10343 #ifdef TARGET_NR_setuid32
10344     case TARGET_NR_setuid32:
10345         return get_errno(sys_setuid(arg1));
10346 #endif
10347 #ifdef TARGET_NR_setgid32
10348     case TARGET_NR_setgid32:
10349         return get_errno(sys_setgid(arg1));
10350 #endif
10351 #ifdef TARGET_NR_setfsuid32
10352     case TARGET_NR_setfsuid32:
10353         return get_errno(setfsuid(arg1));
10354 #endif
10355 #ifdef TARGET_NR_setfsgid32
10356     case TARGET_NR_setfsgid32:
10357         return get_errno(setfsgid(arg1));
10358 #endif
10359 #ifdef TARGET_NR_mincore
10360     case TARGET_NR_mincore:
10361         {
10362             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10363             if (!a) {
10364                 return -TARGET_ENOMEM;
10365             }
10366             p = lock_user_string(arg3);
10367             if (!p) {
10368                 ret = -TARGET_EFAULT;
10369             } else {
10370                 ret = get_errno(mincore(a, arg2, p));
10371                 unlock_user(p, arg3, ret);
10372             }
10373             unlock_user(a, arg1, 0);
10374         }
10375         return ret;
10376 #endif
10377 #ifdef TARGET_NR_arm_fadvise64_64
10378     case TARGET_NR_arm_fadvise64_64:
10379         /* arm_fadvise64_64 looks like fadvise64_64 but
10380          * with different argument order: fd, advice, offset, len
10381          * rather than the usual fd, offset, len, advice.
10382          * Note that offset and len are both 64-bit so appear as
10383          * pairs of 32-bit registers.
10384          */
10385         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10386                             target_offset64(arg5, arg6), arg2);
10387         return -host_to_target_errno(ret);
10388 #endif
10389 
10390 #if TARGET_ABI_BITS == 32
10391 
10392 #ifdef TARGET_NR_fadvise64_64
10393     case TARGET_NR_fadvise64_64:
10394 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10395         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10396         ret = arg2;
10397         arg2 = arg3;
10398         arg3 = arg4;
10399         arg4 = arg5;
10400         arg5 = arg6;
10401         arg6 = ret;
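        /* i.e. rotate (advice, offset pair, len pair) into the usual
         * (offset pair, len pair, advice) ordering consumed below. */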
10402 #else
10403         /* 6 args: fd, offset (high, low), len (high, low), advice */
10404         if (regpairs_aligned(cpu_env, num)) {
10405             /* offset is in (3,4), len in (5,6) and advice in 7 */
10406             arg2 = arg3;
10407             arg3 = arg4;
10408             arg4 = arg5;
10409             arg5 = arg6;
10410             arg6 = arg7;
10411         }
10412 #endif
10413         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10414                             target_offset64(arg4, arg5), arg6);
10415         return -host_to_target_errno(ret);
10416 #endif
10417 
10418 #ifdef TARGET_NR_fadvise64
10419     case TARGET_NR_fadvise64:
10420         /* 5 args: fd, offset (high, low), len, advice */
10421         if (regpairs_aligned(cpu_env, num)) {
10422             /* offset is in (3,4), len in 5 and advice in 6 */
10423             arg2 = arg3;
10424             arg3 = arg4;
10425             arg4 = arg5;
10426             arg5 = arg6;
10427         }
10428         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10429         return -host_to_target_errno(ret);
10430 #endif
10431 
10432 #else /* not a 32-bit ABI */
10433 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10434 #ifdef TARGET_NR_fadvise64_64
10435     case TARGET_NR_fadvise64_64:
10436 #endif
10437 #ifdef TARGET_NR_fadvise64
10438     case TARGET_NR_fadvise64:
10439 #endif
10440 #ifdef TARGET_S390X
10441         switch (arg4) {
10442         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10443         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10444         case 6: arg4 = POSIX_FADV_DONTNEED; break;
10445         case 7: arg4 = POSIX_FADV_NOREUSE; break;
10446         default: break;
10447         }
10448 #endif
10449         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10450 #endif
10451 #endif /* end of 64-bit ABI fadvise handling */
10452 
10453 #ifdef TARGET_NR_madvise
10454     case TARGET_NR_madvise:
10455         /* A straight passthrough may not be safe because qemu sometimes
10456            turns private file-backed mappings into anonymous mappings.
10457            This will break MADV_DONTNEED.
10458            This is a hint, so ignoring and returning success is ok.  */
10459         return 0;
10460 #endif
10461 #if TARGET_ABI_BITS == 32
10462     case TARGET_NR_fcntl64:
10463     {
10464         int cmd;
10465         struct flock64 fl;
10466         from_flock64_fn *copyfrom = copy_from_user_flock64;
10467         to_flock64_fn *copyto = copy_to_user_flock64;
10468 
10469 #ifdef TARGET_ARM
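        /* Old-ABI ARM lays out struct flock64 without the 64-bit member
         * alignment padding that EABI inserts, so it needs its own copy
         * helpers. */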
10470         if (!((CPUARMState *)cpu_env)->eabi) {
10471             copyfrom = copy_from_user_oabi_flock64;
10472             copyto = copy_to_user_oabi_flock64;
10473         }
10474 #endif
10475 
10476         cmd = target_to_host_fcntl_cmd(arg2);
10477         if (cmd == -TARGET_EINVAL) {
10478             return cmd;
10479         }
10480 
10481         switch(arg2) {
10482         case TARGET_F_GETLK64:
10483             ret = copyfrom(&fl, arg3);
10484             if (ret) {
10485                 break;
10486             }
10487             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10488             if (ret == 0) {
10489                 ret = copyto(arg3, &fl);
10490             }
10491             break;
10492 
10493         case TARGET_F_SETLK64:
10494         case TARGET_F_SETLKW64:
10495             ret = copyfrom(&fl, arg3);
10496             if (ret) {
10497                 break;
10498             }
10499             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10500             break;
10501         default:
10502             ret = do_fcntl(arg1, arg2, arg3);
10503             break;
10504         }
10505         return ret;
10506     }
10507 #endif
10508 #ifdef TARGET_NR_cacheflush
10509     case TARGET_NR_cacheflush:
10510         /* self-modifying code is handled automatically, so nothing needed */
10511         return 0;
10512 #endif
10513 #ifdef TARGET_NR_getpagesize
10514     case TARGET_NR_getpagesize:
10515         return TARGET_PAGE_SIZE;
10516 #endif
10517     case TARGET_NR_gettid:
10518         return get_errno(gettid());
10519 #ifdef TARGET_NR_readahead
10520     case TARGET_NR_readahead:
10521 #if TARGET_ABI_BITS == 32
10522         if (regpairs_aligned(cpu_env, num)) {
10523             arg2 = arg3;
10524             arg3 = arg4;
10525             arg4 = arg5;
10526         }
10527         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
10528 #else
10529         ret = get_errno(readahead(arg1, arg2, arg3));
10530 #endif
10531         return ret;
10532 #endif
10533 #ifdef CONFIG_ATTR
10534 #ifdef TARGET_NR_setxattr
10535     case TARGET_NR_listxattr:
10536     case TARGET_NR_llistxattr:
10537     {
10538         void *p, *b = 0;
10539         if (arg2) {
10540             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10541             if (!b) {
10542                 return -TARGET_EFAULT;
10543             }
10544         }
10545         p = lock_user_string(arg1);
10546         if (p) {
10547             if (num == TARGET_NR_listxattr) {
10548                 ret = get_errno(listxattr(p, b, arg3));
10549             } else {
10550                 ret = get_errno(llistxattr(p, b, arg3));
10551             }
10552         } else {
10553             ret = -TARGET_EFAULT;
10554         }
10555         unlock_user(p, arg1, 0);
10556         unlock_user(b, arg2, arg3);
10557         return ret;
10558     }
10559     case TARGET_NR_flistxattr:
10560     {
10561         void *b = 0;
10562         if (arg2) {
10563             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10564             if (!b) {
10565                 return -TARGET_EFAULT;
10566             }
10567         }
10568         ret = get_errno(flistxattr(arg1, b, arg3));
10569         unlock_user(b, arg2, arg3);
10570         return ret;
10571     }
10572     case TARGET_NR_setxattr:
10573     case TARGET_NR_lsetxattr:
10574         {
10575             void *p, *n, *v = 0;
10576             if (arg3) {
10577                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10578                 if (!v) {
10579                     return -TARGET_EFAULT;
10580                 }
10581             }
10582             p = lock_user_string(arg1);
10583             n = lock_user_string(arg2);
10584             if (p && n) {
10585                 if (num == TARGET_NR_setxattr) {
10586                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
10587                 } else {
10588                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10589                 }
10590             } else {
10591                 ret = -TARGET_EFAULT;
10592             }
10593             unlock_user(p, arg1, 0);
10594             unlock_user(n, arg2, 0);
10595             unlock_user(v, arg3, 0);
10596         }
10597         return ret;
10598     case TARGET_NR_fsetxattr:
10599         {
10600             void *n, *v = 0;
10601             if (arg3) {
10602                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10603                 if (!v) {
10604                     return -TARGET_EFAULT;
10605                 }
10606             }
10607             n = lock_user_string(arg2);
10608             if (n) {
10609                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10610             } else {
10611                 ret = -TARGET_EFAULT;
10612             }
10613             unlock_user(n, arg2, 0);
10614             unlock_user(v, arg3, 0);
10615         }
10616         return ret;
10617     case TARGET_NR_getxattr:
10618     case TARGET_NR_lgetxattr:
10619         {
10620             void *p, *n, *v = 0;
10621             if (arg3) {
10622                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10623                 if (!v) {
10624                     return -TARGET_EFAULT;
10625                 }
10626             }
10627             p = lock_user_string(arg1);
10628             n = lock_user_string(arg2);
10629             if (p && n) {
10630                 if (num == TARGET_NR_getxattr) {
10631                     ret = get_errno(getxattr(p, n, v, arg4));
10632                 } else {
10633                     ret = get_errno(lgetxattr(p, n, v, arg4));
10634                 }
10635             } else {
10636                 ret = -TARGET_EFAULT;
10637             }
10638             unlock_user(p, arg1, 0);
10639             unlock_user(n, arg2, 0);
10640             unlock_user(v, arg3, arg4);
10641         }
10642         return ret;
10643     case TARGET_NR_fgetxattr:
10644         {
10645             void *n, *v = 0;
10646             if (arg3) {
10647                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10648                 if (!v) {
10649                     return -TARGET_EFAULT;
10650                 }
10651             }
10652             n = lock_user_string(arg2);
10653             if (n) {
10654                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10655             } else {
10656                 ret = -TARGET_EFAULT;
10657             }
10658             unlock_user(n, arg2, 0);
10659             unlock_user(v, arg3, arg4);
10660         }
10661         return ret;
10662     case TARGET_NR_removexattr:
10663     case TARGET_NR_lremovexattr:
10664         {
10665             void *p, *n;
10666             p = lock_user_string(arg1);
10667             n = lock_user_string(arg2);
10668             if (p && n) {
10669                 if (num == TARGET_NR_removexattr) {
10670                     ret = get_errno(removexattr(p, n));
10671                 } else {
10672                     ret = get_errno(lremovexattr(p, n));
10673                 }
10674             } else {
10675                 ret = -TARGET_EFAULT;
10676             }
10677             unlock_user(p, arg1, 0);
10678             unlock_user(n, arg2, 0);
10679         }
10680         return ret;
10681     case TARGET_NR_fremovexattr:
10682         {
10683             void *n;
10684             n = lock_user_string(arg2);
10685             if (n) {
10686                 ret = get_errno(fremovexattr(arg1, n));
10687             } else {
10688                 ret = -TARGET_EFAULT;
10689             }
10690             unlock_user(n, arg2, 0);
10691         }
10692         return ret;
10693 #endif
10694 #endif /* CONFIG_ATTR */
10695 #ifdef TARGET_NR_set_thread_area
10696     case TARGET_NR_set_thread_area:
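            /*
             * Thread-pointer handling is inherently per-target; each
             * architecture below stores arg1 wherever its emulation keeps
             * the thread-local storage pointer.
             */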
10697 #if defined(TARGET_MIPS)
10698       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10699       return 0;
10700 #elif defined(TARGET_CRIS)
10701       if (arg1 & 0xff)
10702           ret = -TARGET_EINVAL;
10703       else {
10704           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10705           ret = 0;
10706       }
10707       return ret;
10708 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10709       return do_set_thread_area(cpu_env, arg1);
10710 #elif defined(TARGET_M68K)
10711       {
10712           TaskState *ts = cpu->opaque;
10713           ts->tp_value = arg1;
10714           return 0;
10715       }
10716 #else
10717       return -TARGET_ENOSYS;
10718 #endif
10719 #endif
10720 #ifdef TARGET_NR_get_thread_area
10721     case TARGET_NR_get_thread_area:
10722 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10723         return do_get_thread_area(cpu_env, arg1);
10724 #elif defined(TARGET_M68K)
10725         {
10726             TaskState *ts = cpu->opaque;
10727             return ts->tp_value;
10728         }
10729 #else
10730         return -TARGET_ENOSYS;
10731 #endif
10732 #endif
10733 #ifdef TARGET_NR_getdomainname
10734     case TARGET_NR_getdomainname:
10735         return -TARGET_ENOSYS;
10736 #endif
10737 
10738 #ifdef TARGET_NR_clock_settime
10739     case TARGET_NR_clock_settime:
10740     {
10741         struct timespec ts;
10742 
10743         ret = target_to_host_timespec(&ts, arg2);
10744         if (!is_error(ret)) {
10745             ret = get_errno(clock_settime(arg1, &ts));
10746         }
10747         return ret;
10748     }
10749 #endif
10750 #ifdef TARGET_NR_clock_gettime
10751     case TARGET_NR_clock_gettime:
10752     {
10753         struct timespec ts;
10754         ret = get_errno(clock_gettime(arg1, &ts));
10755         if (!is_error(ret)) {
10756             ret = host_to_target_timespec(arg2, &ts);
10757         }
10758         return ret;
10759     }
10760 #endif
10761 #ifdef TARGET_NR_clock_getres
10762     case TARGET_NR_clock_getres:
10763     {
10764         struct timespec ts;
10765         ret = get_errno(clock_getres(arg1, &ts));
10766         if (!is_error(ret)) {
10767             host_to_target_timespec(arg2, &ts);
10768         }
10769         return ret;
10770     }
10771 #endif
10772 #ifdef TARGET_NR_clock_nanosleep
10773     case TARGET_NR_clock_nanosleep:
10774     {
10775         struct timespec ts;
10776         target_to_host_timespec(&ts, arg3);
10777         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
10778                                              &ts, arg4 ? &ts : NULL));
10779         if (arg4)
10780             host_to_target_timespec(arg4, &ts);
10781 
10782 #if defined(TARGET_PPC)
10783         /* clock_nanosleep is odd in that it returns positive errno values.
10784          * On PPC, CR0 bit 3 should be set in such a situation. */
10785         if (ret && ret != -TARGET_ERESTARTSYS) {
10786             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
10787         }
10788 #endif
10789         return ret;
10790     }
10791 #endif
10792 
10793 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10794     case TARGET_NR_set_tid_address:
10795         return get_errno(set_tid_address((int *)g2h(arg1)));
10796 #endif
10797 
10798     case TARGET_NR_tkill:
10799         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10800 
10801     case TARGET_NR_tgkill:
10802         return get_errno(safe_tgkill((int)arg1, (int)arg2,
10803                          target_to_host_signal(arg3)));
10804 
10805 #ifdef TARGET_NR_set_robust_list
10806     case TARGET_NR_set_robust_list:
10807     case TARGET_NR_get_robust_list:
10808         /* The ABI for supporting robust futexes has userspace pass
10809          * the kernel a pointer to a linked list which is updated by
10810          * userspace after the syscall; the list is walked by the kernel
10811          * when the thread exits. Since the linked list in QEMU guest
10812          * memory isn't a valid linked list for the host and we have
10813          * no way to reliably intercept the thread-death event, we can't
10814          * support these. Silently return ENOSYS so that guest userspace
10815          * falls back to a non-robust futex implementation (which should
10816          * be OK except in the corner case of the guest crashing while
10817          * holding a mutex that is shared with another process via
10818          * shared memory).
10819          */
10820         return -TARGET_ENOSYS;
10821 #endif
10822 
10823 #if defined(TARGET_NR_utimensat)
10824     case TARGET_NR_utimensat:
10825         {
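                  /*
                   * arg3 == 0 means "set both timestamps to the current
                   * time"; arg2 == 0 passes a NULL path so the call applies
                   * to the dirfd itself, as the raw syscall permits.
                   */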
10826             struct timespec *tsp, ts[2];
10827             if (!arg3) {
10828                 tsp = NULL;
10829             } else {
10830                 target_to_host_timespec(ts, arg3);
10831                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
10832                 tsp = ts;
10833             }
10834             if (!arg2)
10835                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
10836             else {
10837                 if (!(p = lock_user_string(arg2))) {
10838                     return -TARGET_EFAULT;
10839                 }
10840                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
10841                 unlock_user(p, arg2, 0);
10842             }
10843         }
10844         return ret;
10845 #endif
10846     case TARGET_NR_futex:
10847         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
10848 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10849     case TARGET_NR_inotify_init:
10850         ret = get_errno(sys_inotify_init());
10851         if (ret >= 0) {
10852             fd_trans_register(ret, &target_inotify_trans);
10853         }
10854         return ret;
10855 #endif
10856 #ifdef CONFIG_INOTIFY1
10857 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10858     case TARGET_NR_inotify_init1:
10859         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
10860                                           fcntl_flags_tbl)));
10861         if (ret >= 0) {
10862             fd_trans_register(ret, &target_inotify_trans);
10863         }
10864         return ret;
10865 #endif
10866 #endif
10867 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10868     case TARGET_NR_inotify_add_watch:
10869         p = lock_user_string(arg2);
10870         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
10871         unlock_user(p, arg2, 0);
10872         return ret;
10873 #endif
10874 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10875     case TARGET_NR_inotify_rm_watch:
10876         return get_errno(sys_inotify_rm_watch(arg1, arg2));
10877 #endif
10878 
10879 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10880     case TARGET_NR_mq_open:
10881         {
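                  /*
                   * arg4 optionally supplies message queue attributes; the
                   * open flags are translated via fcntl_flags_tbl like other
                   * open-style flag arguments.
                   */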
10882             struct mq_attr posix_mq_attr;
10883             struct mq_attr *pposix_mq_attr;
10884             int host_flags;
10885 
10886             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
10887             pposix_mq_attr = NULL;
10888             if (arg4) {
10889                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
10890                     return -TARGET_EFAULT;
10891                 }
10892                 pposix_mq_attr = &posix_mq_attr;
10893             }
10894             p = lock_user_string(arg1 - 1);
10895             if (!p) {
10896                 return -TARGET_EFAULT;
10897             }
10898             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
10899             unlock_user(p, arg1, 0);
10900         }
10901         return ret;
10902 
10903     case TARGET_NR_mq_unlink:
10904         p = lock_user_string(arg1 - 1);
10905         if (!p) {
10906             return -TARGET_EFAULT;
10907         }
10908         ret = get_errno(mq_unlink(p));
10909         unlock_user(p, arg1, 0);
10910         return ret;
10911 
10912     case TARGET_NR_mq_timedsend:
10913         {
10914             struct timespec ts;
10915 
10916             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10917             if (arg5 != 0) {
10918                 target_to_host_timespec(&ts, arg5);
10919                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
10920                 host_to_target_timespec(arg5, &ts);
10921             } else {
10922                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
10923             }
10924             unlock_user(p, arg2, arg3);
10925         }
10926         return ret;
10927 
10928     case TARGET_NR_mq_timedreceive:
10929         {
10930             struct timespec ts;
10931             unsigned int prio;
10932 
10933             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10934             if (arg5 != 0) {
10935                 target_to_host_timespec(&ts, arg5);
10936                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
10937                                                      &prio, &ts));
10938                 host_to_target_timespec(arg5, &ts);
10939             } else {
10940                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
10941                                                      &prio, NULL));
10942             }
10943             unlock_user(p, arg2, arg3);
10944             if (arg4 != 0)
10945                 put_user_u32(prio, arg4);
10946         }
10947         return ret;
10948 
10949     /* Not implemented for now... */
10950 /*     case TARGET_NR_mq_notify: */
10951 /*         break; */
10952 
10953     case TARGET_NR_mq_getsetattr:
10954         {
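                  /*
                   * Both pointers are optional: arg2 selects mq_setattr()
                   * with the new attributes, arg3 alone means a plain
                   * mq_getattr(); the old attributes are copied out via arg3.
                   */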
10955             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
10956             ret = 0;
10957             if (arg2 != 0) {
10958                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
10959                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
10960                                            &posix_mq_attr_out));
10961             } else if (arg3 != 0) {
10962                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
10963             }
10964             if (ret == 0 && arg3 != 0) {
10965                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
10966             }
10967         }
10968         return ret;
10969 #endif
10970 
10971 #ifdef CONFIG_SPLICE
10972 #ifdef TARGET_NR_tee
10973     case TARGET_NR_tee:
10974         {
10975             ret = get_errno(tee(arg1, arg2, arg3, arg4));
10976         }
10977         return ret;
10978 #endif
10979 #ifdef TARGET_NR_splice
10980     case TARGET_NR_splice:
10981         {
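                  /*
                   * The in/out offsets are optional; when present they are
                   * read from guest memory, handed to the host splice(), and
                   * the updated values written back afterwards.
                   */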
10982             loff_t loff_in, loff_out;
10983             loff_t *ploff_in = NULL, *ploff_out = NULL;
10984             if (arg2) {
10985                 if (get_user_u64(loff_in, arg2)) {
10986                     return -TARGET_EFAULT;
10987                 }
10988                 ploff_in = &loff_in;
10989             }
10990             if (arg4) {
10991                 if (get_user_u64(loff_out, arg4)) {
10992                     return -TARGET_EFAULT;
10993                 }
10994                 ploff_out = &loff_out;
10995             }
10996             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
10997             if (arg2) {
10998                 if (put_user_u64(loff_in, arg2)) {
10999                     return -TARGET_EFAULT;
11000                 }
11001             }
11002             if (arg4) {
11003                 if (put_user_u64(loff_out, arg4)) {
11004                     return -TARGET_EFAULT;
11005                 }
11006             }
11007         }
11008         return ret;
11009 #endif
11010 #ifdef TARGET_NR_vmsplice
11011     case TARGET_NR_vmsplice:
11012         {
11013             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11014             if (vec != NULL) {
11015                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11016                 unlock_iovec(vec, arg2, arg3, 0);
11017             } else {
11018                 ret = -host_to_target_errno(errno);
11019             }
11020         }
11021         return ret;
11022 #endif
11023 #endif /* CONFIG_SPLICE */
11024 #ifdef CONFIG_EVENTFD
11025 #if defined(TARGET_NR_eventfd)
11026     case TARGET_NR_eventfd:
11027         ret = get_errno(eventfd(arg1, 0));
11028         if (ret >= 0) {
11029             fd_trans_register(ret, &target_eventfd_trans);
11030         }
11031         return ret;
11032 #endif
11033 #if defined(TARGET_NR_eventfd2)
11034     case TARGET_NR_eventfd2:
11035     {
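              /*
               * Translate the guest O_NONBLOCK/O_CLOEXEC bits to their host
               * values and pass any remaining flag bits through unchanged.
               */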
11036         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11037         if (arg2 & TARGET_O_NONBLOCK) {
11038             host_flags |= O_NONBLOCK;
11039         }
11040         if (arg2 & TARGET_O_CLOEXEC) {
11041             host_flags |= O_CLOEXEC;
11042         }
11043         ret = get_errno(eventfd(arg1, host_flags));
11044         if (ret >= 0) {
11045             fd_trans_register(ret, &target_eventfd_trans);
11046         }
11047         return ret;
11048     }
11049 #endif
11050 #endif /* CONFIG_EVENTFD  */
11051 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11052     case TARGET_NR_fallocate:
11053 #if TARGET_ABI_BITS == 32
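              /*
               * On 32-bit ABIs both the offset and the length are passed as
               * register pairs and reassembled here.
               */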
11054         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11055                                   target_offset64(arg5, arg6)));
11056 #else
11057         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11058 #endif
11059         return ret;
11060 #endif
11061 #if defined(CONFIG_SYNC_FILE_RANGE)
11062 #if defined(TARGET_NR_sync_file_range)
11063     case TARGET_NR_sync_file_range:
11064 #if TARGET_ABI_BITS == 32
11065 #if defined(TARGET_MIPS)
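              /*
               * MIPS shifts every argument up by one register compared with
               * the other 32-bit targets (padding after the fd keeps the
               * 64-bit pairs aligned), so the offsets start at arg3 and the
               * flags end up in arg7.
               */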
11066         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11067                                         target_offset64(arg5, arg6), arg7));
11068 #else
11069         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11070                                         target_offset64(arg4, arg5), arg6));
11071 #endif /* !TARGET_MIPS */
11072 #else
11073         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11074 #endif
11075         return ret;
11076 #endif
11077 #if defined(TARGET_NR_sync_file_range2)
11078     case TARGET_NR_sync_file_range2:
11079         /* This is like sync_file_range but the arguments are reordered */
11080 #if TARGET_ABI_BITS == 32
11081         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11082                                         target_offset64(arg5, arg6), arg2));
11083 #else
11084         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11085 #endif
11086         return ret;
11087 #endif
11088 #endif
11089 #if defined(TARGET_NR_signalfd4)
11090     case TARGET_NR_signalfd4:
11091         return do_signalfd4(arg1, arg2, arg4);
11092 #endif
11093 #if defined(TARGET_NR_signalfd)
11094     case TARGET_NR_signalfd:
11095         return do_signalfd4(arg1, arg2, 0);
11096 #endif
11097 #if defined(CONFIG_EPOLL)
11098 #if defined(TARGET_NR_epoll_create)
11099     case TARGET_NR_epoll_create:
11100         return get_errno(epoll_create(arg1));
11101 #endif
11102 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11103     case TARGET_NR_epoll_create1:
11104         return get_errno(epoll_create1(arg1));
11105 #endif
11106 #if defined(TARGET_NR_epoll_ctl)
11107     case TARGET_NR_epoll_ctl:
11108     {
11109         struct epoll_event ep;
11110         struct epoll_event *epp = 0;
11111         if (arg4) {
11112             struct target_epoll_event *target_ep;
11113             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11114                 return -TARGET_EFAULT;
11115             }
11116             ep.events = tswap32(target_ep->events);
11117             /* The epoll_data_t union is just opaque data to the kernel,
11118              * so we transfer all 64 bits across and need not worry what
11119              * actual data type it is.
11120              */
11121             ep.data.u64 = tswap64(target_ep->data.u64);
11122             unlock_user_struct(target_ep, arg4, 0);
11123             epp = &ep;
11124         }
11125         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11126     }
11127 #endif
11128 
11129 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11130 #if defined(TARGET_NR_epoll_wait)
11131     case TARGET_NR_epoll_wait:
11132 #endif
11133 #if defined(TARGET_NR_epoll_pwait)
11134     case TARGET_NR_epoll_pwait:
11135 #endif
11136     {
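              /*
               * Both variants funnel through safe_epoll_pwait(); plain
               * epoll_wait simply passes a NULL signal mask.  The resulting
               * events are byte-swapped back into the guest's buffer.
               */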
11137         struct target_epoll_event *target_ep;
11138         struct epoll_event *ep;
11139         int epfd = arg1;
11140         int maxevents = arg3;
11141         int timeout = arg4;
11142 
11143         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11144             return -TARGET_EINVAL;
11145         }
11146 
11147         target_ep = lock_user(VERIFY_WRITE, arg2,
11148                               maxevents * sizeof(struct target_epoll_event), 1);
11149         if (!target_ep) {
11150             return -TARGET_EFAULT;
11151         }
11152 
11153         ep = g_try_new(struct epoll_event, maxevents);
11154         if (!ep) {
11155             unlock_user(target_ep, arg2, 0);
11156             return -TARGET_ENOMEM;
11157         }
11158 
11159         switch (num) {
11160 #if defined(TARGET_NR_epoll_pwait)
11161         case TARGET_NR_epoll_pwait:
11162         {
11163             target_sigset_t *target_set;
11164             sigset_t _set, *set = &_set;
11165 
11166             if (arg5) {
11167                 if (arg6 != sizeof(target_sigset_t)) {
11168                     ret = -TARGET_EINVAL;
11169                     break;
11170                 }
11171 
11172                 target_set = lock_user(VERIFY_READ, arg5,
11173                                        sizeof(target_sigset_t), 1);
11174                 if (!target_set) {
11175                     ret = -TARGET_EFAULT;
11176                     break;
11177                 }
11178                 target_to_host_sigset(set, target_set);
11179                 unlock_user(target_set, arg5, 0);
11180             } else {
11181                 set = NULL;
11182             }
11183 
11184             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11185                                              set, SIGSET_T_SIZE));
11186             break;
11187         }
11188 #endif
11189 #if defined(TARGET_NR_epoll_wait)
11190         case TARGET_NR_epoll_wait:
11191             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11192                                              NULL, 0));
11193             break;
11194 #endif
11195         default:
11196             ret = -TARGET_ENOSYS;
11197         }
11198         if (!is_error(ret)) {
11199             int i;
11200             for (i = 0; i < ret; i++) {
11201                 target_ep[i].events = tswap32(ep[i].events);
11202                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11203             }
11204             unlock_user(target_ep, arg2,
11205                         ret * sizeof(struct target_epoll_event));
11206         } else {
11207             unlock_user(target_ep, arg2, 0);
11208         }
11209         g_free(ep);
11210         return ret;
11211     }
11212 #endif
11213 #endif
11214 #ifdef TARGET_NR_prlimit64
11215     case TARGET_NR_prlimit64:
11216     {
11217         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11218         struct target_rlimit64 *target_rnew, *target_rold;
11219         struct host_rlimit64 rnew, rold, *rnewp = 0;
11220         int resource = target_to_host_resource(arg2);
11221         if (arg3) {
11222             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11223                 return -TARGET_EFAULT;
11224             }
11225             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11226             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11227             unlock_user_struct(target_rnew, arg3, 0);
11228             rnewp = &rnew;
11229         }
11230 
11231         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11232         if (!is_error(ret) && arg4) {
11233             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11234                 return -TARGET_EFAULT;
11235             }
11236             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11237             target_rold->rlim_max = tswap64(rold.rlim_max);
11238             unlock_user_struct(target_rold, arg4, 1);
11239         }
11240         return ret;
11241     }
11242 #endif
11243 #ifdef TARGET_NR_gethostname
11244     case TARGET_NR_gethostname:
11245     {
11246         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11247         if (name) {
11248             ret = get_errno(gethostname(name, arg2));
11249             unlock_user(name, arg1, arg2);
11250         } else {
11251             ret = -TARGET_EFAULT;
11252         }
11253         return ret;
11254     }
11255 #endif
11256 #ifdef TARGET_NR_atomic_cmpxchg_32
11257     case TARGET_NR_atomic_cmpxchg_32:
11258     {
11259         /* should use start_exclusive from main.c */
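              /*
               * Emulated compare-and-swap on the guest word at arg6: the old
               * value is returned, and arg1 is stored only if the old value
               * equals arg2.
               */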
11260         abi_ulong mem_value;
11261         if (get_user_u32(mem_value, arg6)) {
11262             target_siginfo_t info;
11263             info.si_signo = SIGSEGV;
11264             info.si_errno = 0;
11265             info.si_code = TARGET_SEGV_MAPERR;
11266             info._sifields._sigfault._addr = arg6;
11267             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11268                          QEMU_SI_FAULT, &info);
11269             ret = 0xdeadbeef;
11270 
11271         }
11272         if (mem_value == arg2)
11273             put_user_u32(arg1, arg6);
11274         return mem_value;
11275     }
11276 #endif
11277 #ifdef TARGET_NR_atomic_barrier
11278     case TARGET_NR_atomic_barrier:
11279         /* Like the kernel implementation and the QEMU ARM barrier,
11280            treat this as a no-op. */
11281         return 0;
11282 #endif
11283 
11284 #ifdef TARGET_NR_timer_create
11285     case TARGET_NR_timer_create:
11286     {
11287         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11288 
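              /*
               * Host timers live in g_posix_timers[]; the id handed back to
               * the guest is TIMER_MAGIC ORed with the array index, which
               * get_timer_id() validates and strips on later timer_* calls.
               */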
11289         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11290 
11291         int clkid = arg1;
11292         int timer_index = next_free_host_timer();
11293 
11294         if (timer_index < 0) {
11295             ret = -TARGET_EAGAIN;
11296         } else {
11297             timer_t *phtimer = g_posix_timers + timer_index;
11298 
11299             if (arg2) {
11300                 phost_sevp = &host_sevp;
11301                 ret = target_to_host_sigevent(phost_sevp, arg2);
11302                 if (ret != 0) {
11303                     return ret;
11304                 }
11305             }
11306 
11307             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11308             if (ret) {
11309                 phtimer = NULL;
11310             } else {
11311                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11312                     return -TARGET_EFAULT;
11313                 }
11314             }
11315         }
11316         return ret;
11317     }
11318 #endif
11319 
11320 #ifdef TARGET_NR_timer_settime
11321     case TARGET_NR_timer_settime:
11322     {
11323         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11324          * struct itimerspec * old_value */
11325         target_timer_t timerid = get_timer_id(arg1);
11326 
11327         if (timerid < 0) {
11328             ret = timerid;
11329         } else if (arg3 == 0) {
11330             ret = -TARGET_EINVAL;
11331         } else {
11332             timer_t htimer = g_posix_timers[timerid];
11333             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11334 
11335             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11336                 return -TARGET_EFAULT;
11337             }
11338             ret = get_errno(
11339                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11340             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11341                 return -TARGET_EFAULT;
11342             }
11343         }
11344         return ret;
11345     }
11346 #endif
11347 
11348 #ifdef TARGET_NR_timer_gettime
11349     case TARGET_NR_timer_gettime:
11350     {
11351         /* args: timer_t timerid, struct itimerspec *curr_value */
11352         target_timer_t timerid = get_timer_id(arg1);
11353 
11354         if (timerid < 0) {
11355             ret = timerid;
11356         } else if (!arg2) {
11357             ret = -TARGET_EFAULT;
11358         } else {
11359             timer_t htimer = g_posix_timers[timerid];
11360             struct itimerspec hspec;
11361             ret = get_errno(timer_gettime(htimer, &hspec));
11362 
11363             if (host_to_target_itimerspec(arg2, &hspec)) {
11364                 ret = -TARGET_EFAULT;
11365             }
11366         }
11367         return ret;
11368     }
11369 #endif
11370 
11371 #ifdef TARGET_NR_timer_getoverrun
11372     case TARGET_NR_timer_getoverrun:
11373     {
11374         /* args: timer_t timerid */
11375         target_timer_t timerid = get_timer_id(arg1);
11376 
11377         if (timerid < 0) {
11378             ret = timerid;
11379         } else {
11380             timer_t htimer = g_posix_timers[timerid];
11381             ret = get_errno(timer_getoverrun(htimer));
11382         }
11383         fd_trans_unregister(ret);
11384         return ret;
11385     }
11386 #endif
11387 
11388 #ifdef TARGET_NR_timer_delete
11389     case TARGET_NR_timer_delete:
11390     {
11391         /* args: timer_t timerid */
11392         target_timer_t timerid = get_timer_id(arg1);
11393 
11394         if (timerid < 0) {
11395             ret = timerid;
11396         } else {
11397             timer_t htimer = g_posix_timers[timerid];
11398             ret = get_errno(timer_delete(htimer));
11399             g_posix_timers[timerid] = 0;
11400         }
11401         return ret;
11402     }
11403 #endif
11404 
11405 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11406     case TARGET_NR_timerfd_create:
11407         return get_errno(timerfd_create(arg1,
11408                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11409 #endif
11410 
11411 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11412     case TARGET_NR_timerfd_gettime:
11413         {
11414             struct itimerspec its_curr;
11415 
11416             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11417 
11418             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11419                 return -TARGET_EFAULT;
11420             }
11421         }
11422         return ret;
11423 #endif
11424 
11425 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11426     case TARGET_NR_timerfd_settime:
11427         {
11428             struct itimerspec its_new, its_old, *p_new;
11429 
11430             if (arg3) {
11431                 if (target_to_host_itimerspec(&its_new, arg3)) {
11432                     return -TARGET_EFAULT;
11433                 }
11434                 p_new = &its_new;
11435             } else {
11436                 p_new = NULL;
11437             }
11438 
11439             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11440 
11441             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11442                 return -TARGET_EFAULT;
11443             }
11444         }
11445         return ret;
11446 #endif
11447 
11448 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11449     case TARGET_NR_ioprio_get:
11450         return get_errno(ioprio_get(arg1, arg2));
11451 #endif
11452 
11453 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11454     case TARGET_NR_ioprio_set:
11455         return get_errno(ioprio_set(arg1, arg2, arg3));
11456 #endif
11457 
11458 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11459     case TARGET_NR_setns:
11460         return get_errno(setns(arg1, arg2));
11461 #endif
11462 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11463     case TARGET_NR_unshare:
11464         return get_errno(unshare(arg1));
11465 #endif
11466 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11467     case TARGET_NR_kcmp:
11468         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11469 #endif
11470 #ifdef TARGET_NR_swapcontext
11471     case TARGET_NR_swapcontext:
11472         /* PowerPC specific.  */
11473         return do_swapcontext(cpu_env, arg1, arg2, arg3);
11474 #endif
11475 
11476     default:
11477         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11478         return -TARGET_ENOSYS;
11479     }
11480     return ret;
11481 }
11482 
11483 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11484                     abi_long arg2, abi_long arg3, abi_long arg4,
11485                     abi_long arg5, abi_long arg6, abi_long arg7,
11486                     abi_long arg8)
11487 {
11488     CPUState *cpu = ENV_GET_CPU(cpu_env);
11489     abi_long ret;
11490 
11491 #ifdef DEBUG_ERESTARTSYS
11492     /* Debug-only code for exercising the syscall-restart code paths
11493      * in the per-architecture cpu main loops: restart every syscall
11494      * the guest makes once before letting it through.
11495      */
11496     {
11497         static bool flag;
11498         flag = !flag;
11499         if (flag) {
11500             return -TARGET_ERESTARTSYS;
11501         }
11502     }
11503 #endif
11504 
11505     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11506                              arg5, arg6, arg7, arg8);
11507 
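          /*
           * With -strace enabled, log the syscall and its result around the
           * actual dispatch to do_syscall1().
           */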
11508     if (unlikely(do_strace)) {
11509         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11510         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11511                           arg5, arg6, arg7, arg8);
11512         print_syscall_ret(num, ret);
11513     } else {
11514         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11515                           arg5, arg6, arg7, arg8);
11516     }
11517 
11518     trace_guest_user_syscall_ret(cpu, num, ret);
11519     return ret;
11520 }
11521