xref: /openbmc/qemu/linux-user/syscall.c (revision 53d28455)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
77 
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
84 
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #if defined(CONFIG_USBFS)
98 #include <linux/usbdevice_fs.h>
99 #include <linux/usb/ch9.h>
100 #endif
101 #include <linux/vt.h>
102 #include <linux/dm-ioctl.h>
103 #include <linux/reboot.h>
104 #include <linux/route.h>
105 #include <linux/filter.h>
106 #include <linux/blkpg.h>
107 #include <netpacket/packet.h>
108 #include <linux/netlink.h>
109 #include "linux_loop.h"
110 #include "uname.h"
111 
112 #include "qemu.h"
113 #include "fd-trans.h"
114 
115 #ifndef CLONE_IO
116 #define CLONE_IO                0x80000000      /* Clone io context */
117 #endif
118 
119 /* We can't directly call the host clone syscall, because this will
120  * badly confuse libc (breaking mutexes, for example). So we must
121  * divide clone flags into:
122  *  * flag combinations that look like pthread_create()
123  *  * flag combinations that look like fork()
124  *  * flags we can implement within QEMU itself
125  *  * flags we can't support and will return an error for
126  */
127 /* For thread creation, all these flags must be present; for
128  * fork, none must be present.
129  */
130 #define CLONE_THREAD_FLAGS                              \
131     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
132      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
133 
134 /* These flags are ignored:
135  * CLONE_DETACHED is now ignored by the kernel;
136  * CLONE_IO is just an optimisation hint to the I/O scheduler
137  */
138 #define CLONE_IGNORED_FLAGS                     \
139     (CLONE_DETACHED | CLONE_IO)
140 
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS               \
143     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
144      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
145 
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
148     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
149      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
150 
151 #define CLONE_INVALID_FORK_FLAGS                                        \
152     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
153 
154 #define CLONE_INVALID_THREAD_FLAGS                                      \
155     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
156        CLONE_IGNORED_FLAGS))
157 
158 /* CLONE_VFORK is special-cased early in do_fork(). The other flag bits
159  * have almost all been allocated. We cannot support any of
160  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162  * The checks against the invalid thread masks above will catch these.
163  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
164  */
165 
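/*
 * Illustrative sketch (hypothetical helper, deliberately guarded out of
 * the build): how the masks above can be used to validate a guest clone()
 * request.  do_fork() later in this file applies essentially these checks.
 */
#if 0
static int example_check_clone_flags(unsigned int flags, bool is_thread)
{
    if (is_thread) {
        /* pthread_create()-style: every mandatory thread flag must be
         * present, and nothing outside the supported/ignored sets may
         * be set.
         */
        if ((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }
    } else {
        /* fork()-style: only the exit-signal bits plus the optional and
         * ignored flags are accepted.
         */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }
    }
    return 0;
}
#endif
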
166 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
167  * once. This exercises the codepaths for restart.
168  */
169 //#define DEBUG_ERESTARTSYS
170 
171 //#include <linux/msdos_fs.h>
172 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
173 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
174 
175 #undef _syscall0
176 #undef _syscall1
177 #undef _syscall2
178 #undef _syscall3
179 #undef _syscall4
180 #undef _syscall5
181 #undef _syscall6
182 
183 #define _syscall0(type,name)		\
184 static type name (void)			\
185 {					\
186 	return syscall(__NR_##name);	\
187 }
188 
189 #define _syscall1(type,name,type1,arg1)		\
190 static type name (type1 arg1)			\
191 {						\
192 	return syscall(__NR_##name, arg1);	\
193 }
194 
195 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
196 static type name (type1 arg1,type2 arg2)		\
197 {							\
198 	return syscall(__NR_##name, arg1, arg2);	\
199 }
200 
201 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
202 static type name (type1 arg1,type2 arg2,type3 arg3)		\
203 {								\
204 	return syscall(__NR_##name, arg1, arg2, arg3);		\
205 }
206 
207 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
208 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
209 {										\
210 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
211 }
212 
213 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
214 		  type5,arg5)							\
215 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
216 {										\
217 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
218 }
219 
220 
221 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
222 		  type5,arg5,type6,arg6)					\
223 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
224                   type6 arg6)							\
225 {										\
226 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
227 }
228 
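/*
 * Illustrative expansion (guarded out of the build): the macros above are
 * thin wrappers around the host syscall() function.  For instance,
 * _syscall0(int, gettid) used below expands to roughly:
 */
#if 0
static int example_gettid(void)
{
    return syscall(__NR_gettid);
}
#endif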
229 
230 #define __NR_sys_uname __NR_uname
231 #define __NR_sys_getcwd1 __NR_getcwd
232 #define __NR_sys_getdents __NR_getdents
233 #define __NR_sys_getdents64 __NR_getdents64
234 #define __NR_sys_getpriority __NR_getpriority
235 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
236 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
237 #define __NR_sys_syslog __NR_syslog
238 #define __NR_sys_futex __NR_futex
239 #define __NR_sys_inotify_init __NR_inotify_init
240 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
241 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
242 
243 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
244 #define __NR__llseek __NR_lseek
245 #endif
246 
247 /* Newer kernel ports have llseek() instead of _llseek() */
248 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
249 #define TARGET_NR__llseek TARGET_NR_llseek
250 #endif
251 
252 #ifdef __NR_gettid
253 _syscall0(int, gettid)
254 #else
255 /* This is a replacement for the host gettid() and must return a host
256    errno. */
257 static int gettid(void) {
258     return -ENOSYS;
259 }
260 #endif
261 
262 /* For the 64-bit guest on 32-bit host case we must emulate
263  * getdents using getdents64, because otherwise the host
264  * might hand us back more dirent records than we can fit
265  * into the guest buffer after structure format conversion.
266  * Otherwise we emulate getdents with getdents if the host has it.
267  */
268 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
269 #define EMULATE_GETDENTS_WITH_GETDENTS
270 #endif
271 
272 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
273 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
274 #endif
275 #if (defined(TARGET_NR_getdents) && \
276       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
277     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
278 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
279 #endif
280 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
281 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
282           loff_t *, res, uint, wh);
283 #endif
284 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
285 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
286           siginfo_t *, uinfo)
287 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
288 #ifdef __NR_exit_group
289 _syscall1(int,exit_group,int,error_code)
290 #endif
291 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
292 _syscall1(int,set_tid_address,int *,tidptr)
293 #endif
294 #if defined(TARGET_NR_futex) && defined(__NR_futex)
295 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
296           const struct timespec *,timeout,int *,uaddr2,int,val3)
297 #endif
298 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
299 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
300           unsigned long *, user_mask_ptr);
301 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
302 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
303           unsigned long *, user_mask_ptr);
304 #define __NR_sys_getcpu __NR_getcpu
305 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
306 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
307           void *, arg);
308 _syscall2(int, capget, struct __user_cap_header_struct *, header,
309           struct __user_cap_data_struct *, data);
310 _syscall2(int, capset, struct __user_cap_header_struct *, header,
311           struct __user_cap_data_struct *, data);
312 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
313 _syscall2(int, ioprio_get, int, which, int, who)
314 #endif
315 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
316 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
317 #endif
318 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
319 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
320 #endif
321 
322 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
323 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
324           unsigned long, idx1, unsigned long, idx2)
325 #endif
326 
327 static bitmask_transtbl fcntl_flags_tbl[] = {
328   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
329   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
330   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
331   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
332   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
333   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
334   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
335   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
336   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
337   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
338   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
339   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
340   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
341 #if defined(O_DIRECT)
342   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
343 #endif
344 #if defined(O_NOATIME)
345   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
346 #endif
347 #if defined(O_CLOEXEC)
348   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
349 #endif
350 #if defined(O_PATH)
351   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
352 #endif
353 #if defined(O_TMPFILE)
354   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
355 #endif
356   /* Don't terminate the list prematurely on 64-bit host+guest.  */
357 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
358   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
359 #endif
360   { 0, 0, 0, 0 }
361 };
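
/*
 * Sketch (guarded out of the build) of how a bitmask_transtbl such as
 * fcntl_flags_tbl is consumed.  The struct and function below use made-up
 * names purely to illustrate the row layout
 * { target_mask, target_bits, host_mask, host_bits }, with an all-zero
 * entry terminating the table; the real helpers are not shown here.
 */
#if 0
struct example_flag_row {
    unsigned int t_mask, t_bits, h_mask, h_bits;
};

static unsigned int example_target_to_host_flags(unsigned int target_flags,
                                                 const struct example_flag_row *row)
{
    unsigned int host_flags = 0;

    for (; row->t_mask || row->h_mask; row++) {
        /* If the masked target bits match this row, set the host bits. */
        if ((target_flags & row->t_mask) == row->t_bits) {
            host_flags |= row->h_bits;
        }
    }
    return host_flags;
}
#endif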
362 
363 static int sys_getcwd1(char *buf, size_t size)
364 {
365   if (getcwd(buf, size) == NULL) {
366       /* getcwd() sets errno */
367       return (-1);
368   }
369   return strlen(buf)+1;
370 }
371 
372 #ifdef TARGET_NR_utimensat
373 #if defined(__NR_utimensat)
374 #define __NR_sys_utimensat __NR_utimensat
375 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
376           const struct timespec *,tsp,int,flags)
377 #else
378 static int sys_utimensat(int dirfd, const char *pathname,
379                          const struct timespec times[2], int flags)
380 {
381     errno = ENOSYS;
382     return -1;
383 }
384 #endif
385 #endif /* TARGET_NR_utimensat */
386 
387 #ifdef TARGET_NR_renameat2
388 #if defined(__NR_renameat2)
389 #define __NR_sys_renameat2 __NR_renameat2
390 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
391           const char *, new, unsigned int, flags)
392 #else
393 static int sys_renameat2(int oldfd, const char *old,
394                          int newfd, const char *new, int flags)
395 {
396     if (flags == 0) {
397         return renameat(oldfd, old, newfd, new);
398     }
399     errno = ENOSYS;
400     return -1;
401 }
402 #endif
403 #endif /* TARGET_NR_renameat2 */
404 
405 #ifdef CONFIG_INOTIFY
406 #include <sys/inotify.h>
407 
408 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
409 static int sys_inotify_init(void)
410 {
411   return (inotify_init());
412 }
413 #endif
414 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
415 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
416 {
417   return (inotify_add_watch(fd, pathname, mask));
418 }
419 #endif
420 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
421 static int sys_inotify_rm_watch(int fd, int32_t wd)
422 {
423   return (inotify_rm_watch(fd, wd));
424 }
425 #endif
426 #ifdef CONFIG_INOTIFY1
427 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
428 static int sys_inotify_init1(int flags)
429 {
430   return (inotify_init1(flags));
431 }
432 #endif
433 #endif
434 #else
435 /* Userspace can usually survive runtime without inotify */
436 #undef TARGET_NR_inotify_init
437 #undef TARGET_NR_inotify_init1
438 #undef TARGET_NR_inotify_add_watch
439 #undef TARGET_NR_inotify_rm_watch
440 #endif /* CONFIG_INOTIFY  */
441 
442 #if defined(TARGET_NR_prlimit64)
443 #ifndef __NR_prlimit64
444 # define __NR_prlimit64 -1
445 #endif
446 #define __NR_sys_prlimit64 __NR_prlimit64
447 /* The glibc rlimit structure may not be that used by the underlying syscall */
448 struct host_rlimit64 {
449     uint64_t rlim_cur;
450     uint64_t rlim_max;
451 };
452 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
453           const struct host_rlimit64 *, new_limit,
454           struct host_rlimit64 *, old_limit)
455 #endif
456 
457 
458 #if defined(TARGET_NR_timer_create)
459 /* Maximum of 32 active POSIX timers allowed at any one time. */
460 static timer_t g_posix_timers[32] = { 0, } ;
461 
462 static inline int next_free_host_timer(void)
463 {
464     int k ;
465     /* FIXME: Does finding the next free slot require a lock? */
466     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
467         if (g_posix_timers[k] == 0) {
468             g_posix_timers[k] = (timer_t) 1;
469             return k;
470         }
471     }
472     return -1;
473 }
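
/*
 * Note on the FIXME above (a sketch only, not the current implementation):
 * the slot could be claimed without a lock by reserving it with an atomic
 * compare-and-swap, e.g. using the GCC/Clang builtin:
 *
 *     if (__sync_bool_compare_and_swap(&g_posix_timers[k], 0, (timer_t)1)) {
 *         return k;
 *     }
 */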
474 #endif
475 
476 /* ARM EABI and MIPS expect 64-bit types to be aligned to even-numbered register pairs */
477 #ifdef TARGET_ARM
478 static inline int regpairs_aligned(void *cpu_env, int num)
479 {
480     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
481 }
482 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
483 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
484 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
485 /* The SysV ABI for PPC32 expects 64-bit parameters to be passed in odd/even pairs
486  * of registers, which translates to the same rule as ARM/MIPS because we start
487  * with r3 as arg1 */
488 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
489 #elif defined(TARGET_SH4)
490 /* SH4 doesn't align register pairs, except for p{read,write}64 */
491 static inline int regpairs_aligned(void *cpu_env, int num)
492 {
493     switch (num) {
494     case TARGET_NR_pread64:
495     case TARGET_NR_pwrite64:
496         return 1;
497 
498     default:
499         return 0;
500     }
501 }
502 #elif defined(TARGET_XTENSA)
503 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
504 #else
505 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
506 #endif
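
/*
 * Illustrative use (real call sites appear later in this file): for
 * syscalls that pass a 64-bit value in two 32-bit argument registers,
 * the caller skips one argument slot when regpairs_aligned() says the
 * ABI requires it, and then recombines the pair, roughly:
 *
 *     if (regpairs_aligned(cpu_env, num)) {
 *         arg2 = arg3;                // skip the padding register
 *         arg3 = arg4;
 *     }
 *     offset = target_offset64(arg2, arg3);
 *
 * (argument names are placeholders; target_offset64() joins the two
 * words in the order the target ABI expects).
 */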
507 
508 #define ERRNO_TABLE_SIZE 1200
509 
510 /* target_to_host_errno_table[] is initialized from
511  * host_to_target_errno_table[] in syscall_init(). */
512 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
513 };
514 
515 /*
516  * This list is the union of errno values overridden in asm-<arch>/errno.h
517  * minus the errnos that are not actually generic to all archs.
518  */
519 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
520     [EAGAIN]		= TARGET_EAGAIN,
521     [EIDRM]		= TARGET_EIDRM,
522     [ECHRNG]		= TARGET_ECHRNG,
523     [EL2NSYNC]		= TARGET_EL2NSYNC,
524     [EL3HLT]		= TARGET_EL3HLT,
525     [EL3RST]		= TARGET_EL3RST,
526     [ELNRNG]		= TARGET_ELNRNG,
527     [EUNATCH]		= TARGET_EUNATCH,
528     [ENOCSI]		= TARGET_ENOCSI,
529     [EL2HLT]		= TARGET_EL2HLT,
530     [EDEADLK]		= TARGET_EDEADLK,
531     [ENOLCK]		= TARGET_ENOLCK,
532     [EBADE]		= TARGET_EBADE,
533     [EBADR]		= TARGET_EBADR,
534     [EXFULL]		= TARGET_EXFULL,
535     [ENOANO]		= TARGET_ENOANO,
536     [EBADRQC]		= TARGET_EBADRQC,
537     [EBADSLT]		= TARGET_EBADSLT,
538     [EBFONT]		= TARGET_EBFONT,
539     [ENOSTR]		= TARGET_ENOSTR,
540     [ENODATA]		= TARGET_ENODATA,
541     [ETIME]		= TARGET_ETIME,
542     [ENOSR]		= TARGET_ENOSR,
543     [ENONET]		= TARGET_ENONET,
544     [ENOPKG]		= TARGET_ENOPKG,
545     [EREMOTE]		= TARGET_EREMOTE,
546     [ENOLINK]		= TARGET_ENOLINK,
547     [EADV]		= TARGET_EADV,
548     [ESRMNT]		= TARGET_ESRMNT,
549     [ECOMM]		= TARGET_ECOMM,
550     [EPROTO]		= TARGET_EPROTO,
551     [EDOTDOT]		= TARGET_EDOTDOT,
552     [EMULTIHOP]		= TARGET_EMULTIHOP,
553     [EBADMSG]		= TARGET_EBADMSG,
554     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
555     [EOVERFLOW]		= TARGET_EOVERFLOW,
556     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
557     [EBADFD]		= TARGET_EBADFD,
558     [EREMCHG]		= TARGET_EREMCHG,
559     [ELIBACC]		= TARGET_ELIBACC,
560     [ELIBBAD]		= TARGET_ELIBBAD,
561     [ELIBSCN]		= TARGET_ELIBSCN,
562     [ELIBMAX]		= TARGET_ELIBMAX,
563     [ELIBEXEC]		= TARGET_ELIBEXEC,
564     [EILSEQ]		= TARGET_EILSEQ,
565     [ENOSYS]		= TARGET_ENOSYS,
566     [ELOOP]		= TARGET_ELOOP,
567     [ERESTART]		= TARGET_ERESTART,
568     [ESTRPIPE]		= TARGET_ESTRPIPE,
569     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
570     [EUSERS]		= TARGET_EUSERS,
571     [ENOTSOCK]		= TARGET_ENOTSOCK,
572     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
573     [EMSGSIZE]		= TARGET_EMSGSIZE,
574     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
575     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
576     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
577     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
578     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
579     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
580     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
581     [EADDRINUSE]	= TARGET_EADDRINUSE,
582     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
583     [ENETDOWN]		= TARGET_ENETDOWN,
584     [ENETUNREACH]	= TARGET_ENETUNREACH,
585     [ENETRESET]		= TARGET_ENETRESET,
586     [ECONNABORTED]	= TARGET_ECONNABORTED,
587     [ECONNRESET]	= TARGET_ECONNRESET,
588     [ENOBUFS]		= TARGET_ENOBUFS,
589     [EISCONN]		= TARGET_EISCONN,
590     [ENOTCONN]		= TARGET_ENOTCONN,
591     [EUCLEAN]		= TARGET_EUCLEAN,
592     [ENOTNAM]		= TARGET_ENOTNAM,
593     [ENAVAIL]		= TARGET_ENAVAIL,
594     [EISNAM]		= TARGET_EISNAM,
595     [EREMOTEIO]		= TARGET_EREMOTEIO,
596     [EDQUOT]            = TARGET_EDQUOT,
597     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
598     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
599     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
600     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
601     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
602     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
603     [EALREADY]		= TARGET_EALREADY,
604     [EINPROGRESS]	= TARGET_EINPROGRESS,
605     [ESTALE]		= TARGET_ESTALE,
606     [ECANCELED]		= TARGET_ECANCELED,
607     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
608     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
609 #ifdef ENOKEY
610     [ENOKEY]		= TARGET_ENOKEY,
611 #endif
612 #ifdef EKEYEXPIRED
613     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
614 #endif
615 #ifdef EKEYREVOKED
616     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
617 #endif
618 #ifdef EKEYREJECTED
619     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
620 #endif
621 #ifdef EOWNERDEAD
622     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
623 #endif
624 #ifdef ENOTRECOVERABLE
625     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
626 #endif
627 #ifdef ENOMSG
628     [ENOMSG]            = TARGET_ENOMSG,
629 #endif
630 #ifdef ERFKILL
631     [ERFKILL]           = TARGET_ERFKILL,
632 #endif
633 #ifdef EHWPOISON
634     [EHWPOISON]         = TARGET_EHWPOISON,
635 #endif
636 };
637 
638 static inline int host_to_target_errno(int err)
639 {
640     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
641         host_to_target_errno_table[err]) {
642         return host_to_target_errno_table[err];
643     }
644     return err;
645 }
646 
647 static inline int target_to_host_errno(int err)
648 {
649     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
650         target_to_host_errno_table[err]) {
651         return target_to_host_errno_table[err];
652     }
653     return err;
654 }
655 
656 static inline abi_long get_errno(abi_long ret)
657 {
658     if (ret == -1)
659         return -host_to_target_errno(errno);
660     else
661         return ret;
662 }
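
/*
 * Typical use of get_errno() (illustrative): host wrappers follow the
 * usual -1/errno convention, and get_errno() folds that into the
 * negative-target-errno convention used throughout this file, e.g.
 *
 *     ret = get_errno(safe_read(fd, host_buf, count));
 *
 * so a host ENOSYS failure comes back as -TARGET_ENOSYS, while a
 * non-negative success value is passed through unchanged.
 * (safe_read() is one of the safe_syscall wrappers defined below.)
 */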
663 
664 const char *target_strerror(int err)
665 {
666     if (err == TARGET_ERESTARTSYS) {
667         return "To be restarted";
668     }
669     if (err == TARGET_QEMU_ESIGRETURN) {
670         return "Successful exit from sigreturn";
671     }
672 
673     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
674         return NULL;
675     }
676     return strerror(target_to_host_errno(err));
677 }
678 
679 #define safe_syscall0(type, name) \
680 static type safe_##name(void) \
681 { \
682     return safe_syscall(__NR_##name); \
683 }
684 
685 #define safe_syscall1(type, name, type1, arg1) \
686 static type safe_##name(type1 arg1) \
687 { \
688     return safe_syscall(__NR_##name, arg1); \
689 }
690 
691 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
692 static type safe_##name(type1 arg1, type2 arg2) \
693 { \
694     return safe_syscall(__NR_##name, arg1, arg2); \
695 }
696 
697 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
698 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
699 { \
700     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
701 }
702 
703 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
704     type4, arg4) \
705 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
706 { \
707     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
708 }
709 
710 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
711     type4, arg4, type5, arg5) \
712 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
713     type5 arg5) \
714 { \
715     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
716 }
717 
718 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
719     type4, arg4, type5, arg5, type6, arg6) \
720 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
721     type5 arg5, type6 arg6) \
722 { \
723     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
724 }
725 
726 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
727 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
728 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
729               int, flags, mode_t, mode)
730 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
731               struct rusage *, rusage)
732 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
733               int, options, struct rusage *, rusage)
734 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
735 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
736               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
737 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
738               struct timespec *, tsp, const sigset_t *, sigmask,
739               size_t, sigsetsize)
740 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
741               int, maxevents, int, timeout, const sigset_t *, sigmask,
742               size_t, sigsetsize)
743 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
744               const struct timespec *,timeout,int *,uaddr2,int,val3)
745 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
746 safe_syscall2(int, kill, pid_t, pid, int, sig)
747 safe_syscall2(int, tkill, int, tid, int, sig)
748 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
749 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
750 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
751 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
752               unsigned long, pos_l, unsigned long, pos_h)
753 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
754               unsigned long, pos_l, unsigned long, pos_h)
755 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
756               socklen_t, addrlen)
757 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
758               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
759 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
760               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
761 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
762 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
763 safe_syscall2(int, flock, int, fd, int, operation)
764 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
765               const struct timespec *, uts, size_t, sigsetsize)
766 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
767               int, flags)
768 safe_syscall2(int, nanosleep, const struct timespec *, req,
769               struct timespec *, rem)
770 #ifdef TARGET_NR_clock_nanosleep
771 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
772               const struct timespec *, req, struct timespec *, rem)
773 #endif
774 #ifdef __NR_msgsnd
775 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
776               int, flags)
777 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
778               long, msgtype, int, flags)
779 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
780               unsigned, nsops, const struct timespec *, timeout)
781 #else
782 /* This host kernel architecture uses a single ipc syscall; fake up
783  * wrappers for the sub-operations to hide this implementation detail.
784  * Annoyingly we can't include linux/ipc.h to get the constant definitions
785  * for the call parameter because some structs in there conflict with the
786  * sys/ipc.h ones. So we just define them here, and rely on them being
787  * the same for all host architectures.
788  */
789 #define Q_SEMTIMEDOP 4
790 #define Q_MSGSND 11
791 #define Q_MSGRCV 12
792 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
793 
794 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
795               void *, ptr, long, fifth)
796 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
797 {
798     return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
799 }
800 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
801 {
802     return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
803 }
804 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
805                            const struct timespec *timeout)
806 {
807     return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
808                     (long)timeout);
809 }
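
/*
 * Illustrative note: a timeout-less semop(2) can be issued through the
 * same multiplexer by calling safe_semtimedop() with a NULL timeout,
 * which the kernel treats exactly like semop().
 */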
810 #endif
811 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
812 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
813               size_t, len, unsigned, prio, const struct timespec *, timeout)
814 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
815               size_t, len, unsigned *, prio, const struct timespec *, timeout)
816 #endif
817 /* We do ioctl like this rather than via safe_syscall3 to preserve the
818  * "third argument might be integer or pointer or not present" behaviour of
819  * the libc function.
820  */
821 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
822 /* Similarly for fcntl. Note that callers must always:
823  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
824  *  use the flock64 struct rather than unsuffixed flock
825  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
826  */
827 #ifdef __NR_fcntl64
828 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
829 #else
830 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
831 #endif
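
/*
 * Illustrative call site (hypothetical): per the note above, callers pass
 * the 64-bit command constants and struct flock64 regardless of the host
 * word size, e.g.
 *
 *     struct flock64 fl;
 *     ...
 *     ret = get_errno(safe_fcntl(fd, F_SETLKW64, &fl));
 */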
832 
833 static inline int host_to_target_sock_type(int host_type)
834 {
835     int target_type;
836 
837     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
838     case SOCK_DGRAM:
839         target_type = TARGET_SOCK_DGRAM;
840         break;
841     case SOCK_STREAM:
842         target_type = TARGET_SOCK_STREAM;
843         break;
844     default:
845         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
846         break;
847     }
848 
849 #if defined(SOCK_CLOEXEC)
850     if (host_type & SOCK_CLOEXEC) {
851         target_type |= TARGET_SOCK_CLOEXEC;
852     }
853 #endif
854 
855 #if defined(SOCK_NONBLOCK)
856     if (host_type & SOCK_NONBLOCK) {
857         target_type |= TARGET_SOCK_NONBLOCK;
858     }
859 #endif
860 
861     return target_type;
862 }
863 
864 static abi_ulong target_brk;
865 static abi_ulong target_original_brk;
866 static abi_ulong brk_page;
867 
868 void target_set_brk(abi_ulong new_brk)
869 {
870     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
871     brk_page = HOST_PAGE_ALIGN(target_brk);
872 }
873 
874 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
875 #define DEBUGF_BRK(message, args...)
876 
877 /* do_brk() must return target values and target errnos. */
878 abi_long do_brk(abi_ulong new_brk)
879 {
880     abi_long mapped_addr;
881     abi_ulong new_alloc_size;
882 
883     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
884 
885     if (!new_brk) {
886         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
887         return target_brk;
888     }
889     if (new_brk < target_original_brk) {
890         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
891                    target_brk);
892         return target_brk;
893     }
894 
895     /* If the new brk is less than the highest page reserved to the
896      * target heap allocation, set it and we're almost done...  */
897     if (new_brk <= brk_page) {
898         /* Heap contents are initialized to zero, as for anonymous
899          * mapped pages.  */
900         if (new_brk > target_brk) {
901             memset(g2h(target_brk), 0, new_brk - target_brk);
902         }
903         target_brk = new_brk;
904         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
905         return target_brk;
906     }
907 
908     /* We need to allocate more memory after the brk... Note that
909      * we don't use MAP_FIXED because that will map over the top of
910      * any existing mapping (like the one with the host libc or qemu
911      * itself); instead we treat "mapped but at wrong address" as
912      * a failure and unmap again.
913      */
914     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
915     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
916                                         PROT_READ|PROT_WRITE,
917                                         MAP_ANON|MAP_PRIVATE, 0, 0));
918 
919     if (mapped_addr == brk_page) {
920         /* Heap contents are initialized to zero, as for anonymous
921          * mapped pages.  Technically the new pages are already
922          * initialized to zero since they *are* anonymous mapped
923          * pages; however, we have to take care with the contents that
924          * come from the remaining part of the previous page: it may
925          * contain garbage data due to previous heap usage (grown
926          * then shrunk).  */
927         memset(g2h(target_brk), 0, brk_page - target_brk);
928 
929         target_brk = new_brk;
930         brk_page = HOST_PAGE_ALIGN(target_brk);
931         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
932             target_brk);
933         return target_brk;
934     } else if (mapped_addr != -1) {
935         /* Mapped but at wrong address, meaning there wasn't actually
936          * enough space for this brk.
937          */
938         target_munmap(mapped_addr, new_alloc_size);
939         mapped_addr = -1;
940         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
941     }
942     else {
943         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
944     }
945 
946 #if defined(TARGET_ALPHA)
947     /* We (partially) emulate OSF/1 on Alpha, which requires we
948        return a proper errno, not an unchanged brk value.  */
949     return -TARGET_ENOMEM;
950 #endif
951     /* For everything else, return the previous break. */
952     return target_brk;
953 }
954 
955 static inline abi_long copy_from_user_fdset(fd_set *fds,
956                                             abi_ulong target_fds_addr,
957                                             int n)
958 {
959     int i, nw, j, k;
960     abi_ulong b, *target_fds;
961 
962     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
963     if (!(target_fds = lock_user(VERIFY_READ,
964                                  target_fds_addr,
965                                  sizeof(abi_ulong) * nw,
966                                  1)))
967         return -TARGET_EFAULT;
968 
969     FD_ZERO(fds);
970     k = 0;
971     for (i = 0; i < nw; i++) {
972         /* grab the abi_ulong */
973         __get_user(b, &target_fds[i]);
974         for (j = 0; j < TARGET_ABI_BITS; j++) {
975             /* check the bit inside the abi_ulong */
976             if ((b >> j) & 1)
977                 FD_SET(k, fds);
978             k++;
979         }
980     }
981 
982     unlock_user(target_fds, target_fds_addr, 0);
983 
984     return 0;
985 }
986 
987 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
988                                                  abi_ulong target_fds_addr,
989                                                  int n)
990 {
991     if (target_fds_addr) {
992         if (copy_from_user_fdset(fds, target_fds_addr, n))
993             return -TARGET_EFAULT;
994         *fds_ptr = fds;
995     } else {
996         *fds_ptr = NULL;
997     }
998     return 0;
999 }
1000 
1001 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1002                                           const fd_set *fds,
1003                                           int n)
1004 {
1005     int i, nw, j, k;
1006     abi_long v;
1007     abi_ulong *target_fds;
1008 
1009     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1010     if (!(target_fds = lock_user(VERIFY_WRITE,
1011                                  target_fds_addr,
1012                                  sizeof(abi_ulong) * nw,
1013                                  0)))
1014         return -TARGET_EFAULT;
1015 
1016     k = 0;
1017     for (i = 0; i < nw; i++) {
1018         v = 0;
1019         for (j = 0; j < TARGET_ABI_BITS; j++) {
1020             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1021             k++;
1022         }
1023         __put_user(v, &target_fds[i]);
1024     }
1025 
1026     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1027 
1028     return 0;
1029 }
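
/*
 * Worked example of the packing above: with TARGET_ABI_BITS == 32 each
 * abi_ulong word covers 32 descriptors (word 0 holds fds 0-31, word 1
 * holds fds 32-63, ...), so guest fd 33 lands in word 1, bit 1.  The
 * copy_from_user_fdset() routine above reads back the same layout.
 */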
1030 
1031 #if defined(__alpha__)
1032 #define HOST_HZ 1024
1033 #else
1034 #define HOST_HZ 100
1035 #endif
1036 
1037 static inline abi_long host_to_target_clock_t(long ticks)
1038 {
1039 #if HOST_HZ == TARGET_HZ
1040     return ticks;
1041 #else
1042     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1043 #endif
1044 }
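
/*
 * Worked example: on an Alpha host (HOST_HZ == 1024) emulating a target
 * whose TARGET_HZ is 100, a host value of 2048 ticks (two seconds) is
 * reported to the guest as 2048 * 100 / 1024 == 200 target ticks, i.e.
 * the same two seconds expressed in the target's clock resolution.
 */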
1045 
1046 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1047                                              const struct rusage *rusage)
1048 {
1049     struct target_rusage *target_rusage;
1050 
1051     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1052         return -TARGET_EFAULT;
1053     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1054     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1055     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1056     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1057     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1058     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1059     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1060     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1061     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1062     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1063     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1064     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1065     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1066     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1067     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1068     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1069     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1070     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1071     unlock_user_struct(target_rusage, target_addr, 1);
1072 
1073     return 0;
1074 }
1075 
1076 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1077 {
1078     abi_ulong target_rlim_swap;
1079     rlim_t result;
1080 
1081     target_rlim_swap = tswapal(target_rlim);
1082     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1083         return RLIM_INFINITY;
1084 
1085     result = target_rlim_swap;
1086     if (target_rlim_swap != (rlim_t)result)
1087         return RLIM_INFINITY;
1088 
1089     return result;
1090 }
1091 
1092 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1093 {
1094     abi_ulong target_rlim_swap;
1095     abi_ulong result;
1096 
1097     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1098         target_rlim_swap = TARGET_RLIM_INFINITY;
1099     else
1100         target_rlim_swap = rlim;
1101     result = tswapal(target_rlim_swap);
1102 
1103     return result;
1104 }
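
/*
 * Example of the clamping above: on a 32-bit target, a host limit of
 * 8 GiB does not fit in an abi_ulong, so it is reported to the guest as
 * TARGET_RLIM_INFINITY rather than as a silently truncated value.
 */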
1105 
1106 static inline int target_to_host_resource(int code)
1107 {
1108     switch (code) {
1109     case TARGET_RLIMIT_AS:
1110         return RLIMIT_AS;
1111     case TARGET_RLIMIT_CORE:
1112         return RLIMIT_CORE;
1113     case TARGET_RLIMIT_CPU:
1114         return RLIMIT_CPU;
1115     case TARGET_RLIMIT_DATA:
1116         return RLIMIT_DATA;
1117     case TARGET_RLIMIT_FSIZE:
1118         return RLIMIT_FSIZE;
1119     case TARGET_RLIMIT_LOCKS:
1120         return RLIMIT_LOCKS;
1121     case TARGET_RLIMIT_MEMLOCK:
1122         return RLIMIT_MEMLOCK;
1123     case TARGET_RLIMIT_MSGQUEUE:
1124         return RLIMIT_MSGQUEUE;
1125     case TARGET_RLIMIT_NICE:
1126         return RLIMIT_NICE;
1127     case TARGET_RLIMIT_NOFILE:
1128         return RLIMIT_NOFILE;
1129     case TARGET_RLIMIT_NPROC:
1130         return RLIMIT_NPROC;
1131     case TARGET_RLIMIT_RSS:
1132         return RLIMIT_RSS;
1133     case TARGET_RLIMIT_RTPRIO:
1134         return RLIMIT_RTPRIO;
1135     case TARGET_RLIMIT_SIGPENDING:
1136         return RLIMIT_SIGPENDING;
1137     case TARGET_RLIMIT_STACK:
1138         return RLIMIT_STACK;
1139     default:
1140         return code;
1141     }
1142 }
1143 
1144 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1145                                               abi_ulong target_tv_addr)
1146 {
1147     struct target_timeval *target_tv;
1148 
1149     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1150         return -TARGET_EFAULT;
1151 
1152     __get_user(tv->tv_sec, &target_tv->tv_sec);
1153     __get_user(tv->tv_usec, &target_tv->tv_usec);
1154 
1155     unlock_user_struct(target_tv, target_tv_addr, 0);
1156 
1157     return 0;
1158 }
1159 
1160 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1161                                             const struct timeval *tv)
1162 {
1163     struct target_timeval *target_tv;
1164 
1165     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1166         return -TARGET_EFAULT;
1167 
1168     __put_user(tv->tv_sec, &target_tv->tv_sec);
1169     __put_user(tv->tv_usec, &target_tv->tv_usec);
1170 
1171     unlock_user_struct(target_tv, target_tv_addr, 1);
1172 
1173     return 0;
1174 }
1175 
1176 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1177                                                abi_ulong target_tz_addr)
1178 {
1179     struct target_timezone *target_tz;
1180 
1181     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1182         return -TARGET_EFAULT;
1183     }
1184 
1185     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1186     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1187 
1188     unlock_user_struct(target_tz, target_tz_addr, 0);
1189 
1190     return 0;
1191 }
1192 
1193 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1194 #include <mqueue.h>
1195 
1196 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1197                                               abi_ulong target_mq_attr_addr)
1198 {
1199     struct target_mq_attr *target_mq_attr;
1200 
1201     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1202                           target_mq_attr_addr, 1))
1203         return -TARGET_EFAULT;
1204 
1205     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1206     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1207     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1208     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1209 
1210     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1211 
1212     return 0;
1213 }
1214 
1215 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1216                                             const struct mq_attr *attr)
1217 {
1218     struct target_mq_attr *target_mq_attr;
1219 
1220     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1221                           target_mq_attr_addr, 0))
1222         return -TARGET_EFAULT;
1223 
1224     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1225     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1226     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1227     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1228 
1229     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1230 
1231     return 0;
1232 }
1233 #endif
1234 
1235 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1236 /* do_select() must return target values and target errnos. */
1237 static abi_long do_select(int n,
1238                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1239                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1240 {
1241     fd_set rfds, wfds, efds;
1242     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1243     struct timeval tv;
1244     struct timespec ts, *ts_ptr;
1245     abi_long ret;
1246 
1247     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1248     if (ret) {
1249         return ret;
1250     }
1251     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1252     if (ret) {
1253         return ret;
1254     }
1255     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1256     if (ret) {
1257         return ret;
1258     }
1259 
1260     if (target_tv_addr) {
1261         if (copy_from_user_timeval(&tv, target_tv_addr))
1262             return -TARGET_EFAULT;
1263         ts.tv_sec = tv.tv_sec;
1264         ts.tv_nsec = tv.tv_usec * 1000;
1265         ts_ptr = &ts;
1266     } else {
1267         ts_ptr = NULL;
1268     }
1269 
1270     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1271                                   ts_ptr, NULL));
1272 
1273     if (!is_error(ret)) {
1274         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1275             return -TARGET_EFAULT;
1276         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1277             return -TARGET_EFAULT;
1278         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1279             return -TARGET_EFAULT;
1280 
1281         if (target_tv_addr) {
1282             tv.tv_sec = ts.tv_sec;
1283             tv.tv_usec = ts.tv_nsec / 1000;
1284             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1285                 return -TARGET_EFAULT;
1286             }
1287         }
1288     }
1289 
1290     return ret;
1291 }
1292 
1293 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1294 static abi_long do_old_select(abi_ulong arg1)
1295 {
1296     struct target_sel_arg_struct *sel;
1297     abi_ulong inp, outp, exp, tvp;
1298     long nsel;
1299 
1300     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1301         return -TARGET_EFAULT;
1302     }
1303 
1304     nsel = tswapal(sel->n);
1305     inp = tswapal(sel->inp);
1306     outp = tswapal(sel->outp);
1307     exp = tswapal(sel->exp);
1308     tvp = tswapal(sel->tvp);
1309 
1310     unlock_user_struct(sel, arg1, 0);
1311 
1312     return do_select(nsel, inp, outp, exp, tvp);
1313 }
1314 #endif
1315 #endif
1316 
1317 static abi_long do_pipe2(int host_pipe[], int flags)
1318 {
1319 #ifdef CONFIG_PIPE2
1320     return pipe2(host_pipe, flags);
1321 #else
1322     return -ENOSYS;
1323 #endif
1324 }
1325 
1326 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1327                         int flags, int is_pipe2)
1328 {
1329     int host_pipe[2];
1330     abi_long ret;
1331     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1332 
1333     if (is_error(ret))
1334         return get_errno(ret);
1335 
1336     /* Several targets have special calling conventions for the original
1337        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1338     if (!is_pipe2) {
1339 #if defined(TARGET_ALPHA)
1340         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1341         return host_pipe[0];
1342 #elif defined(TARGET_MIPS)
1343         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1344         return host_pipe[0];
1345 #elif defined(TARGET_SH4)
1346         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1347         return host_pipe[0];
1348 #elif defined(TARGET_SPARC)
1349         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1350         return host_pipe[0];
1351 #endif
1352     }
1353 
1354     if (put_user_s32(host_pipe[0], pipedes)
1355         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1356         return -TARGET_EFAULT;
1357     return get_errno(ret);
1358 }
1359 
1360 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1361                                               abi_ulong target_addr,
1362                                               socklen_t len)
1363 {
1364     struct target_ip_mreqn *target_smreqn;
1365 
1366     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1367     if (!target_smreqn)
1368         return -TARGET_EFAULT;
1369     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1370     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1371     if (len == sizeof(struct target_ip_mreqn))
1372         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1373     unlock_user(target_smreqn, target_addr, 0);
1374 
1375     return 0;
1376 }
1377 
1378 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1379                                                abi_ulong target_addr,
1380                                                socklen_t len)
1381 {
1382     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1383     sa_family_t sa_family;
1384     struct target_sockaddr *target_saddr;
1385 
1386     if (fd_trans_target_to_host_addr(fd)) {
1387         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1388     }
1389 
1390     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1391     if (!target_saddr)
1392         return -TARGET_EFAULT;
1393 
1394     sa_family = tswap16(target_saddr->sa_family);
1395 
1396     /* Oops. The caller might send an incomplete sun_path; sun_path
1397      * must be terminated by \0 (see the manual page), but
1398      * unfortunately it is quite common to specify sockaddr_un
1399      * length as "strlen(x->sun_path)" while it should be
1400      * "strlen(...) + 1". We'll fix that here if needed.
1401      * The Linux kernel has a similar feature.
1402      */
1403 
1404     if (sa_family == AF_UNIX) {
1405         if (len < unix_maxlen && len > 0) {
1406             char *cp = (char*)target_saddr;
1407 
1408             if (cp[len - 1] && !cp[len])
1409                 len++;
1410         }
1411         if (len > unix_maxlen)
1412             len = unix_maxlen;
1413     }
1414 
1415     memcpy(addr, target_saddr, len);
1416     addr->sa_family = sa_family;
1417     if (sa_family == AF_NETLINK) {
1418         struct sockaddr_nl *nladdr;
1419 
1420         nladdr = (struct sockaddr_nl *)addr;
1421         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1422         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1423     } else if (sa_family == AF_PACKET) {
1424         struct target_sockaddr_ll *lladdr;
1425 
1426         lladdr = (struct target_sockaddr_ll *)addr;
1427         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1428         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1429     }
1430     unlock_user(target_saddr, target_addr, 0);
1431 
1432     return 0;
1433 }
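
/*
 * Example of the AF_UNIX length fix-up above (illustrative): a guest that
 * binds with
 *     addrlen = offsetof(struct sockaddr_un, sun_path) + strlen(path)
 * (i.e. without counting the trailing NUL) has the length bumped by one,
 * provided the byte just past it is 0, so that the host kernel sees a
 * properly terminated sun_path.
 */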
1434 
1435 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1436                                                struct sockaddr *addr,
1437                                                socklen_t len)
1438 {
1439     struct target_sockaddr *target_saddr;
1440 
1441     if (len == 0) {
1442         return 0;
1443     }
1444     assert(addr);
1445 
1446     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1447     if (!target_saddr)
1448         return -TARGET_EFAULT;
1449     memcpy(target_saddr, addr, len);
1450     if (len >= offsetof(struct target_sockaddr, sa_family) +
1451         sizeof(target_saddr->sa_family)) {
1452         target_saddr->sa_family = tswap16(addr->sa_family);
1453     }
1454     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1455         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1456         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1457         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1458     } else if (addr->sa_family == AF_PACKET) {
1459         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1460         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1461         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1462     } else if (addr->sa_family == AF_INET6 &&
1463                len >= sizeof(struct target_sockaddr_in6)) {
1464         struct target_sockaddr_in6 *target_in6 =
1465                (struct target_sockaddr_in6 *)target_saddr;
1466         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1467     }
1468     unlock_user(target_saddr, target_addr, len);
1469 
1470     return 0;
1471 }
1472 
1473 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1474                                            struct target_msghdr *target_msgh)
1475 {
1476     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1477     abi_long msg_controllen;
1478     abi_ulong target_cmsg_addr;
1479     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1480     socklen_t space = 0;
1481 
1482     msg_controllen = tswapal(target_msgh->msg_controllen);
1483     if (msg_controllen < sizeof (struct target_cmsghdr))
1484         goto the_end;
1485     target_cmsg_addr = tswapal(target_msgh->msg_control);
1486     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1487     target_cmsg_start = target_cmsg;
1488     if (!target_cmsg)
1489         return -TARGET_EFAULT;
1490 
1491     while (cmsg && target_cmsg) {
1492         void *data = CMSG_DATA(cmsg);
1493         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1494 
1495         int len = tswapal(target_cmsg->cmsg_len)
1496             - sizeof(struct target_cmsghdr);
1497 
1498         space += CMSG_SPACE(len);
1499         if (space > msgh->msg_controllen) {
1500             space -= CMSG_SPACE(len);
1501             /* This is a QEMU bug, since we allocated the payload
1502              * area ourselves (unlike overflow in host-to-target
1503              * conversion, which is just the guest giving us a buffer
1504              * that's too small). It can't happen for the payload types
1505              * we currently support; if it becomes an issue in future
1506              * we would need to improve our allocation strategy to
1507              * something more intelligent than "twice the size of the
1508              * target buffer we're reading from".
1509              */
1510             gemu_log("Host cmsg overflow\n");
1511             break;
1512         }
1513 
1514         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1515             cmsg->cmsg_level = SOL_SOCKET;
1516         } else {
1517             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1518         }
1519         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1520         cmsg->cmsg_len = CMSG_LEN(len);
1521 
1522         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1523             int *fd = (int *)data;
1524             int *target_fd = (int *)target_data;
1525             int i, numfds = len / sizeof(int);
1526 
1527             for (i = 0; i < numfds; i++) {
1528                 __get_user(fd[i], target_fd + i);
1529             }
1530         } else if (cmsg->cmsg_level == SOL_SOCKET
1531                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1532             struct ucred *cred = (struct ucred *)data;
1533             struct target_ucred *target_cred =
1534                 (struct target_ucred *)target_data;
1535 
1536             __get_user(cred->pid, &target_cred->pid);
1537             __get_user(cred->uid, &target_cred->uid);
1538             __get_user(cred->gid, &target_cred->gid);
1539         } else {
1540             gemu_log("Unsupported ancillary data: %d/%d\n",
1541                                         cmsg->cmsg_level, cmsg->cmsg_type);
1542             memcpy(data, target_data, len);
1543         }
1544 
1545         cmsg = CMSG_NXTHDR(msgh, cmsg);
1546         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1547                                          target_cmsg_start);
1548     }
1549     unlock_user(target_cmsg, target_cmsg_addr, 0);
1550  the_end:
1551     msgh->msg_controllen = space;
1552     return 0;
1553 }
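
/* The loop above rewrites guest control messages into host layout so
 * that SCM_RIGHTS file descriptors and SCM_CREDENTIALS survive the
 * difference in cmsghdr size and alignment between target and host.
 * Illustrative guest-side sketch of a call that reaches the SCM_RIGHTS
 * branch (sock and fd are placeholder descriptors):
 *
 *     char dummy = 'x';
 *     struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
 *     union {
 *         char buf[CMSG_SPACE(sizeof(int))];
 *         struct cmsghdr align;
 *     } u;
 *     struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *                           .msg_control = u.buf,
 *                           .msg_controllen = sizeof(u.buf) };
 *     struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *     cm->cmsg_level = SOL_SOCKET;
 *     cm->cmsg_type = SCM_RIGHTS;
 *     cm->cmsg_len = CMSG_LEN(sizeof(int));
 *     memcpy(CMSG_DATA(cm), &fd, sizeof(int));
 *     sendmsg(sock, &msg, 0);
 */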
1554 
1555 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1556                                            struct msghdr *msgh)
1557 {
1558     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1559     abi_long msg_controllen;
1560     abi_ulong target_cmsg_addr;
1561     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1562     socklen_t space = 0;
1563 
1564     msg_controllen = tswapal(target_msgh->msg_controllen);
1565     if (msg_controllen < sizeof (struct target_cmsghdr))
1566         goto the_end;
1567     target_cmsg_addr = tswapal(target_msgh->msg_control);
1568     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1569     target_cmsg_start = target_cmsg;
1570     if (!target_cmsg)
1571         return -TARGET_EFAULT;
1572 
1573     while (cmsg && target_cmsg) {
1574         void *data = CMSG_DATA(cmsg);
1575         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1576 
1577         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1578         int tgt_len, tgt_space;
1579 
1580         /* We never copy a half-header but may copy half-data;
1581          * this is Linux's behaviour in put_cmsg(). Note that
1582          * truncation here is a guest problem (which we report
1583          * to the guest via the CTRUNC bit), unlike truncation
1584          * in target_to_host_cmsg, which is a QEMU bug.
1585          */
1586         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1587             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1588             break;
1589         }
1590 
1591         if (cmsg->cmsg_level == SOL_SOCKET) {
1592             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1593         } else {
1594             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1595         }
1596         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1597 
1598         /* Payload types which need a different size of payload on
1599          * the target must adjust tgt_len here.
1600          */
1601         tgt_len = len;
1602         switch (cmsg->cmsg_level) {
1603         case SOL_SOCKET:
1604             switch (cmsg->cmsg_type) {
1605             case SO_TIMESTAMP:
1606                 tgt_len = sizeof(struct target_timeval);
1607                 break;
1608             default:
1609                 break;
1610             }
1611             break;
1612         default:
1613             break;
1614         }
1615 
1616         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1617             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1618             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1619         }
1620 
1621         /* We must now copy-and-convert len bytes of payload
1622          * into tgt_len bytes of destination space. Bear in mind
1623          * that in both source and destination we may be dealing
1624          * with a truncated value!
1625          */
1626         switch (cmsg->cmsg_level) {
1627         case SOL_SOCKET:
1628             switch (cmsg->cmsg_type) {
1629             case SCM_RIGHTS:
1630             {
1631                 int *fd = (int *)data;
1632                 int *target_fd = (int *)target_data;
1633                 int i, numfds = tgt_len / sizeof(int);
1634 
1635                 for (i = 0; i < numfds; i++) {
1636                     __put_user(fd[i], target_fd + i);
1637                 }
1638                 break;
1639             }
1640             case SO_TIMESTAMP:
1641             {
1642                 struct timeval *tv = (struct timeval *)data;
1643                 struct target_timeval *target_tv =
1644                     (struct target_timeval *)target_data;
1645 
1646                 if (len != sizeof(struct timeval) ||
1647                     tgt_len != sizeof(struct target_timeval)) {
1648                     goto unimplemented;
1649                 }
1650 
1651                 /* copy struct timeval to target */
1652                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1653                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1654                 break;
1655             }
1656             case SCM_CREDENTIALS:
1657             {
1658                 struct ucred *cred = (struct ucred *)data;
1659                 struct target_ucred *target_cred =
1660                     (struct target_ucred *)target_data;
1661 
1662                 __put_user(cred->pid, &target_cred->pid);
1663                 __put_user(cred->uid, &target_cred->uid);
1664                 __put_user(cred->gid, &target_cred->gid);
1665                 break;
1666             }
1667             default:
1668                 goto unimplemented;
1669             }
1670             break;
1671 
1672         case SOL_IP:
1673             switch (cmsg->cmsg_type) {
1674             case IP_TTL:
1675             {
1676                 uint32_t *v = (uint32_t *)data;
1677                 uint32_t *t_int = (uint32_t *)target_data;
1678 
1679                 if (len != sizeof(uint32_t) ||
1680                     tgt_len != sizeof(uint32_t)) {
1681                     goto unimplemented;
1682                 }
1683                 __put_user(*v, t_int);
1684                 break;
1685             }
1686             case IP_RECVERR:
1687             {
1688                 struct errhdr_t {
1689                    struct sock_extended_err ee;
1690                    struct sockaddr_in offender;
1691                 };
1692                 struct errhdr_t *errh = (struct errhdr_t *)data;
1693                 struct errhdr_t *target_errh =
1694                     (struct errhdr_t *)target_data;
1695 
1696                 if (len != sizeof(struct errhdr_t) ||
1697                     tgt_len != sizeof(struct errhdr_t)) {
1698                     goto unimplemented;
1699                 }
1700                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1701                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1702                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1703                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1704                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1705                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1706                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1707                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1708                     (void *) &errh->offender, sizeof(errh->offender));
1709                 break;
1710             }
1711             default:
1712                 goto unimplemented;
1713             }
1714             break;
1715 
1716         case SOL_IPV6:
1717             switch (cmsg->cmsg_type) {
1718             case IPV6_HOPLIMIT:
1719             {
1720                 uint32_t *v = (uint32_t *)data;
1721                 uint32_t *t_int = (uint32_t *)target_data;
1722 
1723                 if (len != sizeof(uint32_t) ||
1724                     tgt_len != sizeof(uint32_t)) {
1725                     goto unimplemented;
1726                 }
1727                 __put_user(*v, t_int);
1728                 break;
1729             }
1730             case IPV6_RECVERR:
1731             {
1732                 struct errhdr6_t {
1733                    struct sock_extended_err ee;
1734                    struct sockaddr_in6 offender;
1735                 };
1736                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1737                 struct errhdr6_t *target_errh =
1738                     (struct errhdr6_t *)target_data;
1739 
1740                 if (len != sizeof(struct errhdr6_t) ||
1741                     tgt_len != sizeof(struct errhdr6_t)) {
1742                     goto unimplemented;
1743                 }
1744                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1745                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1746                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1747                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1748                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1749                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1750                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1751                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1752                     (void *) &errh->offender, sizeof(errh->offender));
1753                 break;
1754             }
1755             default:
1756                 goto unimplemented;
1757             }
1758             break;
1759 
1760         default:
1761         unimplemented:
1762             gemu_log("Unsupported ancillary data: %d/%d\n",
1763                                         cmsg->cmsg_level, cmsg->cmsg_type);
1764             memcpy(target_data, data, MIN(len, tgt_len));
1765             if (tgt_len > len) {
1766                 memset(target_data + len, 0, tgt_len - len);
1767             }
1768         }
1769 
1770         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1771         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1772         if (msg_controllen < tgt_space) {
1773             tgt_space = msg_controllen;
1774         }
1775         msg_controllen -= tgt_space;
1776         space += tgt_space;
1777         cmsg = CMSG_NXTHDR(msgh, cmsg);
1778         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1779                                          target_cmsg_start);
1780     }
1781     unlock_user(target_cmsg, target_cmsg_addr, space);
1782  the_end:
1783     target_msgh->msg_controllen = tswapal(space);
1784     return 0;
1785 }
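
/* host_to_target_cmsg() above is the mirror of target_to_host_cmsg():
 * payload that does not fit into the guest's control buffer is
 * truncated and reported via MSG_CTRUNC, and payloads whose size
 * differs between host and target (currently only SO_TIMESTAMP's
 * struct timeval) are resized via tgt_len before being copied out.
 */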
1786 
1787 /* do_setsockopt() must return target values and target errnos. */
1788 static abi_long do_setsockopt(int sockfd, int level, int optname,
1789                               abi_ulong optval_addr, socklen_t optlen)
1790 {
1791     abi_long ret;
1792     int val;
1793     struct ip_mreqn *ip_mreq;
1794     struct ip_mreq_source *ip_mreq_source;
1795 
1796     switch(level) {
1797     case SOL_TCP:
1798         /* TCP options all take an 'int' value.  */
1799         if (optlen < sizeof(uint32_t))
1800             return -TARGET_EINVAL;
1801 
1802         if (get_user_u32(val, optval_addr))
1803             return -TARGET_EFAULT;
1804         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1805         break;
1806     case SOL_IP:
1807         switch(optname) {
1808         case IP_TOS:
1809         case IP_TTL:
1810         case IP_HDRINCL:
1811         case IP_ROUTER_ALERT:
1812         case IP_RECVOPTS:
1813         case IP_RETOPTS:
1814         case IP_PKTINFO:
1815         case IP_MTU_DISCOVER:
1816         case IP_RECVERR:
1817         case IP_RECVTTL:
1818         case IP_RECVTOS:
1819 #ifdef IP_FREEBIND
1820         case IP_FREEBIND:
1821 #endif
1822         case IP_MULTICAST_TTL:
1823         case IP_MULTICAST_LOOP:
1824             val = 0;
1825             if (optlen >= sizeof(uint32_t)) {
1826                 if (get_user_u32(val, optval_addr))
1827                     return -TARGET_EFAULT;
1828             } else if (optlen >= 1) {
1829                 if (get_user_u8(val, optval_addr))
1830                     return -TARGET_EFAULT;
1831             }
1832             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1833             break;
1834         case IP_ADD_MEMBERSHIP:
1835         case IP_DROP_MEMBERSHIP:
1836             if (optlen < sizeof (struct target_ip_mreq) ||
1837                 optlen > sizeof (struct target_ip_mreqn))
1838                 return -TARGET_EINVAL;
1839 
1840             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1841             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1842             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1843             break;
1844 
1845         case IP_BLOCK_SOURCE:
1846         case IP_UNBLOCK_SOURCE:
1847         case IP_ADD_SOURCE_MEMBERSHIP:
1848         case IP_DROP_SOURCE_MEMBERSHIP:
1849             if (optlen != sizeof (struct target_ip_mreq_source))
1850                 return -TARGET_EINVAL;
1851 
1852             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1853             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1854             unlock_user(ip_mreq_source, optval_addr, 0);
1855             break;
1856 
1857         default:
1858             goto unimplemented;
1859         }
1860         break;
1861     case SOL_IPV6:
1862         switch (optname) {
1863         case IPV6_MTU_DISCOVER:
1864         case IPV6_MTU:
1865         case IPV6_V6ONLY:
1866         case IPV6_RECVPKTINFO:
1867         case IPV6_UNICAST_HOPS:
1868         case IPV6_MULTICAST_HOPS:
1869         case IPV6_MULTICAST_LOOP:
1870         case IPV6_RECVERR:
1871         case IPV6_RECVHOPLIMIT:
1872         case IPV6_2292HOPLIMIT:
1873         case IPV6_CHECKSUM:
1874             val = 0;
1875             if (optlen < sizeof(uint32_t)) {
1876                 return -TARGET_EINVAL;
1877             }
1878             if (get_user_u32(val, optval_addr)) {
1879                 return -TARGET_EFAULT;
1880             }
1881             ret = get_errno(setsockopt(sockfd, level, optname,
1882                                        &val, sizeof(val)));
1883             break;
1884         case IPV6_PKTINFO:
1885         {
1886             struct in6_pktinfo pki;
1887 
1888             if (optlen < sizeof(pki)) {
1889                 return -TARGET_EINVAL;
1890             }
1891 
1892             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1893                 return -TARGET_EFAULT;
1894             }
1895 
1896             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1897 
1898             ret = get_errno(setsockopt(sockfd, level, optname,
1899                                        &pki, sizeof(pki)));
1900             break;
1901         }
1902         default:
1903             goto unimplemented;
1904         }
1905         break;
1906     case SOL_ICMPV6:
1907         switch (optname) {
1908         case ICMPV6_FILTER:
1909         {
1910             struct icmp6_filter icmp6f;
1911 
1912             if (optlen > sizeof(icmp6f)) {
1913                 optlen = sizeof(icmp6f);
1914             }
1915 
1916             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
1917                 return -TARGET_EFAULT;
1918             }
1919 
1920             for (val = 0; val < 8; val++) {
1921                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
1922             }
1923 
1924             ret = get_errno(setsockopt(sockfd, level, optname,
1925                                        &icmp6f, optlen));
1926             break;
1927         }
1928         default:
1929             goto unimplemented;
1930         }
1931         break;
1932     case SOL_RAW:
1933         switch (optname) {
1934         case ICMP_FILTER:
1935         case IPV6_CHECKSUM:
1936             /* These take a u32 value.  */
1937             if (optlen < sizeof(uint32_t)) {
1938                 return -TARGET_EINVAL;
1939             }
1940 
1941             if (get_user_u32(val, optval_addr)) {
1942                 return -TARGET_EFAULT;
1943             }
1944             ret = get_errno(setsockopt(sockfd, level, optname,
1945                                        &val, sizeof(val)));
1946             break;
1947 
1948         default:
1949             goto unimplemented;
1950         }
1951         break;
1952     case TARGET_SOL_SOCKET:
1953         switch (optname) {
1954         case TARGET_SO_RCVTIMEO:
1955         {
1956                 struct timeval tv;
1957 
1958                 optname = SO_RCVTIMEO;
1959 
1960 set_timeout:
1961                 if (optlen != sizeof(struct target_timeval)) {
1962                     return -TARGET_EINVAL;
1963                 }
1964 
1965                 if (copy_from_user_timeval(&tv, optval_addr)) {
1966                     return -TARGET_EFAULT;
1967                 }
1968 
1969                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1970                                 &tv, sizeof(tv)));
1971                 return ret;
1972         }
1973         case TARGET_SO_SNDTIMEO:
1974                 optname = SO_SNDTIMEO;
1975                 goto set_timeout;
1976         case TARGET_SO_ATTACH_FILTER:
1977         {
1978                 struct target_sock_fprog *tfprog;
1979                 struct target_sock_filter *tfilter;
1980                 struct sock_fprog fprog;
1981                 struct sock_filter *filter;
1982                 int i;
1983 
1984                 if (optlen != sizeof(*tfprog)) {
1985                     return -TARGET_EINVAL;
1986                 }
1987                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1988                     return -TARGET_EFAULT;
1989                 }
1990                 if (!lock_user_struct(VERIFY_READ, tfilter,
1991                                       tswapal(tfprog->filter), 0)) {
1992                     unlock_user_struct(tfprog, optval_addr, 1);
1993                     return -TARGET_EFAULT;
1994                 }
1995 
1996                 fprog.len = tswap16(tfprog->len);
1997                 filter = g_try_new(struct sock_filter, fprog.len);
1998                 if (filter == NULL) {
1999                     unlock_user_struct(tfilter, tfprog->filter, 1);
2000                     unlock_user_struct(tfprog, optval_addr, 1);
2001                     return -TARGET_ENOMEM;
2002                 }
2003                 for (i = 0; i < fprog.len; i++) {
2004                     filter[i].code = tswap16(tfilter[i].code);
2005                     filter[i].jt = tfilter[i].jt;
2006                     filter[i].jf = tfilter[i].jf;
2007                     filter[i].k = tswap32(tfilter[i].k);
2008                 }
2009                 fprog.filter = filter;
2010 
2011                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2012                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2013                 g_free(filter);
2014 
2015                 unlock_user_struct(tfilter, tfprog->filter, 1);
2016                 unlock_user_struct(tfprog, optval_addr, 1);
2017                 return ret;
2018         }
2019         case TARGET_SO_BINDTODEVICE:
2020         {
2021                 char *dev_ifname, *addr_ifname;
2022 
2023                 if (optlen > IFNAMSIZ - 1) {
2024                     optlen = IFNAMSIZ - 1;
2025                 }
2026                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2027                 if (!dev_ifname) {
2028                     return -TARGET_EFAULT;
2029                 }
2030                 optname = SO_BINDTODEVICE;
2031                 addr_ifname = alloca(IFNAMSIZ);
2032                 memcpy(addr_ifname, dev_ifname, optlen);
2033                 addr_ifname[optlen] = 0;
2034                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2035                                            addr_ifname, optlen));
2036                 unlock_user(dev_ifname, optval_addr, 0);
2037                 return ret;
2038         }
2039         case TARGET_SO_LINGER:
2040         {
2041                 struct linger lg;
2042                 struct target_linger *tlg;
2043 
2044                 if (optlen != sizeof(struct target_linger)) {
2045                     return -TARGET_EINVAL;
2046                 }
2047                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2048                     return -TARGET_EFAULT;
2049                 }
2050                 __get_user(lg.l_onoff, &tlg->l_onoff);
2051                 __get_user(lg.l_linger, &tlg->l_linger);
2052                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2053                                 &lg, sizeof(lg)));
2054                 unlock_user_struct(tlg, optval_addr, 0);
2055                 return ret;
2056         }
2057             /* Options with 'int' argument.  */
2058         case TARGET_SO_DEBUG:
2059                 optname = SO_DEBUG;
2060                 break;
2061         case TARGET_SO_REUSEADDR:
2062                 optname = SO_REUSEADDR;
2063                 break;
2064         case TARGET_SO_TYPE:
2065                 optname = SO_TYPE;
2066                 break;
2067         case TARGET_SO_ERROR:
2068                 optname = SO_ERROR;
2069                 break;
2070         case TARGET_SO_DONTROUTE:
2071                 optname = SO_DONTROUTE;
2072                 break;
2073         case TARGET_SO_BROADCAST:
2074                 optname = SO_BROADCAST;
2075                 break;
2076         case TARGET_SO_SNDBUF:
2077                 optname = SO_SNDBUF;
2078                 break;
2079         case TARGET_SO_SNDBUFFORCE:
2080                 optname = SO_SNDBUFFORCE;
2081                 break;
2082         case TARGET_SO_RCVBUF:
2083                 optname = SO_RCVBUF;
2084                 break;
2085         case TARGET_SO_RCVBUFFORCE:
2086                 optname = SO_RCVBUFFORCE;
2087                 break;
2088         case TARGET_SO_KEEPALIVE:
2089                 optname = SO_KEEPALIVE;
2090                 break;
2091         case TARGET_SO_OOBINLINE:
2092                 optname = SO_OOBINLINE;
2093                 break;
2094         case TARGET_SO_NO_CHECK:
2095                 optname = SO_NO_CHECK;
2096                 break;
2097         case TARGET_SO_PRIORITY:
2098                 optname = SO_PRIORITY;
2099                 break;
2100 #ifdef SO_BSDCOMPAT
2101         case TARGET_SO_BSDCOMPAT:
2102                 optname = SO_BSDCOMPAT;
2103                 break;
2104 #endif
2105         case TARGET_SO_PASSCRED:
2106                 optname = SO_PASSCRED;
2107                 break;
2108         case TARGET_SO_PASSSEC:
2109                 optname = SO_PASSSEC;
2110                 break;
2111         case TARGET_SO_TIMESTAMP:
2112                 optname = SO_TIMESTAMP;
2113                 break;
2114         case TARGET_SO_RCVLOWAT:
2115                 optname = SO_RCVLOWAT;
2116                 break;
2117         default:
2118             goto unimplemented;
2119         }
2120         if (optlen < sizeof(uint32_t))
2121             return -TARGET_EINVAL;
2122 
2123         if (get_user_u32(val, optval_addr))
2124             return -TARGET_EFAULT;
2125         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2126         break;
2127     default:
2128     unimplemented:
2129         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2130         ret = -TARGET_ENOPROTOOPT;
2131     }
2132     return ret;
2133 }
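
/* Guest-side sketch of a call that exercises the TARGET_SO_RCVTIMEO
 * path above, where the target timeval has to be converted before the
 * host setsockopt() is issued (fd is a placeholder socket):
 *
 *     struct timeval tv = { .tv_sec = 2, .tv_usec = 0 };
 *     setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 */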
2134 
2135 /* do_getsockopt() must return target values and target errnos. */
2136 static abi_long do_getsockopt(int sockfd, int level, int optname,
2137                               abi_ulong optval_addr, abi_ulong optlen)
2138 {
2139     abi_long ret;
2140     int len, val;
2141     socklen_t lv;
2142 
2143     switch(level) {
2144     case TARGET_SOL_SOCKET:
2145         level = SOL_SOCKET;
2146         switch (optname) {
2147         /* These don't just return a single integer */
2148         case TARGET_SO_RCVTIMEO:
2149         case TARGET_SO_SNDTIMEO:
2150         case TARGET_SO_PEERNAME:
2151             goto unimplemented;
2152         case TARGET_SO_PEERCRED: {
2153             struct ucred cr;
2154             socklen_t crlen;
2155             struct target_ucred *tcr;
2156 
2157             if (get_user_u32(len, optlen)) {
2158                 return -TARGET_EFAULT;
2159             }
2160             if (len < 0) {
2161                 return -TARGET_EINVAL;
2162             }
2163 
2164             crlen = sizeof(cr);
2165             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2166                                        &cr, &crlen));
2167             if (ret < 0) {
2168                 return ret;
2169             }
2170             if (len > crlen) {
2171                 len = crlen;
2172             }
2173             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2174                 return -TARGET_EFAULT;
2175             }
2176             __put_user(cr.pid, &tcr->pid);
2177             __put_user(cr.uid, &tcr->uid);
2178             __put_user(cr.gid, &tcr->gid);
2179             unlock_user_struct(tcr, optval_addr, 1);
2180             if (put_user_u32(len, optlen)) {
2181                 return -TARGET_EFAULT;
2182             }
2183             break;
2184         }
2185         case TARGET_SO_LINGER:
2186         {
2187             struct linger lg;
2188             socklen_t lglen;
2189             struct target_linger *tlg;
2190 
2191             if (get_user_u32(len, optlen)) {
2192                 return -TARGET_EFAULT;
2193             }
2194             if (len < 0) {
2195                 return -TARGET_EINVAL;
2196             }
2197 
2198             lglen = sizeof(lg);
2199             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2200                                        &lg, &lglen));
2201             if (ret < 0) {
2202                 return ret;
2203             }
2204             if (len > lglen) {
2205                 len = lglen;
2206             }
2207             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2208                 return -TARGET_EFAULT;
2209             }
2210             __put_user(lg.l_onoff, &tlg->l_onoff);
2211             __put_user(lg.l_linger, &tlg->l_linger);
2212             unlock_user_struct(tlg, optval_addr, 1);
2213             if (put_user_u32(len, optlen)) {
2214                 return -TARGET_EFAULT;
2215             }
2216             break;
2217         }
2218         /* Options with 'int' argument.  */
2219         case TARGET_SO_DEBUG:
2220             optname = SO_DEBUG;
2221             goto int_case;
2222         case TARGET_SO_REUSEADDR:
2223             optname = SO_REUSEADDR;
2224             goto int_case;
2225         case TARGET_SO_TYPE:
2226             optname = SO_TYPE;
2227             goto int_case;
2228         case TARGET_SO_ERROR:
2229             optname = SO_ERROR;
2230             goto int_case;
2231         case TARGET_SO_DONTROUTE:
2232             optname = SO_DONTROUTE;
2233             goto int_case;
2234         case TARGET_SO_BROADCAST:
2235             optname = SO_BROADCAST;
2236             goto int_case;
2237         case TARGET_SO_SNDBUF:
2238             optname = SO_SNDBUF;
2239             goto int_case;
2240         case TARGET_SO_RCVBUF:
2241             optname = SO_RCVBUF;
2242             goto int_case;
2243         case TARGET_SO_KEEPALIVE:
2244             optname = SO_KEEPALIVE;
2245             goto int_case;
2246         case TARGET_SO_OOBINLINE:
2247             optname = SO_OOBINLINE;
2248             goto int_case;
2249         case TARGET_SO_NO_CHECK:
2250             optname = SO_NO_CHECK;
2251             goto int_case;
2252         case TARGET_SO_PRIORITY:
2253             optname = SO_PRIORITY;
2254             goto int_case;
2255 #ifdef SO_BSDCOMPAT
2256         case TARGET_SO_BSDCOMPAT:
2257             optname = SO_BSDCOMPAT;
2258             goto int_case;
2259 #endif
2260         case TARGET_SO_PASSCRED:
2261             optname = SO_PASSCRED;
2262             goto int_case;
2263         case TARGET_SO_TIMESTAMP:
2264             optname = SO_TIMESTAMP;
2265             goto int_case;
2266         case TARGET_SO_RCVLOWAT:
2267             optname = SO_RCVLOWAT;
2268             goto int_case;
2269         case TARGET_SO_ACCEPTCONN:
2270             optname = SO_ACCEPTCONN;
2271             goto int_case;
2272         default:
2273             goto int_case;
2274         }
2275         break;
2276     case SOL_TCP:
2277         /* TCP options all take an 'int' value.  */
2278     int_case:
2279         if (get_user_u32(len, optlen))
2280             return -TARGET_EFAULT;
2281         if (len < 0)
2282             return -TARGET_EINVAL;
2283         lv = sizeof(lv);
2284         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2285         if (ret < 0)
2286             return ret;
2287         if (optname == SO_TYPE) {
2288             val = host_to_target_sock_type(val);
2289         }
2290         if (len > lv)
2291             len = lv;
2292         if (len == 4) {
2293             if (put_user_u32(val, optval_addr))
2294                 return -TARGET_EFAULT;
2295         } else {
2296             if (put_user_u8(val, optval_addr))
2297                 return -TARGET_EFAULT;
2298         }
2299         if (put_user_u32(len, optlen))
2300             return -TARGET_EFAULT;
2301         break;
2302     case SOL_IP:
2303         switch(optname) {
2304         case IP_TOS:
2305         case IP_TTL:
2306         case IP_HDRINCL:
2307         case IP_ROUTER_ALERT:
2308         case IP_RECVOPTS:
2309         case IP_RETOPTS:
2310         case IP_PKTINFO:
2311         case IP_MTU_DISCOVER:
2312         case IP_RECVERR:
2313         case IP_RECVTOS:
2314 #ifdef IP_FREEBIND
2315         case IP_FREEBIND:
2316 #endif
2317         case IP_MULTICAST_TTL:
2318         case IP_MULTICAST_LOOP:
2319             if (get_user_u32(len, optlen))
2320                 return -TARGET_EFAULT;
2321             if (len < 0)
2322                 return -TARGET_EINVAL;
2323             lv = sizeof(lv);
2324             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2325             if (ret < 0)
2326                 return ret;
2327             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2328                 len = 1;
2329                 if (put_user_u32(len, optlen)
2330                     || put_user_u8(val, optval_addr))
2331                     return -TARGET_EFAULT;
2332             } else {
2333                 if (len > sizeof(int))
2334                     len = sizeof(int);
2335                 if (put_user_u32(len, optlen)
2336                     || put_user_u32(val, optval_addr))
2337                     return -TARGET_EFAULT;
2338             }
2339             break;
2340         default:
2341             ret = -TARGET_ENOPROTOOPT;
2342             break;
2343         }
2344         break;
2345     default:
2346     unimplemented:
2347         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2348                  level, optname);
2349         ret = -TARGET_EOPNOTSUPP;
2350         break;
2351     }
2352     return ret;
2353 }
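
/* Most options funnel into int_case above; SO_TYPE additionally has
 * its value translated by host_to_target_sock_type(). Guest-side
 * sketch (fd is a placeholder socket):
 *
 *     int type;
 *     socklen_t len = sizeof(type);
 *     getsockopt(fd, SOL_SOCKET, SO_TYPE, &type, &len);
 */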
2354 
2355 /* Convert target low/high pair representing file offset into the host
2356  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2357  * as the kernel doesn't handle them either.
2358  */
2359 static void target_to_host_low_high(abi_ulong tlow,
2360                                     abi_ulong thigh,
2361                                     unsigned long *hlow,
2362                                     unsigned long *hhigh)
2363 {
2364     uint64_t off = tlow |
2365         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2366         TARGET_LONG_BITS / 2;
2367 
2368     *hlow = off;
2369     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2370 }
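
/* Worked example, assuming a 32-bit target on a 64-bit host:
 * tlow = 0x00001000 and thigh = 0x1 combine to off = 0x100001000,
 * so *hlow = 0x100001000 and *hhigh = 0, because the whole offset
 * fits into one host long. The shifts are done in two halves to
 * avoid shifting by the full width of the type (undefined behaviour)
 * when TARGET_LONG_BITS or HOST_LONG_BITS is 64.
 */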
2371 
2372 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2373                                 abi_ulong count, int copy)
2374 {
2375     struct target_iovec *target_vec;
2376     struct iovec *vec;
2377     abi_ulong total_len, max_len;
2378     int i;
2379     int err = 0;
2380     bool bad_address = false;
2381 
2382     if (count == 0) {
2383         errno = 0;
2384         return NULL;
2385     }
2386     if (count > IOV_MAX) {
2387         errno = EINVAL;
2388         return NULL;
2389     }
2390 
2391     vec = g_try_new0(struct iovec, count);
2392     if (vec == NULL) {
2393         errno = ENOMEM;
2394         return NULL;
2395     }
2396 
2397     target_vec = lock_user(VERIFY_READ, target_addr,
2398                            count * sizeof(struct target_iovec), 1);
2399     if (target_vec == NULL) {
2400         err = EFAULT;
2401         goto fail2;
2402     }
2403 
2404     /* ??? If host page size > target page size, this will result in a
2405        value larger than what we can actually support.  */
2406     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2407     total_len = 0;
2408 
2409     for (i = 0; i < count; i++) {
2410         abi_ulong base = tswapal(target_vec[i].iov_base);
2411         abi_long len = tswapal(target_vec[i].iov_len);
2412 
2413         if (len < 0) {
2414             err = EINVAL;
2415             goto fail;
2416         } else if (len == 0) {
2417             /* Zero length pointer is ignored.  */
2418             vec[i].iov_base = 0;
2419         } else {
2420             vec[i].iov_base = lock_user(type, base, len, copy);
2421             /* If the first buffer pointer is bad, this is a fault.  But
2422              * subsequent bad buffers will result in a partial write; this
2423              * is realized by filling the vector with null pointers and
2424              * zero lengths. */
2425             if (!vec[i].iov_base) {
2426                 if (i == 0) {
2427                     err = EFAULT;
2428                     goto fail;
2429                 } else {
2430                     bad_address = true;
2431                 }
2432             }
2433             if (bad_address) {
2434                 len = 0;
2435             }
2436             if (len > max_len - total_len) {
2437                 len = max_len - total_len;
2438             }
2439         }
2440         vec[i].iov_len = len;
2441         total_len += len;
2442     }
2443 
2444     unlock_user(target_vec, target_addr, 0);
2445     return vec;
2446 
2447  fail:
2448     while (--i >= 0) {
2449         if (tswapal(target_vec[i].iov_len) > 0) {
2450             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2451         }
2452     }
2453     unlock_user(target_vec, target_addr, 0);
2454  fail2:
2455     g_free(vec);
2456     errno = err;
2457     return NULL;
2458 }
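
/* The vector returned by lock_iovec() above must be released with
 * unlock_iovec() below, using the same target_addr, count and copy
 * arguments, so that every guest buffer locked here is unlocked (and,
 * for host-written data, copied back) exactly once. A NULL return with
 * errno == 0 simply means count was zero.
 */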
2459 
2460 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2461                          abi_ulong count, int copy)
2462 {
2463     struct target_iovec *target_vec;
2464     int i;
2465 
2466     target_vec = lock_user(VERIFY_READ, target_addr,
2467                            count * sizeof(struct target_iovec), 1);
2468     if (target_vec) {
2469         for (i = 0; i < count; i++) {
2470             abi_ulong base = tswapal(target_vec[i].iov_base);
2471             abi_long len = tswapal(target_vec[i].iov_len);
2472             if (len < 0) {
2473                 break;
2474             }
2475             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2476         }
2477         unlock_user(target_vec, target_addr, 0);
2478     }
2479 
2480     g_free(vec);
2481 }
2482 
2483 static inline int target_to_host_sock_type(int *type)
2484 {
2485     int host_type = 0;
2486     int target_type = *type;
2487 
2488     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2489     case TARGET_SOCK_DGRAM:
2490         host_type = SOCK_DGRAM;
2491         break;
2492     case TARGET_SOCK_STREAM:
2493         host_type = SOCK_STREAM;
2494         break;
2495     default:
2496         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2497         break;
2498     }
2499     if (target_type & TARGET_SOCK_CLOEXEC) {
2500 #if defined(SOCK_CLOEXEC)
2501         host_type |= SOCK_CLOEXEC;
2502 #else
2503         return -TARGET_EINVAL;
2504 #endif
2505     }
2506     if (target_type & TARGET_SOCK_NONBLOCK) {
2507 #if defined(SOCK_NONBLOCK)
2508         host_type |= SOCK_NONBLOCK;
2509 #elif !defined(O_NONBLOCK)
2510         return -TARGET_EINVAL;
2511 #endif
2512     }
2513     *type = host_type;
2514     return 0;
2515 }
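
/* Guest-side sketch of a socket() call whose type flags are rewritten
 * by target_to_host_sock_type() above before reaching the host:
 *
 *     int s = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
 *
 * On hosts without SOCK_NONBLOCK the flag is emulated afterwards by
 * sock_flags_fixup() below using fcntl(F_SETFL, O_NONBLOCK).
 */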
2516 
2517 /* Try to emulate socket type flags after socket creation.  */
2518 static int sock_flags_fixup(int fd, int target_type)
2519 {
2520 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2521     if (target_type & TARGET_SOCK_NONBLOCK) {
2522         int flags = fcntl(fd, F_GETFL);
2523         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2524             close(fd);
2525             return -TARGET_EINVAL;
2526         }
2527     }
2528 #endif
2529     return fd;
2530 }
2531 
2532 /* do_socket() must return target values and target errnos. */
2533 static abi_long do_socket(int domain, int type, int protocol)
2534 {
2535     int target_type = type;
2536     int ret;
2537 
2538     ret = target_to_host_sock_type(&type);
2539     if (ret) {
2540         return ret;
2541     }
2542 
2543     if (domain == PF_NETLINK && !(
2544 #ifdef CONFIG_RTNETLINK
2545          protocol == NETLINK_ROUTE ||
2546 #endif
2547          protocol == NETLINK_KOBJECT_UEVENT ||
2548          protocol == NETLINK_AUDIT)) {
2549         return -EPFNOSUPPORT;
2550     }
2551 
2552     if (domain == AF_PACKET ||
2553         (domain == AF_INET && type == SOCK_PACKET)) {
2554         protocol = tswap16(protocol);
2555     }
2556 
2557     ret = get_errno(socket(domain, type, protocol));
2558     if (ret >= 0) {
2559         ret = sock_flags_fixup(ret, target_type);
2560         if (type == SOCK_PACKET) {
2561             /* Handle an obsolete case: if the socket type is
2562              * SOCK_PACKET, bind by name.
2563              */
2564             fd_trans_register(ret, &target_packet_trans);
2565         } else if (domain == PF_NETLINK) {
2566             switch (protocol) {
2567 #ifdef CONFIG_RTNETLINK
2568             case NETLINK_ROUTE:
2569                 fd_trans_register(ret, &target_netlink_route_trans);
2570                 break;
2571 #endif
2572             case NETLINK_KOBJECT_UEVENT:
2573                 /* nothing to do: messages are strings */
2574                 break;
2575             case NETLINK_AUDIT:
2576                 fd_trans_register(ret, &target_netlink_audit_trans);
2577                 break;
2578             default:
2579                 g_assert_not_reached();
2580             }
2581         }
2582     }
2583     return ret;
2584 }
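
/* do_socket() above deliberately restricts PF_NETLINK to the protocols
 * we can translate (NETLINK_ROUTE when CONFIG_RTNETLINK is set, plus
 * NETLINK_KOBJECT_UEVENT and NETLINK_AUDIT) and registers an fd
 * translator for the formats that need byte-swapping, so later reads
 * and writes on the descriptor are converted transparently.
 */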
2585 
2586 /* do_bind() must return target values and target errnos. */
2587 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2588                         socklen_t addrlen)
2589 {
2590     void *addr;
2591     abi_long ret;
2592 
2593     if ((int)addrlen < 0) {
2594         return -TARGET_EINVAL;
2595     }
2596 
2597     addr = alloca(addrlen+1);
2598 
2599     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2600     if (ret)
2601         return ret;
2602 
2603     return get_errno(bind(sockfd, addr, addrlen));
2604 }
2605 
2606 /* do_connect() must return target values and target errnos. */
2607 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2608                            socklen_t addrlen)
2609 {
2610     void *addr;
2611     abi_long ret;
2612 
2613     if ((int)addrlen < 0) {
2614         return -TARGET_EINVAL;
2615     }
2616 
2617     addr = alloca(addrlen+1);
2618 
2619     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2620     if (ret)
2621         return ret;
2622 
2623     return get_errno(safe_connect(sockfd, addr, addrlen));
2624 }
2625 
2626 /* do_sendrecvmsg_locked() must return target values and target errnos. */
2627 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2628                                       int flags, int send)
2629 {
2630     abi_long ret, len;
2631     struct msghdr msg;
2632     abi_ulong count;
2633     struct iovec *vec;
2634     abi_ulong target_vec;
2635 
2636     if (msgp->msg_name) {
2637         msg.msg_namelen = tswap32(msgp->msg_namelen);
2638         msg.msg_name = alloca(msg.msg_namelen+1);
2639         ret = target_to_host_sockaddr(fd, msg.msg_name,
2640                                       tswapal(msgp->msg_name),
2641                                       msg.msg_namelen);
2642         if (ret == -TARGET_EFAULT) {
2643             /* For connected sockets msg_name and msg_namelen must
2644              * be ignored, so returning EFAULT immediately is wrong.
2645              * Instead, pass a bad msg_name to the host kernel, and
2646              * let it decide whether to return EFAULT or not.
2647              */
2648             msg.msg_name = (void *)-1;
2649         } else if (ret) {
2650             goto out2;
2651         }
2652     } else {
2653         msg.msg_name = NULL;
2654         msg.msg_namelen = 0;
2655     }
2656     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2657     msg.msg_control = alloca(msg.msg_controllen);
2658     memset(msg.msg_control, 0, msg.msg_controllen);
2659 
2660     msg.msg_flags = tswap32(msgp->msg_flags);
2661 
2662     count = tswapal(msgp->msg_iovlen);
2663     target_vec = tswapal(msgp->msg_iov);
2664 
2665     if (count > IOV_MAX) {
2666         /* sendmsg/recvmsg return a different errno for this condition than
2667          * readv/writev, so we must catch it here before lock_iovec() does.
2668          */
2669         ret = -TARGET_EMSGSIZE;
2670         goto out2;
2671     }
2672 
2673     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2674                      target_vec, count, send);
2675     if (vec == NULL) {
2676         ret = -host_to_target_errno(errno);
2677         goto out2;
2678     }
2679     msg.msg_iovlen = count;
2680     msg.msg_iov = vec;
2681 
2682     if (send) {
2683         if (fd_trans_target_to_host_data(fd)) {
2684             void *host_msg;
2685 
2686             host_msg = g_malloc(msg.msg_iov->iov_len);
2687             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2688             ret = fd_trans_target_to_host_data(fd)(host_msg,
2689                                                    msg.msg_iov->iov_len);
2690             if (ret >= 0) {
2691                 msg.msg_iov->iov_base = host_msg;
2692                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2693             }
2694             g_free(host_msg);
2695         } else {
2696             ret = target_to_host_cmsg(&msg, msgp);
2697             if (ret == 0) {
2698                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2699             }
2700         }
2701     } else {
2702         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2703         if (!is_error(ret)) {
2704             len = ret;
2705             if (fd_trans_host_to_target_data(fd)) {
2706                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2707                                                MIN(msg.msg_iov->iov_len, len));
2708             } else {
2709                 ret = host_to_target_cmsg(msgp, &msg);
2710             }
2711             if (!is_error(ret)) {
2712                 msgp->msg_namelen = tswap32(msg.msg_namelen);
2713                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2714                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2715                                     msg.msg_name, msg.msg_namelen);
2716                     if (ret) {
2717                         goto out;
2718                     }
2719                 }
2720 
2721                 ret = len;
2722             }
2723         }
2724     }
2725 
2726 out:
2727     unlock_iovec(vec, target_vec, count, !send);
2728 out2:
2729     return ret;
2730 }
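
/* The control buffer allocated in do_sendrecvmsg_locked() above is
 * twice the guest's msg_controllen because host cmsg headers and
 * alignment can be larger than the target's, as noted in
 * target_to_host_cmsg(); the doubled allocation gives the conversion
 * routines headroom for the payload types currently supported.
 */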
2731 
2732 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2733                                int flags, int send)
2734 {
2735     abi_long ret;
2736     struct target_msghdr *msgp;
2737 
2738     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2739                           msgp,
2740                           target_msg,
2741                           send ? 1 : 0)) {
2742         return -TARGET_EFAULT;
2743     }
2744     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2745     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2746     return ret;
2747 }
2748 
2749 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2750  * so it might not have this *mmsg-specific flag either.
2751  */
2752 #ifndef MSG_WAITFORONE
2753 #define MSG_WAITFORONE 0x10000
2754 #endif
2755 
2756 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2757                                 unsigned int vlen, unsigned int flags,
2758                                 int send)
2759 {
2760     struct target_mmsghdr *mmsgp;
2761     abi_long ret = 0;
2762     int i;
2763 
2764     if (vlen > UIO_MAXIOV) {
2765         vlen = UIO_MAXIOV;
2766     }
2767 
2768     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2769     if (!mmsgp) {
2770         return -TARGET_EFAULT;
2771     }
2772 
2773     for (i = 0; i < vlen; i++) {
2774         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2775         if (is_error(ret)) {
2776             break;
2777         }
2778         mmsgp[i].msg_len = tswap32(ret);
2779         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2780         if (flags & MSG_WAITFORONE) {
2781             flags |= MSG_DONTWAIT;
2782         }
2783     }
2784 
2785     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2786 
2787     /* Return the number of datagrams sent/received if we handled
2788      * any at all; otherwise return the error.
2789      */
2790     if (i) {
2791         return i;
2792     }
2793     return ret;
2794 }
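
/* Guest-side sketch of a recvmmsg() call handled by do_sendrecvmmsg()
 * above; MSG_WAITFORONE makes every datagram after the first one
 * non-blocking (msgs and vlen are placeholders):
 *
 *     int n = recvmmsg(fd, msgs, vlen, MSG_WAITFORONE, NULL);
 */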
2795 
2796 /* do_accept4() must return target values and target errnos. */
2797 static abi_long do_accept4(int fd, abi_ulong target_addr,
2798                            abi_ulong target_addrlen_addr, int flags)
2799 {
2800     socklen_t addrlen;
2801     void *addr;
2802     abi_long ret;
2803     int host_flags;
2804 
2805     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2806 
2807     if (target_addr == 0) {
2808         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2809     }
2810 
2811     /* Linux returns EINVAL if the addrlen pointer is invalid.  */
2812     if (get_user_u32(addrlen, target_addrlen_addr))
2813         return -TARGET_EINVAL;
2814 
2815     if ((int)addrlen < 0) {
2816         return -TARGET_EINVAL;
2817     }
2818 
2819     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2820         return -TARGET_EINVAL;
2821 
2822     addr = alloca(addrlen);
2823 
2824     ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
2825     if (!is_error(ret)) {
2826         host_to_target_sockaddr(target_addr, addr, addrlen);
2827         if (put_user_u32(addrlen, target_addrlen_addr))
2828             ret = -TARGET_EFAULT;
2829     }
2830     return ret;
2831 }
2832 
2833 /* do_getpeername() must return target values and target errnos. */
2834 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2835                                abi_ulong target_addrlen_addr)
2836 {
2837     socklen_t addrlen;
2838     void *addr;
2839     abi_long ret;
2840 
2841     if (get_user_u32(addrlen, target_addrlen_addr))
2842         return -TARGET_EFAULT;
2843 
2844     if ((int)addrlen < 0) {
2845         return -TARGET_EINVAL;
2846     }
2847 
2848     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2849         return -TARGET_EFAULT;
2850 
2851     addr = alloca(addrlen);
2852 
2853     ret = get_errno(getpeername(fd, addr, &addrlen));
2854     if (!is_error(ret)) {
2855         host_to_target_sockaddr(target_addr, addr, addrlen);
2856         if (put_user_u32(addrlen, target_addrlen_addr))
2857             ret = -TARGET_EFAULT;
2858     }
2859     return ret;
2860 }
2861 
2862 /* do_getsockname() must return target values and target errnos. */
2863 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2864                                abi_ulong target_addrlen_addr)
2865 {
2866     socklen_t addrlen;
2867     void *addr;
2868     abi_long ret;
2869 
2870     if (get_user_u32(addrlen, target_addrlen_addr))
2871         return -TARGET_EFAULT;
2872 
2873     if ((int)addrlen < 0) {
2874         return -TARGET_EINVAL;
2875     }
2876 
2877     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2878         return -TARGET_EFAULT;
2879 
2880     addr = alloca(addrlen);
2881 
2882     ret = get_errno(getsockname(fd, addr, &addrlen));
2883     if (!is_error(ret)) {
2884         host_to_target_sockaddr(target_addr, addr, addrlen);
2885         if (put_user_u32(addrlen, target_addrlen_addr))
2886             ret = -TARGET_EFAULT;
2887     }
2888     return ret;
2889 }
2890 
2891 /* do_socketpair() must return target values and target errnos. */
2892 static abi_long do_socketpair(int domain, int type, int protocol,
2893                               abi_ulong target_tab_addr)
2894 {
2895     int tab[2];
2896     abi_long ret;
2897 
2898     target_to_host_sock_type(&type);
2899 
2900     ret = get_errno(socketpair(domain, type, protocol, tab));
2901     if (!is_error(ret)) {
2902         if (put_user_s32(tab[0], target_tab_addr)
2903             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2904             ret = -TARGET_EFAULT;
2905     }
2906     return ret;
2907 }
2908 
2909 /* do_sendto() must return target values and target errnos. */
2910 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2911                           abi_ulong target_addr, socklen_t addrlen)
2912 {
2913     void *addr;
2914     void *host_msg;
2915     void *copy_msg = NULL;
2916     abi_long ret;
2917 
2918     if ((int)addrlen < 0) {
2919         return -TARGET_EINVAL;
2920     }
2921 
2922     host_msg = lock_user(VERIFY_READ, msg, len, 1);
2923     if (!host_msg)
2924         return -TARGET_EFAULT;
2925     if (fd_trans_target_to_host_data(fd)) {
2926         copy_msg = host_msg;
2927         host_msg = g_malloc(len);
2928         memcpy(host_msg, copy_msg, len);
2929         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
2930         if (ret < 0) {
2931             goto fail;
2932         }
2933     }
2934     if (target_addr) {
2935         addr = alloca(addrlen+1);
2936         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
2937         if (ret) {
2938             goto fail;
2939         }
2940         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
2941     } else {
2942         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
2943     }
2944 fail:
2945     if (copy_msg) {
2946         g_free(host_msg);
2947         host_msg = copy_msg;
2948     }
2949     unlock_user(host_msg, msg, 0);
2950     return ret;
2951 }
2952 
2953 /* do_recvfrom() must return target values and target errnos. */
2954 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2955                             abi_ulong target_addr,
2956                             abi_ulong target_addrlen)
2957 {
2958     socklen_t addrlen;
2959     void *addr;
2960     void *host_msg;
2961     abi_long ret;
2962 
2963     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2964     if (!host_msg)
2965         return -TARGET_EFAULT;
2966     if (target_addr) {
2967         if (get_user_u32(addrlen, target_addrlen)) {
2968             ret = -TARGET_EFAULT;
2969             goto fail;
2970         }
2971         if ((int)addrlen < 0) {
2972             ret = -TARGET_EINVAL;
2973             goto fail;
2974         }
2975         addr = alloca(addrlen);
2976         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
2977                                       addr, &addrlen));
2978     } else {
2979         addr = NULL; /* To keep compiler quiet.  */
2980         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
2981     }
2982     if (!is_error(ret)) {
2983         if (fd_trans_host_to_target_data(fd)) {
2984             abi_long trans;
2985             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
2986             if (is_error(trans)) {
2987                 ret = trans;
2988                 goto fail;
2989             }
2990         }
2991         if (target_addr) {
2992             host_to_target_sockaddr(target_addr, addr, addrlen);
2993             if (put_user_u32(addrlen, target_addrlen)) {
2994                 ret = -TARGET_EFAULT;
2995                 goto fail;
2996             }
2997         }
2998         unlock_user(host_msg, msg, len);
2999     } else {
3000 fail:
3001         unlock_user(host_msg, msg, 0);
3002     }
3003     return ret;
3004 }
3005 
3006 #ifdef TARGET_NR_socketcall
3007 /* do_socketcall() must return target values and target errnos. */
3008 static abi_long do_socketcall(int num, abi_ulong vptr)
3009 {
3010     static const unsigned nargs[] = { /* number of arguments per operation */
3011         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3012         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3013         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3014         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3015         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3016         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3017         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3018         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3019         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3020         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3021         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3022         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3023         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3024         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3025         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3026         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3027         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3028         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3029         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3030         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3031     };
3032     abi_long a[6]; /* max 6 args */
3033     unsigned i;
3034 
3035     /* check the range of the first argument num */
3036     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3037     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3038         return -TARGET_EINVAL;
3039     }
3040     /* ensure we have space for args */
3041     if (nargs[num] > ARRAY_SIZE(a)) {
3042         return -TARGET_EINVAL;
3043     }
3044     /* collect the arguments in a[] according to nargs[] */
3045     for (i = 0; i < nargs[num]; ++i) {
3046         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3047             return -TARGET_EFAULT;
3048         }
3049     }
3050     /* now that we have the args, invoke the appropriate underlying function */
3051     switch (num) {
3052     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3053         return do_socket(a[0], a[1], a[2]);
3054     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3055         return do_bind(a[0], a[1], a[2]);
3056     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3057         return do_connect(a[0], a[1], a[2]);
3058     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3059         return get_errno(listen(a[0], a[1]));
3060     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3061         return do_accept4(a[0], a[1], a[2], 0);
3062     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3063         return do_getsockname(a[0], a[1], a[2]);
3064     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3065         return do_getpeername(a[0], a[1], a[2]);
3066     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3067         return do_socketpair(a[0], a[1], a[2], a[3]);
3068     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3069         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3070     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3071         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3072     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3073         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3074     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3075         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3076     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3077         return get_errno(shutdown(a[0], a[1]));
3078     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3079         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3080     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3081         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3082     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3083         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3084     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3085         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3086     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3087         return do_accept4(a[0], a[1], a[2], a[3]);
3088     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3089         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3090     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3091         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3092     default:
3093         gemu_log("Unsupported socketcall: %d\n", num);
3094         return -TARGET_EINVAL;
3095     }
3096 }
3097 #endif
3098 
3099 #define N_SHM_REGIONS	32
3100 
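/*
 * Bookkeeping for guest shmat() attachments: do_shmat() records the guest
 * start address and size of every segment it maps so that do_shmdt() can
 * later clear the corresponding page flags.  The fixed-size table is a
 * deliberate simplification; at most N_SHM_REGIONS attachments are tracked
 * at any one time.
 */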
3101 static struct shm_region {
3102     abi_ulong start;
3103     abi_ulong size;
3104     bool in_use;
3105 } shm_regions[N_SHM_REGIONS];
3106 
3107 #ifndef TARGET_SEMID64_DS
3108 /* asm-generic version of this struct */
3109 struct target_semid64_ds
3110 {
3111   struct target_ipc_perm sem_perm;
3112   abi_ulong sem_otime;
3113 #if TARGET_ABI_BITS == 32
3114   abi_ulong __unused1;
3115 #endif
3116   abi_ulong sem_ctime;
3117 #if TARGET_ABI_BITS == 32
3118   abi_ulong __unused2;
3119 #endif
3120   abi_ulong sem_nsems;
3121   abi_ulong __unused3;
3122   abi_ulong __unused4;
3123 };
3124 #endif
3125 
3126 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3127                                                abi_ulong target_addr)
3128 {
3129     struct target_ipc_perm *target_ip;
3130     struct target_semid64_ds *target_sd;
3131 
3132     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3133         return -TARGET_EFAULT;
3134     target_ip = &(target_sd->sem_perm);
3135     host_ip->__key = tswap32(target_ip->__key);
3136     host_ip->uid = tswap32(target_ip->uid);
3137     host_ip->gid = tswap32(target_ip->gid);
3138     host_ip->cuid = tswap32(target_ip->cuid);
3139     host_ip->cgid = tswap32(target_ip->cgid);
3140 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3141     host_ip->mode = tswap32(target_ip->mode);
3142 #else
3143     host_ip->mode = tswap16(target_ip->mode);
3144 #endif
3145 #if defined(TARGET_PPC)
3146     host_ip->__seq = tswap32(target_ip->__seq);
3147 #else
3148     host_ip->__seq = tswap16(target_ip->__seq);
3149 #endif
3150     unlock_user_struct(target_sd, target_addr, 0);
3151     return 0;
3152 }
3153 
3154 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3155                                                struct ipc_perm *host_ip)
3156 {
3157     struct target_ipc_perm *target_ip;
3158     struct target_semid64_ds *target_sd;
3159 
3160     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3161         return -TARGET_EFAULT;
3162     target_ip = &(target_sd->sem_perm);
3163     target_ip->__key = tswap32(host_ip->__key);
3164     target_ip->uid = tswap32(host_ip->uid);
3165     target_ip->gid = tswap32(host_ip->gid);
3166     target_ip->cuid = tswap32(host_ip->cuid);
3167     target_ip->cgid = tswap32(host_ip->cgid);
3168 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3169     target_ip->mode = tswap32(host_ip->mode);
3170 #else
3171     target_ip->mode = tswap16(host_ip->mode);
3172 #endif
3173 #if defined(TARGET_PPC)
3174     target_ip->__seq = tswap32(host_ip->__seq);
3175 #else
3176     target_ip->__seq = tswap16(host_ip->__seq);
3177 #endif
3178     unlock_user_struct(target_sd, target_addr, 1);
3179     return 0;
3180 }
3181 
3182 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3183                                                abi_ulong target_addr)
3184 {
3185     struct target_semid64_ds *target_sd;
3186 
3187     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3188         return -TARGET_EFAULT;
3189     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3190         return -TARGET_EFAULT;
3191     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3192     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3193     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3194     unlock_user_struct(target_sd, target_addr, 0);
3195     return 0;
3196 }
3197 
3198 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3199                                                struct semid_ds *host_sd)
3200 {
3201     struct target_semid64_ds *target_sd;
3202 
3203     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3204         return -TARGET_EFAULT;
3205     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3206         return -TARGET_EFAULT;
3207     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3208     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3209     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3210     unlock_user_struct(target_sd, target_addr, 1);
3211     return 0;
3212 }
3213 
3214 struct target_seminfo {
3215     int semmap;
3216     int semmni;
3217     int semmns;
3218     int semmnu;
3219     int semmsl;
3220     int semopm;
3221     int semume;
3222     int semusz;
3223     int semvmx;
3224     int semaem;
3225 };
3226 
3227 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3228                                               struct seminfo *host_seminfo)
3229 {
3230     struct target_seminfo *target_seminfo;
3231     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3232         return -TARGET_EFAULT;
3233     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3234     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3235     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3236     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3237     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3238     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3239     __put_user(host_seminfo->semume, &target_seminfo->semume);
3240     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3241     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3242     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3243     unlock_user_struct(target_seminfo, target_addr, 1);
3244     return 0;
3245 }
3246 
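/*
 * The host semctl() takes a union semun by value, whereas the guest ABI
 * passes a single register-sized value whose interpretation depends on cmd
 * (a plain integer for SETVAL, a guest pointer otherwise).  target_semun
 * therefore stores every pointer member as an abi_ulong so it can be
 * byte-swapped and translated before the host union is built.
 */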
3247 union semun {
3248 	int val;
3249 	struct semid_ds *buf;
3250 	unsigned short *array;
3251 	struct seminfo *__buf;
3252 };
3253 
3254 union target_semun {
3255 	int val;
3256 	abi_ulong buf;
3257 	abi_ulong array;
3258 	abi_ulong __buf;
3259 };
3260 
3261 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3262                                                abi_ulong target_addr)
3263 {
3264     int nsems;
3265     unsigned short *array;
3266     union semun semun;
3267     struct semid_ds semid_ds;
3268     int i, ret;
3269 
3270     semun.buf = &semid_ds;
3271 
3272     ret = semctl(semid, 0, IPC_STAT, semun);
3273     if (ret == -1)
3274         return get_errno(ret);
3275 
3276     nsems = semid_ds.sem_nsems;
3277 
3278     *host_array = g_try_new(unsigned short, nsems);
3279     if (!*host_array) {
3280         return -TARGET_ENOMEM;
3281     }
3282     array = lock_user(VERIFY_READ, target_addr,
3283                       nsems*sizeof(unsigned short), 1);
3284     if (!array) {
3285         g_free(*host_array);
3286         return -TARGET_EFAULT;
3287     }
3288 
3289     for (i = 0; i < nsems; i++) {
3290         __get_user((*host_array)[i], &array[i]);
3291     }
3292     unlock_user(array, target_addr, 0);
3293 
3294     return 0;
3295 }
3296 
3297 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3298                                                unsigned short **host_array)
3299 {
3300     int nsems;
3301     unsigned short *array;
3302     union semun semun;
3303     struct semid_ds semid_ds;
3304     int i, ret;
3305 
3306     semun.buf = &semid_ds;
3307 
3308     ret = semctl(semid, 0, IPC_STAT, semun);
3309     if (ret == -1)
3310         return get_errno(ret);
3311 
3312     nsems = semid_ds.sem_nsems;
3313 
3314     array = lock_user(VERIFY_WRITE, target_addr,
3315                       nsems*sizeof(unsigned short), 0);
3316     if (!array)
3317         return -TARGET_EFAULT;
3318 
3319     for (i = 0; i < nsems; i++) {
3320         __put_user((*host_array)[i], &array[i]);
3321     }
3322     g_free(*host_array);
3323     unlock_user(array, target_addr, 1);
3324 
3325     return 0;
3326 }
3327 
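/*
 * do_semctl() chooses a conversion strategy from the low byte of cmd:
 * value commands (GETVAL/SETVAL) only need the cross-endian fix-up on the
 * union described below, array commands (GETALL/SETALL) bounce the
 * semaphore values through a temporary host array sized via IPC_STAT, and
 * the *_STAT/*_INFO commands convert whole structures in both directions.
 */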
3328 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3329                                  abi_ulong target_arg)
3330 {
3331     union target_semun target_su = { .buf = target_arg };
3332     union semun arg;
3333     struct semid_ds dsarg;
3334     unsigned short *array = NULL;
3335     struct seminfo seminfo;
3336     abi_long ret = -TARGET_EINVAL;
3337     abi_long err;
3338     cmd &= 0xff;
3339 
3340     switch (cmd) {
3341     case GETVAL:
3342     case SETVAL:
3343         /* In 64 bit cross-endian situations, we will erroneously pick up
3344          * the wrong half of the union for the "val" element.  To rectify
3345          * this, the entire 8-byte structure is byteswapped, followed by
3346          * a swap of the 4 byte val field. In other cases, the data is
3347          * already in proper host byte order. */
3348         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3349             target_su.buf = tswapal(target_su.buf);
3350             arg.val = tswap32(target_su.val);
3351         } else {
3352             arg.val = target_su.val;
3353         }
3354         ret = get_errno(semctl(semid, semnum, cmd, arg));
3355         break;
3356     case GETALL:
3357     case SETALL:
3358         err = target_to_host_semarray(semid, &array, target_su.array);
3359         if (err)
3360             return err;
3361         arg.array = array;
3362         ret = get_errno(semctl(semid, semnum, cmd, arg));
3363         err = host_to_target_semarray(semid, target_su.array, &array);
3364         if (err)
3365             return err;
3366         break;
3367     case IPC_STAT:
3368     case IPC_SET:
3369     case SEM_STAT:
3370         err = target_to_host_semid_ds(&dsarg, target_su.buf);
3371         if (err)
3372             return err;
3373         arg.buf = &dsarg;
3374         ret = get_errno(semctl(semid, semnum, cmd, arg));
3375         err = host_to_target_semid_ds(target_su.buf, &dsarg);
3376         if (err)
3377             return err;
3378         break;
3379     case IPC_INFO:
3380     case SEM_INFO:
3381         arg.__buf = &seminfo;
3382         ret = get_errno(semctl(semid, semnum, cmd, arg));
3383         err = host_to_target_seminfo(target_su.__buf, &seminfo);
3384         if (err)
3385             return err;
3386         break;
3387     case IPC_RMID:
3388     case GETPID:
3389     case GETNCNT:
3390     case GETZCNT:
3391         ret = get_errno(semctl(semid, semnum, cmd, NULL));
3392         break;
3393     }
3394 
3395     return ret;
3396 }
3397 
3398 struct target_sembuf {
3399     unsigned short sem_num;
3400     short sem_op;
3401     short sem_flg;
3402 };
3403 
3404 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3405                                              abi_ulong target_addr,
3406                                              unsigned nsops)
3407 {
3408     struct target_sembuf *target_sembuf;
3409     int i;
3410 
3411     target_sembuf = lock_user(VERIFY_READ, target_addr,
3412                               nsops*sizeof(struct target_sembuf), 1);
3413     if (!target_sembuf)
3414         return -TARGET_EFAULT;
3415 
3416     for (i = 0; i < nsops; i++) {
3417         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3418         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3419         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3420     }
3421 
3422     unlock_user(target_sembuf, target_addr, 0);
3423 
3424     return 0;
3425 }
3426 
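/*
 * semop() is emulated via safe_semtimedop() with a NULL timeout; the
 * safe_ wrapper lets a guest signal interrupt the blocking call without
 * being lost.  The guest sembuf array is converted into a host copy on
 * the stack before the call.
 */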
3427 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3428 {
3429     struct sembuf sops[nsops];
3430 
3431     if (target_to_host_sembuf(sops, ptr, nsops))
3432         return -TARGET_EFAULT;
3433 
3434     return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3435 }
3436 
3437 struct target_msqid_ds
3438 {
3439     struct target_ipc_perm msg_perm;
3440     abi_ulong msg_stime;
3441 #if TARGET_ABI_BITS == 32
3442     abi_ulong __unused1;
3443 #endif
3444     abi_ulong msg_rtime;
3445 #if TARGET_ABI_BITS == 32
3446     abi_ulong __unused2;
3447 #endif
3448     abi_ulong msg_ctime;
3449 #if TARGET_ABI_BITS == 32
3450     abi_ulong __unused3;
3451 #endif
3452     abi_ulong __msg_cbytes;
3453     abi_ulong msg_qnum;
3454     abi_ulong msg_qbytes;
3455     abi_ulong msg_lspid;
3456     abi_ulong msg_lrpid;
3457     abi_ulong __unused4;
3458     abi_ulong __unused5;
3459 };
3460 
3461 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3462                                                abi_ulong target_addr)
3463 {
3464     struct target_msqid_ds *target_md;
3465 
3466     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3467         return -TARGET_EFAULT;
3468     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3469         return -TARGET_EFAULT;
3470     host_md->msg_stime = tswapal(target_md->msg_stime);
3471     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3472     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3473     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3474     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3475     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3476     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3477     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3478     unlock_user_struct(target_md, target_addr, 0);
3479     return 0;
3480 }
3481 
3482 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3483                                                struct msqid_ds *host_md)
3484 {
3485     struct target_msqid_ds *target_md;
3486 
3487     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3488         return -TARGET_EFAULT;
3489     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3490         return -TARGET_EFAULT;
3491     target_md->msg_stime = tswapal(host_md->msg_stime);
3492     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3493     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3494     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3495     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3496     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3497     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3498     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3499     unlock_user_struct(target_md, target_addr, 1);
3500     return 0;
3501 }
3502 
3503 struct target_msginfo {
3504     int msgpool;
3505     int msgmap;
3506     int msgmax;
3507     int msgmnb;
3508     int msgmni;
3509     int msgssz;
3510     int msgtql;
3511     unsigned short int msgseg;
3512 };
3513 
3514 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3515                                               struct msginfo *host_msginfo)
3516 {
3517     struct target_msginfo *target_msginfo;
3518     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3519         return -TARGET_EFAULT;
3520     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3521     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3522     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3523     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3524     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3525     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3526     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3527     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3528     unlock_user_struct(target_msginfo, target_addr, 1);
3529     return 0;
3530 }
3531 
3532 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3533 {
3534     struct msqid_ds dsarg;
3535     struct msginfo msginfo;
3536     abi_long ret = -TARGET_EINVAL;
3537 
3538     cmd &= 0xff;
3539 
3540     switch (cmd) {
3541     case IPC_STAT:
3542     case IPC_SET:
3543     case MSG_STAT:
3544         if (target_to_host_msqid_ds(&dsarg,ptr))
3545             return -TARGET_EFAULT;
3546         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3547         if (host_to_target_msqid_ds(ptr,&dsarg))
3548             return -TARGET_EFAULT;
3549         break;
3550     case IPC_RMID:
3551         ret = get_errno(msgctl(msgid, cmd, NULL));
3552         break;
3553     case IPC_INFO:
3554     case MSG_INFO:
3555         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3556         if (host_to_target_msginfo(ptr, &msginfo))
3557             return -TARGET_EFAULT;
3558         break;
3559     }
3560 
3561     return ret;
3562 }
3563 
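/*
 * msgsnd()/msgrcv() exchange a struct msgbuf: a long message type followed
 * by the payload bytes.  Only mtype needs byte swapping; the payload is
 * copied verbatim, which is why the host-side buffer is allocated as
 * msgsz bytes plus sizeof(long) for the type field.
 */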
3564 struct target_msgbuf {
3565     abi_long mtype;
3566     char	mtext[1];
3567 };
3568 
3569 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3570                                  ssize_t msgsz, int msgflg)
3571 {
3572     struct target_msgbuf *target_mb;
3573     struct msgbuf *host_mb;
3574     abi_long ret = 0;
3575 
3576     if (msgsz < 0) {
3577         return -TARGET_EINVAL;
3578     }
3579 
3580     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3581         return -TARGET_EFAULT;
3582     host_mb = g_try_malloc(msgsz + sizeof(long));
3583     if (!host_mb) {
3584         unlock_user_struct(target_mb, msgp, 0);
3585         return -TARGET_ENOMEM;
3586     }
3587     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3588     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3589     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3590     g_free(host_mb);
3591     unlock_user_struct(target_mb, msgp, 0);
3592 
3593     return ret;
3594 }
3595 
3596 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3597                                  ssize_t msgsz, abi_long msgtyp,
3598                                  int msgflg)
3599 {
3600     struct target_msgbuf *target_mb;
3601     char *target_mtext;
3602     struct msgbuf *host_mb;
3603     abi_long ret = 0;
3604 
3605     if (msgsz < 0) {
3606         return -TARGET_EINVAL;
3607     }
3608 
3609     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3610         return -TARGET_EFAULT;
3611 
3612     host_mb = g_try_malloc(msgsz + sizeof(long));
3613     if (!host_mb) {
3614         ret = -TARGET_ENOMEM;
3615         goto end;
3616     }
3617     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3618 
3619     if (ret > 0) {
3620         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3621         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3622         if (!target_mtext) {
3623             ret = -TARGET_EFAULT;
3624             goto end;
3625         }
3626         memcpy(target_mb->mtext, host_mb->mtext, ret);
3627         unlock_user(target_mtext, target_mtext_addr, ret);
3628     }
3629 
3630     target_mb->mtype = tswapal(host_mb->mtype);
3631 
3632 end:
3633     if (target_mb)
3634         unlock_user_struct(target_mb, msgp, 1);
3635     g_free(host_mb);
3636     return ret;
3637 }
3638 
3639 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3640                                                abi_ulong target_addr)
3641 {
3642     struct target_shmid_ds *target_sd;
3643 
3644     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3645         return -TARGET_EFAULT;
3646     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3647         return -TARGET_EFAULT;
3648     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3649     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3650     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3651     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3652     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3653     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3654     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3655     unlock_user_struct(target_sd, target_addr, 0);
3656     return 0;
3657 }
3658 
3659 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3660                                                struct shmid_ds *host_sd)
3661 {
3662     struct target_shmid_ds *target_sd;
3663 
3664     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3665         return -TARGET_EFAULT;
3666     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3667         return -TARGET_EFAULT;
3668     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3669     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3670     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3671     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3672     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3673     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3674     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3675     unlock_user_struct(target_sd, target_addr, 1);
3676     return 0;
3677 }
3678 
3679 struct  target_shminfo {
3680     abi_ulong shmmax;
3681     abi_ulong shmmin;
3682     abi_ulong shmmni;
3683     abi_ulong shmseg;
3684     abi_ulong shmall;
3685 };
3686 
3687 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3688                                               struct shminfo *host_shminfo)
3689 {
3690     struct target_shminfo *target_shminfo;
3691     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3692         return -TARGET_EFAULT;
3693     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3694     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3695     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3696     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3697     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3698     unlock_user_struct(target_shminfo, target_addr, 1);
3699     return 0;
3700 }
3701 
3702 struct target_shm_info {
3703     int used_ids;
3704     abi_ulong shm_tot;
3705     abi_ulong shm_rss;
3706     abi_ulong shm_swp;
3707     abi_ulong swap_attempts;
3708     abi_ulong swap_successes;
3709 };
3710 
3711 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3712                                                struct shm_info *host_shm_info)
3713 {
3714     struct target_shm_info *target_shm_info;
3715     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3716         return -TARGET_EFAULT;
3717     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3718     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3719     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3720     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3721     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3722     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3723     unlock_user_struct(target_shm_info, target_addr, 1);
3724     return 0;
3725 }
3726 
3727 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3728 {
3729     struct shmid_ds dsarg;
3730     struct shminfo shminfo;
3731     struct shm_info shm_info;
3732     abi_long ret = -TARGET_EINVAL;
3733 
3734     cmd &= 0xff;
3735 
3736     switch(cmd) {
3737     case IPC_STAT:
3738     case IPC_SET:
3739     case SHM_STAT:
3740         if (target_to_host_shmid_ds(&dsarg, buf))
3741             return -TARGET_EFAULT;
3742         ret = get_errno(shmctl(shmid, cmd, &dsarg));
3743         if (host_to_target_shmid_ds(buf, &dsarg))
3744             return -TARGET_EFAULT;
3745         break;
3746     case IPC_INFO:
3747         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3748         if (host_to_target_shminfo(buf, &shminfo))
3749             return -TARGET_EFAULT;
3750         break;
3751     case SHM_INFO:
3752         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3753         if (host_to_target_shm_info(buf, &shm_info))
3754             return -TARGET_EFAULT;
3755         break;
3756     case IPC_RMID:
3757     case SHM_LOCK:
3758     case SHM_UNLOCK:
3759         ret = get_errno(shmctl(shmid, cmd, NULL));
3760         break;
3761     }
3762 
3763     return ret;
3764 }
3765 
3766 #ifndef TARGET_FORCE_SHMLBA
3767 /* For most architectures, SHMLBA is the same as the page size;
3768  * some architectures have larger values, in which case they should
3769  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3770  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3771  * and defining its own value for SHMLBA.
3772  *
3773  * The kernel also permits SHMLBA to be set by the architecture to a
3774  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3775  * this means that addresses are rounded to the large size if
3776  * SHM_RND is set but addresses not aligned to that size are not rejected
3777  * as long as they are at least page-aligned. Since the only architecture
3778  * which uses this is ia64 this code doesn't provide for that oddity.
3779  */
3780 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
3781 {
3782     return TARGET_PAGE_SIZE;
3783 }
3784 #endif
3785 
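/*
 * Attach a SysV shared memory segment at a guest address.  The requested
 * address must respect the target's SHMLBA: with SHM_RND it is rounded
 * down via shmaddr &= ~(shmlba - 1) (for example, with a 4 KiB SHMLBA a
 * request for 0x12345 becomes 0x12000); without SHM_RND a misaligned
 * address is rejected with EINVAL, matching kernel behaviour.
 */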
3786 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
3787                                  int shmid, abi_ulong shmaddr, int shmflg)
3788 {
3789     abi_long raddr;
3790     void *host_raddr;
3791     struct shmid_ds shm_info;
3792     int i, ret;
3793     abi_ulong shmlba;
3794 
3795     /* find out the length of the shared memory segment */
3796     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3797     if (is_error(ret)) {
3798         /* can't get length, bail out */
3799         return ret;
3800     }
3801 
3802     shmlba = target_shmlba(cpu_env);
3803 
3804     if (shmaddr & (shmlba - 1)) {
3805         if (shmflg & SHM_RND) {
3806             shmaddr &= ~(shmlba - 1);
3807         } else {
3808             return -TARGET_EINVAL;
3809         }
3810     }
3811     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
3812         return -TARGET_EINVAL;
3813     }
3814 
3815     mmap_lock();
3816 
3817     if (shmaddr)
3818         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3819     else {
3820         abi_ulong mmap_start;
3821 
3822         mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3823 
3824         if (mmap_start == -1) {
3825             errno = ENOMEM;
3826             host_raddr = (void *)-1;
3827         } else
3828             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3829     }
3830 
3831     if (host_raddr == (void *)-1) {
3832         mmap_unlock();
3833         return get_errno((long)host_raddr);
3834     }
3835     raddr = h2g((unsigned long)host_raddr);
3836 
3837     page_set_flags(raddr, raddr + shm_info.shm_segsz,
3838                    PAGE_VALID | PAGE_READ |
3839                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3840 
3841     for (i = 0; i < N_SHM_REGIONS; i++) {
3842         if (!shm_regions[i].in_use) {
3843             shm_regions[i].in_use = true;
3844             shm_regions[i].start = raddr;
3845             shm_regions[i].size = shm_info.shm_segsz;
3846             break;
3847         }
3848     }
3849 
3850     mmap_unlock();
3851     return raddr;
3852 
3853 }
3854 
3855 static inline abi_long do_shmdt(abi_ulong shmaddr)
3856 {
3857     int i;
3858     abi_long rv;
3859 
3860     mmap_lock();
3861 
3862     for (i = 0; i < N_SHM_REGIONS; ++i) {
3863         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3864             shm_regions[i].in_use = false;
3865             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3866             break;
3867         }
3868     }
3869     rv = get_errno(shmdt(g2h(shmaddr)));
3870 
3871     mmap_unlock();
3872 
3873     return rv;
3874 }
3875 
3876 #ifdef TARGET_NR_ipc
3877 /* ??? This only works with linear mappings.  */
3878 /* do_ipc() must return target values and target errnos. */
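/*
 * The ipc(2) multiplexer encodes the operation in the low 16 bits of
 * 'call' and an interface version in the high 16 bits; the version only
 * matters for msgrcv and shmat, which had older calling conventions.  For
 * IPCOP_semctl the semun argument is passed by value, so 'ptr' is
 * dereferenced with get_user_ual() before calling do_semctl().
 */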
3879 static abi_long do_ipc(CPUArchState *cpu_env,
3880                        unsigned int call, abi_long first,
3881                        abi_long second, abi_long third,
3882                        abi_long ptr, abi_long fifth)
3883 {
3884     int version;
3885     abi_long ret = 0;
3886 
3887     version = call >> 16;
3888     call &= 0xffff;
3889 
3890     switch (call) {
3891     case IPCOP_semop:
3892         ret = do_semop(first, ptr, second);
3893         break;
3894 
3895     case IPCOP_semget:
3896         ret = get_errno(semget(first, second, third));
3897         break;
3898 
3899     case IPCOP_semctl: {
3900         /* The semun argument to semctl is passed by value, so dereference the
3901          * ptr argument. */
3902         abi_ulong atptr;
3903         get_user_ual(atptr, ptr);
3904         ret = do_semctl(first, second, third, atptr);
3905         break;
3906     }
3907 
3908     case IPCOP_msgget:
3909         ret = get_errno(msgget(first, second));
3910         break;
3911 
3912     case IPCOP_msgsnd:
3913         ret = do_msgsnd(first, ptr, second, third);
3914         break;
3915 
3916     case IPCOP_msgctl:
3917         ret = do_msgctl(first, second, ptr);
3918         break;
3919 
3920     case IPCOP_msgrcv:
3921         switch (version) {
3922         case 0:
3923             {
3924                 struct target_ipc_kludge {
3925                     abi_long msgp;
3926                     abi_long msgtyp;
3927                 } *tmp;
3928 
3929                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3930                     ret = -TARGET_EFAULT;
3931                     break;
3932                 }
3933 
3934                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3935 
3936                 unlock_user_struct(tmp, ptr, 0);
3937                 break;
3938             }
3939         default:
3940             ret = do_msgrcv(first, ptr, second, fifth, third);
3941         }
3942         break;
3943 
3944     case IPCOP_shmat:
3945         switch (version) {
3946         default:
3947         {
3948             abi_ulong raddr;
3949             raddr = do_shmat(cpu_env, first, ptr, second);
3950             if (is_error(raddr))
3951                 return get_errno(raddr);
3952             if (put_user_ual(raddr, third))
3953                 return -TARGET_EFAULT;
3954             break;
3955         }
3956         case 1:
3957             ret = -TARGET_EINVAL;
3958             break;
3959         }
3960         break;
3961     case IPCOP_shmdt:
3962         ret = do_shmdt(ptr);
3963         break;
3964 
3965     case IPCOP_shmget:
3966         /* IPC_* flag values are the same on all linux platforms */
3967         ret = get_errno(shmget(first, second, third));
3968         break;
3969 
3970     /* IPC_* and SHM_* command values are the same on all linux platforms */
3971     case IPCOP_shmctl:
3972         ret = do_shmctl(first, second, ptr);
3973         break;
3974     default:
3975         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3976         ret = -TARGET_ENOSYS;
3977         break;
3978     }
3979     return ret;
3980 }
3981 #endif
3982 
3983 /* kernel structure types definitions */
3984 
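/*
 * syscall_types.h is included twice as an X-macro: the first pass turns
 * each STRUCT()/STRUCT_SPECIAL() entry into a STRUCT_<name> enum constant,
 * the second emits a struct_<name>_def[] argtype array describing the
 * layout for the thunk converter.  For instance, a hypothetical
 * STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT) entry
 * would expand to STRUCT_winsize and to
 *     struct_winsize_def[] = { TYPE_SHORT, TYPE_SHORT, TYPE_SHORT,
 *                              TYPE_SHORT, TYPE_NULL };
 */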
3985 #define STRUCT(name, ...) STRUCT_ ## name,
3986 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3987 enum {
3988 #include "syscall_types.h"
3989 STRUCT_MAX
3990 };
3991 #undef STRUCT
3992 #undef STRUCT_SPECIAL
3993 
3994 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
3995 #define STRUCT_SPECIAL(name)
3996 #include "syscall_types.h"
3997 #undef STRUCT
3998 #undef STRUCT_SPECIAL
3999 
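/*
 * Each translated ioctl is described by an IOCTLEntry: the target and host
 * request numbers, an access mode (IOC_R/IOC_W/IOC_RW), an optional custom
 * handler and an argtype description of the argument so that the generic
 * path can convert it with the thunk machinery.  Requests whose layout is
 * too irregular for the declarative description (fiemap, ifconf,
 * device-mapper, usbfs, ...) supply a do_ioctl_fn instead; those handlers
 * follow below.
 */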
4000 typedef struct IOCTLEntry IOCTLEntry;
4001 
4002 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4003                              int fd, int cmd, abi_long arg);
4004 
4005 struct IOCTLEntry {
4006     int target_cmd;
4007     unsigned int host_cmd;
4008     const char *name;
4009     int access;
4010     do_ioctl_fn *do_ioctl;
4011     const argtype arg_type[5];
4012 };
4013 
4014 #define IOC_R 0x0001
4015 #define IOC_W 0x0002
4016 #define IOC_RW (IOC_R | IOC_W)
4017 
4018 #define MAX_STRUCT_SIZE 4096
4019 
4020 #ifdef CONFIG_FIEMAP
4021 /* So fiemap access checks don't overflow on 32 bit systems.
4022  * This is very slightly smaller than the limit imposed by
4023  * the underlying kernel.
4024  */
4025 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4026                             / sizeof(struct fiemap_extent))
4027 
4028 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4029                                        int fd, int cmd, abi_long arg)
4030 {
4031     /* The parameter for this ioctl is a struct fiemap followed
4032      * by an array of struct fiemap_extent whose size is set
4033      * in fiemap->fm_extent_count. The array is filled in by the
4034      * ioctl.
4035      */
4036     int target_size_in, target_size_out;
4037     struct fiemap *fm;
4038     const argtype *arg_type = ie->arg_type;
4039     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4040     void *argptr, *p;
4041     abi_long ret;
4042     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4043     uint32_t outbufsz;
4044     int free_fm = 0;
4045 
4046     assert(arg_type[0] == TYPE_PTR);
4047     assert(ie->access == IOC_RW);
4048     arg_type++;
4049     target_size_in = thunk_type_size(arg_type, 0);
4050     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4051     if (!argptr) {
4052         return -TARGET_EFAULT;
4053     }
4054     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4055     unlock_user(argptr, arg, 0);
4056     fm = (struct fiemap *)buf_temp;
4057     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4058         return -TARGET_EINVAL;
4059     }
4060 
4061     outbufsz = sizeof (*fm) +
4062         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4063 
4064     if (outbufsz > MAX_STRUCT_SIZE) {
4065         /* We can't fit all the extents into the fixed size buffer.
4066          * Allocate one that is large enough and use it instead.
4067          */
4068         fm = g_try_malloc(outbufsz);
4069         if (!fm) {
4070             return -TARGET_ENOMEM;
4071         }
4072         memcpy(fm, buf_temp, sizeof(struct fiemap));
4073         free_fm = 1;
4074     }
4075     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4076     if (!is_error(ret)) {
4077         target_size_out = target_size_in;
4078         /* An extent_count of 0 means we were only counting the extents
4079          * so there are no structs to copy
4080          */
4081         if (fm->fm_extent_count != 0) {
4082             target_size_out += fm->fm_mapped_extents * extent_size;
4083         }
4084         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4085         if (!argptr) {
4086             ret = -TARGET_EFAULT;
4087         } else {
4088             /* Convert the struct fiemap */
4089             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4090             if (fm->fm_extent_count != 0) {
4091                 p = argptr + target_size_in;
4092                 /* ...and then all the struct fiemap_extents */
4093                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4094                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4095                                   THUNK_TARGET);
4096                     p += extent_size;
4097                 }
4098             }
4099             unlock_user(argptr, arg, target_size_out);
4100         }
4101     }
4102     if (free_fm) {
4103         g_free(fm);
4104     }
4105     return ret;
4106 }
4107 #endif
4108 
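/*
 * SIOCGIFCONF is awkward because struct ifconf embeds both a length and a
 * pointer to an array of struct ifreq, and the guest ifreq layout may have
 * a different size from the host one.  The handler below recomputes the
 * element count from the guest length, runs the ioctl into a host-sized
 * buffer, then converts each ifreq back and rewrites ifc_len in target
 * units.
 */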
4109 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4110                                 int fd, int cmd, abi_long arg)
4111 {
4112     const argtype *arg_type = ie->arg_type;
4113     int target_size;
4114     void *argptr;
4115     int ret;
4116     struct ifconf *host_ifconf;
4117     uint32_t outbufsz;
4118     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4119     int target_ifreq_size;
4120     int nb_ifreq;
4121     int free_buf = 0;
4122     int i;
4123     int target_ifc_len;
4124     abi_long target_ifc_buf;
4125     int host_ifc_len;
4126     char *host_ifc_buf;
4127 
4128     assert(arg_type[0] == TYPE_PTR);
4129     assert(ie->access == IOC_RW);
4130 
4131     arg_type++;
4132     target_size = thunk_type_size(arg_type, 0);
4133 
4134     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4135     if (!argptr)
4136         return -TARGET_EFAULT;
4137     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4138     unlock_user(argptr, arg, 0);
4139 
4140     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4141     target_ifc_len = host_ifconf->ifc_len;
4142     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4143 
4144     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4145     nb_ifreq = target_ifc_len / target_ifreq_size;
4146     host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4147 
4148     outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4149     if (outbufsz > MAX_STRUCT_SIZE) {
4150         /* We can't fit all the ifreq records into the fixed size
4151          * buffer. Allocate one that is large enough and use it instead.
4152          */
4153         host_ifconf = malloc(outbufsz);
4154         if (!host_ifconf) {
4155             return -TARGET_ENOMEM;
4156         }
4157         memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4158         free_buf = 1;
4159     }
4160     host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
4161 
4162     host_ifconf->ifc_len = host_ifc_len;
4163     host_ifconf->ifc_buf = host_ifc_buf;
4164 
4165     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4166     if (!is_error(ret)) {
4167         /* convert host ifc_len to target ifc_len */
4168 
4169         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4170         target_ifc_len = nb_ifreq * target_ifreq_size;
4171         host_ifconf->ifc_len = target_ifc_len;
4172 
4173         /* restore target ifc_buf */
4174 
4175         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4176 
4177         /* copy struct ifconf to target user */
4178 
4179         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4180         if (!argptr)
4181             return -TARGET_EFAULT;
4182         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4183         unlock_user(argptr, arg, target_size);
4184 
4185         /* copy ifreq[] to target user */
4186 
4187         argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4188         for (i = 0; i < nb_ifreq ; i++) {
4189             thunk_convert(argptr + i * target_ifreq_size,
4190                           host_ifc_buf + i * sizeof(struct ifreq),
4191                           ifreq_arg_type, THUNK_TARGET);
4192         }
4193         unlock_user(argptr, target_ifc_buf, target_ifc_len);
4194     }
4195 
4196     if (free_buf) {
4197         free(host_ifconf);
4198     }
4199 
4200     return ret;
4201 }
4202 
4203 #if defined(CONFIG_USBFS)
4204 #if HOST_LONG_BITS > 64
4205 #error USBDEVFS thunks do not support >64 bit hosts yet.
4206 #endif
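/*
 * USBDEVFS_SUBMITURB hands the kernel a URB whose address is later handed
 * back verbatim by USBDEVFS_REAPURB, so every guest URB gets a live_urb
 * wrapper in host memory remembering the original guest URB and buffer
 * addresses.  Reaping recovers the wrapper from the embedded host_urb via
 * offsetof(), while a hash table keyed by the guest URB address serves
 * USBDEVFS_DISCARDURB lookups.
 */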
4207 struct live_urb {
4208     uint64_t target_urb_adr;
4209     uint64_t target_buf_adr;
4210     char *target_buf_ptr;
4211     struct usbdevfs_urb host_urb;
4212 };
4213 
4214 static GHashTable *usbdevfs_urb_hashtable(void)
4215 {
4216     static GHashTable *urb_hashtable;
4217 
4218     if (!urb_hashtable) {
4219         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4220     }
4221     return urb_hashtable;
4222 }
4223 
4224 static void urb_hashtable_insert(struct live_urb *urb)
4225 {
4226     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4227     g_hash_table_insert(urb_hashtable, urb, urb);
4228 }
4229 
4230 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4231 {
4232     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4233     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4234 }
4235 
4236 static void urb_hashtable_remove(struct live_urb *urb)
4237 {
4238     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4239     g_hash_table_remove(urb_hashtable, urb);
4240 }
4241 
4242 static abi_long
4243 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4244                           int fd, int cmd, abi_long arg)
4245 {
4246     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4247     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4248     struct live_urb *lurb;
4249     void *argptr;
4250     uint64_t hurb;
4251     int target_size;
4252     uintptr_t target_urb_adr;
4253     abi_long ret;
4254 
4255     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4256 
4257     memset(buf_temp, 0, sizeof(uint64_t));
4258     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4259     if (is_error(ret)) {
4260         return ret;
4261     }
4262 
4263     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4264     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4265     if (!lurb->target_urb_adr) {
4266         return -TARGET_EFAULT;
4267     }
4268     urb_hashtable_remove(lurb);
4269     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4270         lurb->host_urb.buffer_length);
4271     lurb->target_buf_ptr = NULL;
4272 
4273     /* restore the guest buffer pointer */
4274     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4275 
4276     /* update the guest urb struct */
4277     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4278     if (!argptr) {
4279         g_free(lurb);
4280         return -TARGET_EFAULT;
4281     }
4282     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4283     unlock_user(argptr, lurb->target_urb_adr, target_size);
4284 
4285     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4286     /* write back the urb handle */
4287     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4288     if (!argptr) {
4289         g_free(lurb);
4290         return -TARGET_EFAULT;
4291     }
4292 
4293     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4294     target_urb_adr = lurb->target_urb_adr;
4295     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4296     unlock_user(argptr, arg, target_size);
4297 
4298     g_free(lurb);
4299     return ret;
4300 }
4301 
4302 static abi_long
4303 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4304                              uint8_t *buf_temp __attribute__((unused)),
4305                              int fd, int cmd, abi_long arg)
4306 {
4307     struct live_urb *lurb;
4308 
4309     /* map target address back to host URB with metadata. */
4310     lurb = urb_hashtable_lookup(arg);
4311     if (!lurb) {
4312         return -TARGET_EFAULT;
4313     }
4314     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4315 }
4316 
4317 static abi_long
4318 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4319                             int fd, int cmd, abi_long arg)
4320 {
4321     const argtype *arg_type = ie->arg_type;
4322     int target_size;
4323     abi_long ret;
4324     void *argptr;
4325     int rw_dir;
4326     struct live_urb *lurb;
4327 
4328     /*
4329      * Each submitted URB needs to map to a unique ID for the
4330      * kernel, and that unique ID needs to be a pointer to
4331      * host memory; hence the per-URB allocation here.
4332      * Isochronous transfers have a variable-length struct.
4333      */
4334     arg_type++;
4335     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4336 
4337     /* construct host copy of urb and metadata */
4338     lurb = g_try_malloc0(sizeof(struct live_urb));
4339     if (!lurb) {
4340         return -TARGET_ENOMEM;
4341     }
4342 
4343     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4344     if (!argptr) {
4345         g_free(lurb);
4346         return -TARGET_EFAULT;
4347     }
4348     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4349     unlock_user(argptr, arg, 0);
4350 
4351     lurb->target_urb_adr = arg;
4352     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4353 
4354     /* buffer space used depends on endpoint type so lock the entire buffer */
4355     /* control type urbs should check the buffer contents for true direction */
4356     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4357     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4358         lurb->host_urb.buffer_length, 1);
4359     if (lurb->target_buf_ptr == NULL) {
4360         g_free(lurb);
4361         return -TARGET_EFAULT;
4362     }
4363 
4364     /* update buffer pointer in host copy */
4365     lurb->host_urb.buffer = lurb->target_buf_ptr;
4366 
4367     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4368     if (is_error(ret)) {
4369         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4370         g_free(lurb);
4371     } else {
4372         urb_hashtable_insert(lurb);
4373     }
4374 
4375     return ret;
4376 }
4377 #endif /* CONFIG_USBFS */
4378 
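/*
 * Device-mapper ioctls share one header, struct dm_ioctl, followed by a
 * command-specific variable-length payload that starts data_start bytes
 * into a buffer of data_size bytes.  Because that payload can exceed
 * MAX_STRUCT_SIZE and must be converted differently per command (target
 * specs, name lists, version lists, ...), the handler copies everything
 * into a temporary buffer of twice the advertised size and converts each
 * record by hand.
 */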
4379 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4380                             int cmd, abi_long arg)
4381 {
4382     void *argptr;
4383     struct dm_ioctl *host_dm;
4384     abi_long guest_data;
4385     uint32_t guest_data_size;
4386     int target_size;
4387     const argtype *arg_type = ie->arg_type;
4388     abi_long ret;
4389     void *big_buf = NULL;
4390     char *host_data;
4391 
4392     arg_type++;
4393     target_size = thunk_type_size(arg_type, 0);
4394     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4395     if (!argptr) {
4396         ret = -TARGET_EFAULT;
4397         goto out;
4398     }
4399     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4400     unlock_user(argptr, arg, 0);
4401 
4402     /* buf_temp is too small, so fetch things into a bigger buffer */
4403     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4404     memcpy(big_buf, buf_temp, target_size);
4405     buf_temp = big_buf;
4406     host_dm = big_buf;
4407 
4408     guest_data = arg + host_dm->data_start;
4409     if ((guest_data - arg) < 0) {
4410         ret = -TARGET_EINVAL;
4411         goto out;
4412     }
4413     guest_data_size = host_dm->data_size - host_dm->data_start;
4414     host_data = (char*)host_dm + host_dm->data_start;
4415 
4416     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4417     if (!argptr) {
4418         ret = -TARGET_EFAULT;
4419         goto out;
4420     }
4421 
4422     switch (ie->host_cmd) {
4423     case DM_REMOVE_ALL:
4424     case DM_LIST_DEVICES:
4425     case DM_DEV_CREATE:
4426     case DM_DEV_REMOVE:
4427     case DM_DEV_SUSPEND:
4428     case DM_DEV_STATUS:
4429     case DM_DEV_WAIT:
4430     case DM_TABLE_STATUS:
4431     case DM_TABLE_CLEAR:
4432     case DM_TABLE_DEPS:
4433     case DM_LIST_VERSIONS:
4434         /* no input data */
4435         break;
4436     case DM_DEV_RENAME:
4437     case DM_DEV_SET_GEOMETRY:
4438         /* data contains only strings */
4439         memcpy(host_data, argptr, guest_data_size);
4440         break;
4441     case DM_TARGET_MSG:
4442         memcpy(host_data, argptr, guest_data_size);
4443         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4444         break;
4445     case DM_TABLE_LOAD:
4446     {
4447         void *gspec = argptr;
4448         void *cur_data = host_data;
4449         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4450         int spec_size = thunk_type_size(arg_type, 0);
4451         int i;
4452 
4453         for (i = 0; i < host_dm->target_count; i++) {
4454             struct dm_target_spec *spec = cur_data;
4455             uint32_t next;
4456             int slen;
4457 
4458             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4459             slen = strlen((char*)gspec + spec_size) + 1;
4460             next = spec->next;
4461             spec->next = sizeof(*spec) + slen;
4462             strcpy((char*)&spec[1], gspec + spec_size);
4463             gspec += next;
4464             cur_data += spec->next;
4465         }
4466         break;
4467     }
4468     default:
4469         ret = -TARGET_EINVAL;
4470         unlock_user(argptr, guest_data, 0);
4471         goto out;
4472     }
4473     unlock_user(argptr, guest_data, 0);
4474 
4475     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4476     if (!is_error(ret)) {
4477         guest_data = arg + host_dm->data_start;
4478         guest_data_size = host_dm->data_size - host_dm->data_start;
4479         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4480         switch (ie->host_cmd) {
4481         case DM_REMOVE_ALL:
4482         case DM_DEV_CREATE:
4483         case DM_DEV_REMOVE:
4484         case DM_DEV_RENAME:
4485         case DM_DEV_SUSPEND:
4486         case DM_DEV_STATUS:
4487         case DM_TABLE_LOAD:
4488         case DM_TABLE_CLEAR:
4489         case DM_TARGET_MSG:
4490         case DM_DEV_SET_GEOMETRY:
4491             /* no return data */
4492             break;
4493         case DM_LIST_DEVICES:
4494         {
4495             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4496             uint32_t remaining_data = guest_data_size;
4497             void *cur_data = argptr;
4498             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4499             int nl_size = 12; /* can't use thunk_size due to alignment */
4500 
4501             while (1) {
4502                 uint32_t next = nl->next;
4503                 if (next) {
4504                     nl->next = nl_size + (strlen(nl->name) + 1);
4505                 }
4506                 if (remaining_data < nl->next) {
4507                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4508                     break;
4509                 }
4510                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4511                 strcpy(cur_data + nl_size, nl->name);
4512                 cur_data += nl->next;
4513                 remaining_data -= nl->next;
4514                 if (!next) {
4515                     break;
4516                 }
4517                 nl = (void*)nl + next;
4518             }
4519             break;
4520         }
4521         case DM_DEV_WAIT:
4522         case DM_TABLE_STATUS:
4523         {
4524             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4525             void *cur_data = argptr;
4526             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4527             int spec_size = thunk_type_size(arg_type, 0);
4528             int i;
4529 
4530             for (i = 0; i < host_dm->target_count; i++) {
4531                 uint32_t next = spec->next;
4532                 int slen = strlen((char*)&spec[1]) + 1;
4533                 spec->next = (cur_data - argptr) + spec_size + slen;
4534                 if (guest_data_size < spec->next) {
4535                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4536                     break;
4537                 }
4538                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4539                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4540                 cur_data = argptr + spec->next;
4541                 spec = (void*)host_dm + host_dm->data_start + next;
4542             }
4543             break;
4544         }
4545         case DM_TABLE_DEPS:
4546         {
4547             void *hdata = (void*)host_dm + host_dm->data_start;
4548             int count = *(uint32_t*)hdata;
4549             uint64_t *hdev = hdata + 8;
4550             uint64_t *gdev = argptr + 8;
4551             int i;
4552 
4553             *(uint32_t*)argptr = tswap32(count);
4554             for (i = 0; i < count; i++) {
4555                 *gdev = tswap64(*hdev);
4556                 gdev++;
4557                 hdev++;
4558             }
4559             break;
4560         }
4561         case DM_LIST_VERSIONS:
4562         {
4563             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4564             uint32_t remaining_data = guest_data_size;
4565             void *cur_data = argptr;
4566             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4567             int vers_size = thunk_type_size(arg_type, 0);
4568 
4569             while (1) {
4570                 uint32_t next = vers->next;
4571                 if (next) {
4572                     vers->next = vers_size + (strlen(vers->name) + 1);
4573                 }
4574                 if (remaining_data < vers->next) {
4575                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4576                     break;
4577                 }
4578                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4579                 strcpy(cur_data + vers_size, vers->name);
4580                 cur_data += vers->next;
4581                 remaining_data -= vers->next;
4582                 if (!next) {
4583                     break;
4584                 }
4585                 vers = (void*)vers + next;
4586             }
4587             break;
4588         }
4589         default:
4590             unlock_user(argptr, guest_data, 0);
4591             ret = -TARGET_EINVAL;
4592             goto out;
4593         }
4594         unlock_user(argptr, guest_data, guest_data_size);
4595 
4596         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4597         if (!argptr) {
4598             ret = -TARGET_EFAULT;
4599             goto out;
4600         }
4601         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4602         unlock_user(argptr, arg, target_size);
4603     }
4604 out:
4605     g_free(big_buf);
4606     return ret;
4607 }
4608 
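/*
 * BLKPG hides its real payload behind a pointer: struct blkpg_ioctl_arg
 * carries an opcode plus a data pointer to a struct blkpg_partition, so
 * the nested structure is fetched and converted separately and the host
 * copy's data pointer is redirected to a local buffer before the ioctl is
 * issued.
 */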
4609 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4610                                int cmd, abi_long arg)
4611 {
4612     void *argptr;
4613     int target_size;
4614     const argtype *arg_type = ie->arg_type;
4615     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4616     abi_long ret;
4617 
4618     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4619     struct blkpg_partition host_part;
4620 
4621     /* Read and convert blkpg */
4622     arg_type++;
4623     target_size = thunk_type_size(arg_type, 0);
4624     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4625     if (!argptr) {
4626         ret = -TARGET_EFAULT;
4627         goto out;
4628     }
4629     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4630     unlock_user(argptr, arg, 0);
4631 
4632     switch (host_blkpg->op) {
4633     case BLKPG_ADD_PARTITION:
4634     case BLKPG_DEL_PARTITION:
4635         /* payload is struct blkpg_partition */
4636         break;
4637     default:
4638         /* Unknown opcode */
4639         ret = -TARGET_EINVAL;
4640         goto out;
4641     }
4642 
4643     /* Read and convert blkpg->data */
4644     arg = (abi_long)(uintptr_t)host_blkpg->data;
4645     target_size = thunk_type_size(part_arg_type, 0);
4646     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4647     if (!argptr) {
4648         ret = -TARGET_EFAULT;
4649         goto out;
4650     }
4651     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4652     unlock_user(argptr, arg, 0);
4653 
4654     /* Swizzle the data pointer to our local copy and call! */
4655     host_blkpg->data = &host_part;
4656     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4657 
4658 out:
4659     return ret;
4660 }
4661 
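/* Handler for ioctls that take a struct rtentry: the rt_dev member points
 * to a device-name string in guest memory, so the struct is converted field
 * by field and rt_dev is rewritten to a locked host copy of that string.
 */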
4662 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4663                                 int fd, int cmd, abi_long arg)
4664 {
4665     const argtype *arg_type = ie->arg_type;
4666     const StructEntry *se;
4667     const argtype *field_types;
4668     const int *dst_offsets, *src_offsets;
4669     int target_size;
4670     void *argptr;
4671     abi_ulong *target_rt_dev_ptr;
4672     unsigned long *host_rt_dev_ptr;
4673     abi_long ret;
4674     int i;
4675 
4676     assert(ie->access == IOC_W);
4677     assert(*arg_type == TYPE_PTR);
4678     arg_type++;
4679     assert(*arg_type == TYPE_STRUCT);
4680     target_size = thunk_type_size(arg_type, 0);
4681     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4682     if (!argptr) {
4683         return -TARGET_EFAULT;
4684     }
4685     arg_type++;
4686     assert(*arg_type == (int)STRUCT_rtentry);
4687     se = struct_entries + *arg_type++;
4688     assert(se->convert[0] == NULL);
4689     /* Convert the struct here so that we can catch the rt_dev string */
4690     field_types = se->field_types;
4691     dst_offsets = se->field_offsets[THUNK_HOST];
4692     src_offsets = se->field_offsets[THUNK_TARGET];
4693     for (i = 0; i < se->nb_fields; i++) {
4694         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4695             assert(*field_types == TYPE_PTRVOID);
4696             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4697             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4698             if (*target_rt_dev_ptr != 0) {
4699                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4700                                                   tswapal(*target_rt_dev_ptr));
4701                 if (!*host_rt_dev_ptr) {
4702                     unlock_user(argptr, arg, 0);
4703                     return -TARGET_EFAULT;
4704                 }
4705             } else {
4706                 *host_rt_dev_ptr = 0;
4707             }
4708             field_types++;
4709             continue;
4710         }
4711         field_types = thunk_convert(buf_temp + dst_offsets[i],
4712                                     argptr + src_offsets[i],
4713                                     field_types, THUNK_HOST);
4714     }
4715     unlock_user(argptr, arg, 0);
4716 
4717     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4718     if (*host_rt_dev_ptr != 0) {
4719         unlock_user((void *)*host_rt_dev_ptr,
4720                     *target_rt_dev_ptr, 0);
4721     }
4722     return ret;
4723 }
4724 
4725 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4726                                      int fd, int cmd, abi_long arg)
4727 {
4728     int sig = target_to_host_signal(arg);
4729     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4730 }
4731 
4732 #ifdef TIOCGPTPEER
4733 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
4734                                      int fd, int cmd, abi_long arg)
4735 {
4736     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
4737     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
4738 }
4739 #endif
4740 
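/* The ioctl table is generated from ioctls.h: IOCTL() produces an entry
 * handled by the generic conversion code in do_ioctl(), IOCTL_SPECIAL()
 * installs one of the do_ioctl_*() helpers above, and IOCTL_IGNORE() leaves
 * host_cmd at 0 so the command is rejected with -TARGET_ENOSYS.
 */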
4741 static IOCTLEntry ioctl_entries[] = {
4742 #define IOCTL(cmd, access, ...) \
4743     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
4744 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
4745     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
4746 #define IOCTL_IGNORE(cmd) \
4747     { TARGET_ ## cmd, 0, #cmd },
4748 #include "ioctls.h"
4749     { 0, 0, },
4750 };
4751 
4752 /* ??? Implement proper locking for ioctls.  */
4753 /* do_ioctl() must return target values and target errnos. */
4754 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4755 {
4756     const IOCTLEntry *ie;
4757     const argtype *arg_type;
4758     abi_long ret;
4759     uint8_t buf_temp[MAX_STRUCT_SIZE];
4760     int target_size;
4761     void *argptr;
4762 
4763     ie = ioctl_entries;
4764     for(;;) {
4765         if (ie->target_cmd == 0) {
4766             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4767             return -TARGET_ENOSYS;
4768         }
4769         if (ie->target_cmd == cmd)
4770             break;
4771         ie++;
4772     }
4773     arg_type = ie->arg_type;
4774     if (ie->do_ioctl) {
4775         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4776     } else if (!ie->host_cmd) {
4777         /* Some architectures define BSD ioctls in their headers
4778            that are not implemented in Linux.  */
4779         return -TARGET_ENOSYS;
4780     }
4781 
4782     switch(arg_type[0]) {
4783     case TYPE_NULL:
4784         /* no argument */
4785         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
4786         break;
4787     case TYPE_PTRVOID:
4788     case TYPE_INT:
4789         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
4790         break;
4791     case TYPE_PTR:
4792         arg_type++;
4793         target_size = thunk_type_size(arg_type, 0);
4794         switch(ie->access) {
4795         case IOC_R:
4796             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4797             if (!is_error(ret)) {
4798                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4799                 if (!argptr)
4800                     return -TARGET_EFAULT;
4801                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4802                 unlock_user(argptr, arg, target_size);
4803             }
4804             break;
4805         case IOC_W:
4806             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4807             if (!argptr)
4808                 return -TARGET_EFAULT;
4809             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4810             unlock_user(argptr, arg, 0);
4811             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4812             break;
4813         default:
4814         case IOC_RW:
4815             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4816             if (!argptr)
4817                 return -TARGET_EFAULT;
4818             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4819             unlock_user(argptr, arg, 0);
4820             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4821             if (!is_error(ret)) {
4822                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4823                 if (!argptr)
4824                     return -TARGET_EFAULT;
4825                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4826                 unlock_user(argptr, arg, target_size);
4827             }
4828             break;
4829         }
4830         break;
4831     default:
4832         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4833                  (long)cmd, arg_type[0]);
4834         ret = -TARGET_ENOSYS;
4835         break;
4836     }
4837     return ret;
4838 }
4839 
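/* Termios flag translation tables: each entry is
 * { target_mask, target_bits, host_mask, host_bits }, applied by
 * target_to_host_bitmask()/host_to_target_bitmask() in the converters below.
 */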
4840 static const bitmask_transtbl iflag_tbl[] = {
4841         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4842         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4843         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4844         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4845         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4846         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4847         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4848         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4849         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4850         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4851         { TARGET_IXON, TARGET_IXON, IXON, IXON },
4852         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4853         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4854         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4855         { 0, 0, 0, 0 }
4856 };
4857 
4858 static const bitmask_transtbl oflag_tbl[] = {
4859 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4860 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4861 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4862 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4863 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4864 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4865 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4866 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4867 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4868 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4869 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4870 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4871 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4872 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4873 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4874 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4875 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4876 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4877 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4878 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4879 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4880 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4881 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4882 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4883 	{ 0, 0, 0, 0 }
4884 };
4885 
4886 static const bitmask_transtbl cflag_tbl[] = {
4887 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4888 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4889 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4890 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4891 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4892 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4893 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4894 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4895 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4896 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4897 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
4898 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
4899 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
4900 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4901 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4902 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4903 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4904 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4905 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4906 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4907 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4908 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4909 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4910 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4911 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4912 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4913 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4914 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4915 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4916 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4917 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
4918 	{ 0, 0, 0, 0 }
4919 };
4920 
4921 static const bitmask_transtbl lflag_tbl[] = {
4922 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4923 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4924 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4925 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4926 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4927 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4928 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4929 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4930 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4931 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4932 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4933 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4934 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
4935 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
4936 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
4937 	{ 0, 0, 0, 0 }
4938 };
4939 
4940 static void target_to_host_termios (void *dst, const void *src)
4941 {
4942     struct host_termios *host = dst;
4943     const struct target_termios *target = src;
4944 
4945     host->c_iflag =
4946         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
4947     host->c_oflag =
4948         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
4949     host->c_cflag =
4950         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
4951     host->c_lflag =
4952         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
4953     host->c_line = target->c_line;
4954 
4955     memset(host->c_cc, 0, sizeof(host->c_cc));
4956     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
4957     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
4958     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
4959     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
4960     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
4961     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
4962     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
4963     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
4964     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
4965     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
4966     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
4967     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
4968     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
4969     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
4970     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
4971     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
4972     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
4973 }
4974 
4975 static void host_to_target_termios (void *dst, const void *src)
4976 {
4977     struct target_termios *target = dst;
4978     const struct host_termios *host = src;
4979 
4980     target->c_iflag =
4981         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
4982     target->c_oflag =
4983         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
4984     target->c_cflag =
4985         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
4986     target->c_lflag =
4987         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
4988     target->c_line = host->c_line;
4989 
4990     memset(target->c_cc, 0, sizeof(target->c_cc));
4991     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
4992     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
4993     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
4994     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
4995     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
4996     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
4997     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
4998     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
4999     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5000     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5001     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5002     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5003     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5004     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5005     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5006     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5007     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5008 }
5009 
5010 static const StructEntry struct_termios_def = {
5011     .convert = { host_to_target_termios, target_to_host_termios },
5012     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5013     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5014 };
5015 
5016 static bitmask_transtbl mmap_flags_tbl[] = {
5017     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5018     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5019     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5020     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5021       MAP_ANONYMOUS, MAP_ANONYMOUS },
5022     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5023       MAP_GROWSDOWN, MAP_GROWSDOWN },
5024     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5025       MAP_DENYWRITE, MAP_DENYWRITE },
5026     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5027       MAP_EXECUTABLE, MAP_EXECUTABLE },
5028     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5029     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5030       MAP_NORESERVE, MAP_NORESERVE },
5031     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5032     /* MAP_STACK has been ignored by the kernel for quite some time.
5033        Recognize it for the target insofar as we do not want to pass
5034        it through to the host.  */
5035     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5036     { 0, 0, 0, 0 }
5037 };
5038 
5039 #if defined(TARGET_I386)
5040 
5041 /* NOTE: there is really only one LDT shared by all the threads */
5042 static uint8_t *ldt_table;
5043 
5044 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5045 {
5046     int size;
5047     void *p;
5048 
5049     if (!ldt_table)
5050         return 0;
5051     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5052     if (size > bytecount)
5053         size = bytecount;
5054     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5055     if (!p)
5056         return -TARGET_EFAULT;
5057     /* ??? Should this be byteswapped?  */
5058     memcpy(p, ldt_table, size);
5059     unlock_user(p, ptr, size);
5060     return size;
5061 }
5062 
5063 /* XXX: add locking support */
5064 static abi_long write_ldt(CPUX86State *env,
5065                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5066 {
5067     struct target_modify_ldt_ldt_s ldt_info;
5068     struct target_modify_ldt_ldt_s *target_ldt_info;
5069     int seg_32bit, contents, read_exec_only, limit_in_pages;
5070     int seg_not_present, useable, lm;
5071     uint32_t *lp, entry_1, entry_2;
5072 
5073     if (bytecount != sizeof(ldt_info))
5074         return -TARGET_EINVAL;
5075     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5076         return -TARGET_EFAULT;
5077     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5078     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5079     ldt_info.limit = tswap32(target_ldt_info->limit);
5080     ldt_info.flags = tswap32(target_ldt_info->flags);
5081     unlock_user_struct(target_ldt_info, ptr, 0);
5082 
5083     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5084         return -TARGET_EINVAL;
5085     seg_32bit = ldt_info.flags & 1;
5086     contents = (ldt_info.flags >> 1) & 3;
5087     read_exec_only = (ldt_info.flags >> 3) & 1;
5088     limit_in_pages = (ldt_info.flags >> 4) & 1;
5089     seg_not_present = (ldt_info.flags >> 5) & 1;
5090     useable = (ldt_info.flags >> 6) & 1;
5091 #ifdef TARGET_ABI32
5092     lm = 0;
5093 #else
5094     lm = (ldt_info.flags >> 7) & 1;
5095 #endif
5096     if (contents == 3) {
5097         if (oldmode)
5098             return -TARGET_EINVAL;
5099         if (seg_not_present == 0)
5100             return -TARGET_EINVAL;
5101     }
5102     /* allocate the LDT */
5103     if (!ldt_table) {
5104         env->ldt.base = target_mmap(0,
5105                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5106                                     PROT_READ|PROT_WRITE,
5107                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5108         if (env->ldt.base == -1)
5109             return -TARGET_ENOMEM;
5110         memset(g2h(env->ldt.base), 0,
5111                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5112         env->ldt.limit = 0xffff;
5113         ldt_table = g2h(env->ldt.base);
5114     }
5115 
5116     /* NOTE: same code as Linux kernel */
5117     /* Allow LDTs to be cleared by the user. */
5118     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5119         if (oldmode ||
5120             (contents == 0		&&
5121              read_exec_only == 1	&&
5122              seg_32bit == 0		&&
5123              limit_in_pages == 0	&&
5124              seg_not_present == 1	&&
5125              useable == 0 )) {
5126             entry_1 = 0;
5127             entry_2 = 0;
5128             goto install;
5129         }
5130     }
5131 
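    /* Pack the two 32-bit words of the segment descriptor, mirroring the
       encoding used by the kernel's modify_ldt() implementation.  */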
5132     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5133         (ldt_info.limit & 0x0ffff);
5134     entry_2 = (ldt_info.base_addr & 0xff000000) |
5135         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5136         (ldt_info.limit & 0xf0000) |
5137         ((read_exec_only ^ 1) << 9) |
5138         (contents << 10) |
5139         ((seg_not_present ^ 1) << 15) |
5140         (seg_32bit << 22) |
5141         (limit_in_pages << 23) |
5142         (lm << 21) |
5143         0x7000;
5144     if (!oldmode)
5145         entry_2 |= (useable << 20);
5146 
5147     /* Install the new entry ...  */
5148 install:
5149     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5150     lp[0] = tswap32(entry_1);
5151     lp[1] = tswap32(entry_2);
5152     return 0;
5153 }
5154 
5155 /* specific and weird i386 syscalls */
5156 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5157                               unsigned long bytecount)
5158 {
5159     abi_long ret;
5160 
5161     switch (func) {
5162     case 0:
5163         ret = read_ldt(ptr, bytecount);
5164         break;
5165     case 1:
5166         ret = write_ldt(env, ptr, bytecount, 1);
5167         break;
5168     case 0x11:
5169         ret = write_ldt(env, ptr, bytecount, 0);
5170         break;
5171     default:
5172         ret = -TARGET_ENOSYS;
5173         break;
5174     }
5175     return ret;
5176 }
5177 
5178 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5179 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5180 {
5181     uint64_t *gdt_table = g2h(env->gdt.base);
5182     struct target_modify_ldt_ldt_s ldt_info;
5183     struct target_modify_ldt_ldt_s *target_ldt_info;
5184     int seg_32bit, contents, read_exec_only, limit_in_pages;
5185     int seg_not_present, useable, lm;
5186     uint32_t *lp, entry_1, entry_2;
5187     int i;
5188 
5189     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5190     if (!target_ldt_info)
5191         return -TARGET_EFAULT;
5192     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5193     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5194     ldt_info.limit = tswap32(target_ldt_info->limit);
5195     ldt_info.flags = tswap32(target_ldt_info->flags);
5196     if (ldt_info.entry_number == -1) {
5197         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5198             if (gdt_table[i] == 0) {
5199                 ldt_info.entry_number = i;
5200                 target_ldt_info->entry_number = tswap32(i);
5201                 break;
5202             }
5203         }
5204     }
5205     unlock_user_struct(target_ldt_info, ptr, 1);
5206 
5207     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5208         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5209            return -TARGET_EINVAL;
5210     seg_32bit = ldt_info.flags & 1;
5211     contents = (ldt_info.flags >> 1) & 3;
5212     read_exec_only = (ldt_info.flags >> 3) & 1;
5213     limit_in_pages = (ldt_info.flags >> 4) & 1;
5214     seg_not_present = (ldt_info.flags >> 5) & 1;
5215     useable = (ldt_info.flags >> 6) & 1;
5216 #ifdef TARGET_ABI32
5217     lm = 0;
5218 #else
5219     lm = (ldt_info.flags >> 7) & 1;
5220 #endif
5221 
5222     if (contents == 3) {
5223         if (seg_not_present == 0)
5224             return -TARGET_EINVAL;
5225     }
5226 
5227     /* NOTE: same code as Linux kernel */
5228     /* Allow LDTs to be cleared by the user. */
5229     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5230         if ((contents == 0             &&
5231              read_exec_only == 1       &&
5232              seg_32bit == 0            &&
5233              limit_in_pages == 0       &&
5234              seg_not_present == 1      &&
5235              useable == 0 )) {
5236             entry_1 = 0;
5237             entry_2 = 0;
5238             goto install;
5239         }
5240     }
5241 
5242     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5243         (ldt_info.limit & 0x0ffff);
5244     entry_2 = (ldt_info.base_addr & 0xff000000) |
5245         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5246         (ldt_info.limit & 0xf0000) |
5247         ((read_exec_only ^ 1) << 9) |
5248         (contents << 10) |
5249         ((seg_not_present ^ 1) << 15) |
5250         (seg_32bit << 22) |
5251         (limit_in_pages << 23) |
5252         (useable << 20) |
5253         (lm << 21) |
5254         0x7000;
5255 
5256     /* Install the new entry ...  */
5257 install:
5258     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5259     lp[0] = tswap32(entry_1);
5260     lp[1] = tswap32(entry_2);
5261     return 0;
5262 }
5263 
5264 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5265 {
5266     struct target_modify_ldt_ldt_s *target_ldt_info;
5267     uint64_t *gdt_table = g2h(env->gdt.base);
5268     uint32_t base_addr, limit, flags;
5269     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5270     int seg_not_present, useable, lm;
5271     uint32_t *lp, entry_1, entry_2;
5272 
5273     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5274     if (!target_ldt_info)
5275         return -TARGET_EFAULT;
5276     idx = tswap32(target_ldt_info->entry_number);
5277     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5278         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5279         unlock_user_struct(target_ldt_info, ptr, 1);
5280         return -TARGET_EINVAL;
5281     }
5282     lp = (uint32_t *)(gdt_table + idx);
5283     entry_1 = tswap32(lp[0]);
5284     entry_2 = tswap32(lp[1]);
5285 
5286     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5287     contents = (entry_2 >> 10) & 3;
5288     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5289     seg_32bit = (entry_2 >> 22) & 1;
5290     limit_in_pages = (entry_2 >> 23) & 1;
5291     useable = (entry_2 >> 20) & 1;
5292 #ifdef TARGET_ABI32
5293     lm = 0;
5294 #else
5295     lm = (entry_2 >> 21) & 1;
5296 #endif
5297     flags = (seg_32bit << 0) | (contents << 1) |
5298         (read_exec_only << 3) | (limit_in_pages << 4) |
5299         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5300     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5301     base_addr = (entry_1 >> 16) |
5302         (entry_2 & 0xff000000) |
5303         ((entry_2 & 0xff) << 16);
5304     target_ldt_info->base_addr = tswapal(base_addr);
5305     target_ldt_info->limit = tswap32(limit);
5306     target_ldt_info->flags = tswap32(flags);
5307     unlock_user_struct(target_ldt_info, ptr, 1);
5308     return 0;
5309 }
5310 #endif /* TARGET_I386 && TARGET_ABI32 */
5311 
5312 #ifndef TARGET_ABI32
5313 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5314 {
5315     abi_long ret = 0;
5316     abi_ulong val;
5317     int idx;
5318 
5319     switch(code) {
5320     case TARGET_ARCH_SET_GS:
5321     case TARGET_ARCH_SET_FS:
5322         if (code == TARGET_ARCH_SET_GS)
5323             idx = R_GS;
5324         else
5325             idx = R_FS;
5326         cpu_x86_load_seg(env, idx, 0);
5327         env->segs[idx].base = addr;
5328         break;
5329     case TARGET_ARCH_GET_GS:
5330     case TARGET_ARCH_GET_FS:
5331         if (code == TARGET_ARCH_GET_GS)
5332             idx = R_GS;
5333         else
5334             idx = R_FS;
5335         val = env->segs[idx].base;
5336         if (put_user(val, addr, abi_ulong))
5337             ret = -TARGET_EFAULT;
5338         break;
5339     default:
5340         ret = -TARGET_EINVAL;
5341         break;
5342     }
5343     return ret;
5344 }
5345 #endif
5346 
5347 #endif /* defined(TARGET_I386) */
5348 
5349 #define NEW_STACK_SIZE 0x40000
5350 
5351 
5352 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
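/* Bookkeeping handed from do_fork() to clone_func(): the parent fills in
 * env and the tid pointers; the child publishes its tid, stores it through
 * the tid pointers if requested, and signals 'cond' once it is ready.
 */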
5353 typedef struct {
5354     CPUArchState *env;
5355     pthread_mutex_t mutex;
5356     pthread_cond_t cond;
5357     pthread_t thread;
5358     uint32_t tid;
5359     abi_ulong child_tidptr;
5360     abi_ulong parent_tidptr;
5361     sigset_t sigmask;
5362 } new_thread_info;
5363 
5364 static void *clone_func(void *arg)
5365 {
5366     new_thread_info *info = arg;
5367     CPUArchState *env;
5368     CPUState *cpu;
5369     TaskState *ts;
5370 
5371     rcu_register_thread();
5372     tcg_register_thread();
5373     env = info->env;
5374     cpu = ENV_GET_CPU(env);
5375     thread_cpu = cpu;
5376     ts = (TaskState *)cpu->opaque;
5377     info->tid = gettid();
5378     task_settid(ts);
5379     if (info->child_tidptr)
5380         put_user_u32(info->tid, info->child_tidptr);
5381     if (info->parent_tidptr)
5382         put_user_u32(info->tid, info->parent_tidptr);
5383     /* Enable signals.  */
5384     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5385     /* Signal to the parent that we're ready.  */
5386     pthread_mutex_lock(&info->mutex);
5387     pthread_cond_broadcast(&info->cond);
5388     pthread_mutex_unlock(&info->mutex);
5389     /* Wait until the parent has finished initializing the TLS state.  */
5390     pthread_mutex_lock(&clone_lock);
5391     pthread_mutex_unlock(&clone_lock);
5392     cpu_loop(env);
5393     /* never exits */
5394     return NULL;
5395 }
5396 
5397 /* do_fork() must return host values and target errnos (unlike most
5398    do_*() functions). */
5399 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5400                    abi_ulong parent_tidptr, target_ulong newtls,
5401                    abi_ulong child_tidptr)
5402 {
5403     CPUState *cpu = ENV_GET_CPU(env);
5404     int ret;
5405     TaskState *ts;
5406     CPUState *new_cpu;
5407     CPUArchState *new_env;
5408     sigset_t sigmask;
5409 
5410     flags &= ~CLONE_IGNORED_FLAGS;
5411 
5412     /* Emulate vfork() with fork() */
5413     if (flags & CLONE_VFORK)
5414         flags &= ~(CLONE_VFORK | CLONE_VM);
5415 
5416     if (flags & CLONE_VM) {
5417         TaskState *parent_ts = (TaskState *)cpu->opaque;
5418         new_thread_info info;
5419         pthread_attr_t attr;
5420 
5421         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5422             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5423             return -TARGET_EINVAL;
5424         }
5425 
5426         ts = g_new0(TaskState, 1);
5427         init_task_state(ts);
5428 
5429         /* Grab a mutex so that thread setup appears atomic.  */
5430         pthread_mutex_lock(&clone_lock);
5431 
5432         /* We create a new CPU instance. */
5433         new_env = cpu_copy(env);
5434         /* Init regs that differ from the parent.  */
5435         cpu_clone_regs(new_env, newsp);
5436         new_cpu = ENV_GET_CPU(new_env);
5437         new_cpu->opaque = ts;
5438         ts->bprm = parent_ts->bprm;
5439         ts->info = parent_ts->info;
5440         ts->signal_mask = parent_ts->signal_mask;
5441 
5442         if (flags & CLONE_CHILD_CLEARTID) {
5443             ts->child_tidptr = child_tidptr;
5444         }
5445 
5446         if (flags & CLONE_SETTLS) {
5447             cpu_set_tls (new_env, newtls);
5448         }
5449 
5450         memset(&info, 0, sizeof(info));
5451         pthread_mutex_init(&info.mutex, NULL);
5452         pthread_mutex_lock(&info.mutex);
5453         pthread_cond_init(&info.cond, NULL);
5454         info.env = new_env;
5455         if (flags & CLONE_CHILD_SETTID) {
5456             info.child_tidptr = child_tidptr;
5457         }
5458         if (flags & CLONE_PARENT_SETTID) {
5459             info.parent_tidptr = parent_tidptr;
5460         }
5461 
5462         ret = pthread_attr_init(&attr);
5463         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5464         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5465         /* It is not safe to deliver signals until the child has finished
5466            initializing, so temporarily block all signals.  */
5467         sigfillset(&sigmask);
5468         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5469 
5470         /* If this is our first additional thread, we need to ensure we
5471          * generate code for parallel execution and flush old translations.
5472          */
5473         if (!parallel_cpus) {
5474             parallel_cpus = true;
5475             tb_flush(cpu);
5476         }
5477 
5478         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5479         /* TODO: Free new CPU state if thread creation failed.  */
5480 
5481         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5482         pthread_attr_destroy(&attr);
5483         if (ret == 0) {
5484             /* Wait for the child to initialize.  */
5485             pthread_cond_wait(&info.cond, &info.mutex);
5486             ret = info.tid;
5487         } else {
5488             ret = -1;
5489         }
5490         pthread_mutex_unlock(&info.mutex);
5491         pthread_cond_destroy(&info.cond);
5492         pthread_mutex_destroy(&info.mutex);
5493         pthread_mutex_unlock(&clone_lock);
5494     } else {
5495         /* If CLONE_VM is not set, we consider it a fork */
5496         if (flags & CLONE_INVALID_FORK_FLAGS) {
5497             return -TARGET_EINVAL;
5498         }
5499 
5500         /* We can't support custom termination signals */
5501         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5502             return -TARGET_EINVAL;
5503         }
5504 
5505         if (block_signals()) {
5506             return -TARGET_ERESTARTSYS;
5507         }
5508 
5509         fork_start();
5510         ret = fork();
5511         if (ret == 0) {
5512             /* Child Process.  */
5513             cpu_clone_regs(env, newsp);
5514             fork_end(1);
5515             /* There is a race condition here.  The parent process could
5516                theoretically read the TID in the child process before the child
5517                tid is set.  This would require using either ptrace
5518                (not implemented) or having *_tidptr point at a shared memory
5519                mapping.  We can't repeat the spinlock hack used above because
5520                the child process gets its own copy of the lock.  */
5521             if (flags & CLONE_CHILD_SETTID)
5522                 put_user_u32(gettid(), child_tidptr);
5523             if (flags & CLONE_PARENT_SETTID)
5524                 put_user_u32(gettid(), parent_tidptr);
5525             ts = (TaskState *)cpu->opaque;
5526             if (flags & CLONE_SETTLS)
5527                 cpu_set_tls (env, newtls);
5528             if (flags & CLONE_CHILD_CLEARTID)
5529                 ts->child_tidptr = child_tidptr;
5530         } else {
5531             fork_end(0);
5532         }
5533     }
5534     return ret;
5535 }
5536 
5537 /* Warning: doesn't handle Linux-specific flags... */
5538 static int target_to_host_fcntl_cmd(int cmd)
5539 {
5540     int ret;
5541 
5542     switch(cmd) {
5543     case TARGET_F_DUPFD:
5544     case TARGET_F_GETFD:
5545     case TARGET_F_SETFD:
5546     case TARGET_F_GETFL:
5547     case TARGET_F_SETFL:
5548         ret = cmd;
5549         break;
5550     case TARGET_F_GETLK:
5551         ret = F_GETLK64;
5552         break;
5553     case TARGET_F_SETLK:
5554         ret = F_SETLK64;
5555         break;
5556     case TARGET_F_SETLKW:
5557         ret = F_SETLKW64;
5558         break;
5559     case TARGET_F_GETOWN:
5560         ret = F_GETOWN;
5561         break;
5562     case TARGET_F_SETOWN:
5563         ret = F_SETOWN;
5564         break;
5565     case TARGET_F_GETSIG:
5566         ret = F_GETSIG;
5567         break;
5568     case TARGET_F_SETSIG:
5569         ret = F_SETSIG;
5570         break;
5571 #if TARGET_ABI_BITS == 32
5572     case TARGET_F_GETLK64:
5573         ret = F_GETLK64;
5574         break;
5575     case TARGET_F_SETLK64:
5576         ret = F_SETLK64;
5577         break;
5578     case TARGET_F_SETLKW64:
5579         ret = F_SETLKW64;
5580         break;
5581 #endif
5582     case TARGET_F_SETLEASE:
5583         ret = F_SETLEASE;
5584         break;
5585     case TARGET_F_GETLEASE:
5586         ret = F_GETLEASE;
5587         break;
5588 #ifdef F_DUPFD_CLOEXEC
5589     case TARGET_F_DUPFD_CLOEXEC:
5590         ret = F_DUPFD_CLOEXEC;
5591         break;
5592 #endif
5593     case TARGET_F_NOTIFY:
5594         ret = F_NOTIFY;
5595         break;
5596 #ifdef F_GETOWN_EX
5597     case TARGET_F_GETOWN_EX:
5598         ret = F_GETOWN_EX;
5599         break;
5600 #endif
5601 #ifdef F_SETOWN_EX
5602     case TARGET_F_SETOWN_EX:
5603         ret = F_SETOWN_EX;
5604         break;
5605 #endif
5606 #ifdef F_SETPIPE_SZ
5607     case TARGET_F_SETPIPE_SZ:
5608         ret = F_SETPIPE_SZ;
5609         break;
5610     case TARGET_F_GETPIPE_SZ:
5611         ret = F_GETPIPE_SZ;
5612         break;
5613 #endif
5614     default:
5615         ret = -TARGET_EINVAL;
5616         break;
5617     }
5618 
5619 #if defined(__powerpc64__)
5620     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which are
5621      * not supported by the kernel. The glibc fcntl call actually adjusts
5622      * them to 5, 6 and 7 before making the syscall(). Since we make the
5623      * syscall directly, adjust to what is supported by the kernel.
5624      */
5625     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5626         ret -= F_GETLK64 - 5;
5627     }
5628 #endif
5629 
5630     return ret;
5631 }
5632 
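/* FLOCK_TRANSTBL is expanded twice with different definitions of
 * TRANSTBL_CONVERT to generate the two flock type conversion helpers below.
 */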
5633 #define FLOCK_TRANSTBL \
5634     switch (type) { \
5635     TRANSTBL_CONVERT(F_RDLCK); \
5636     TRANSTBL_CONVERT(F_WRLCK); \
5637     TRANSTBL_CONVERT(F_UNLCK); \
5638     TRANSTBL_CONVERT(F_EXLCK); \
5639     TRANSTBL_CONVERT(F_SHLCK); \
5640     }
5641 
5642 static int target_to_host_flock(int type)
5643 {
5644 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5645     FLOCK_TRANSTBL
5646 #undef  TRANSTBL_CONVERT
5647     return -TARGET_EINVAL;
5648 }
5649 
5650 static int host_to_target_flock(int type)
5651 {
5652 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5653     FLOCK_TRANSTBL
5654 #undef  TRANSTBL_CONVERT
5655     /* if we don't know how to convert the value coming
5656      * from the host, we copy it to the target field as-is
5657      */
5658     return type;
5659 }
5660 
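/* fcntl lock commands: both the target's struct flock and struct flock64 are
 * converted through a host struct flock64, since target_to_host_fcntl_cmd()
 * maps F_GETLK/F_SETLK/F_SETLKW to their 64-bit host equivalents.
 */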
5661 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5662                                             abi_ulong target_flock_addr)
5663 {
5664     struct target_flock *target_fl;
5665     int l_type;
5666 
5667     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5668         return -TARGET_EFAULT;
5669     }
5670 
5671     __get_user(l_type, &target_fl->l_type);
5672     l_type = target_to_host_flock(l_type);
5673     if (l_type < 0) {
5674         return l_type;
5675     }
5676     fl->l_type = l_type;
5677     __get_user(fl->l_whence, &target_fl->l_whence);
5678     __get_user(fl->l_start, &target_fl->l_start);
5679     __get_user(fl->l_len, &target_fl->l_len);
5680     __get_user(fl->l_pid, &target_fl->l_pid);
5681     unlock_user_struct(target_fl, target_flock_addr, 0);
5682     return 0;
5683 }
5684 
5685 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5686                                           const struct flock64 *fl)
5687 {
5688     struct target_flock *target_fl;
5689     short l_type;
5690 
5691     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5692         return -TARGET_EFAULT;
5693     }
5694 
5695     l_type = host_to_target_flock(fl->l_type);
5696     __put_user(l_type, &target_fl->l_type);
5697     __put_user(fl->l_whence, &target_fl->l_whence);
5698     __put_user(fl->l_start, &target_fl->l_start);
5699     __put_user(fl->l_len, &target_fl->l_len);
5700     __put_user(fl->l_pid, &target_fl->l_pid);
5701     unlock_user_struct(target_fl, target_flock_addr, 1);
5702     return 0;
5703 }
5704 
5705 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5706 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5707 
5708 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5709 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5710                                                    abi_ulong target_flock_addr)
5711 {
5712     struct target_oabi_flock64 *target_fl;
5713     int l_type;
5714 
5715     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5716         return -TARGET_EFAULT;
5717     }
5718 
5719     __get_user(l_type, &target_fl->l_type);
5720     l_type = target_to_host_flock(l_type);
5721     if (l_type < 0) {
5722         return l_type;
5723     }
5724     fl->l_type = l_type;
5725     __get_user(fl->l_whence, &target_fl->l_whence);
5726     __get_user(fl->l_start, &target_fl->l_start);
5727     __get_user(fl->l_len, &target_fl->l_len);
5728     __get_user(fl->l_pid, &target_fl->l_pid);
5729     unlock_user_struct(target_fl, target_flock_addr, 0);
5730     return 0;
5731 }
5732 
5733 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
5734                                                  const struct flock64 *fl)
5735 {
5736     struct target_oabi_flock64 *target_fl;
5737     short l_type;
5738 
5739     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5740         return -TARGET_EFAULT;
5741     }
5742 
5743     l_type = host_to_target_flock(fl->l_type);
5744     __put_user(l_type, &target_fl->l_type);
5745     __put_user(fl->l_whence, &target_fl->l_whence);
5746     __put_user(fl->l_start, &target_fl->l_start);
5747     __put_user(fl->l_len, &target_fl->l_len);
5748     __put_user(fl->l_pid, &target_fl->l_pid);
5749     unlock_user_struct(target_fl, target_flock_addr, 1);
5750     return 0;
5751 }
5752 #endif
5753 
5754 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
5755                                               abi_ulong target_flock_addr)
5756 {
5757     struct target_flock64 *target_fl;
5758     int l_type;
5759 
5760     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5761         return -TARGET_EFAULT;
5762     }
5763 
5764     __get_user(l_type, &target_fl->l_type);
5765     l_type = target_to_host_flock(l_type);
5766     if (l_type < 0) {
5767         return l_type;
5768     }
5769     fl->l_type = l_type;
5770     __get_user(fl->l_whence, &target_fl->l_whence);
5771     __get_user(fl->l_start, &target_fl->l_start);
5772     __get_user(fl->l_len, &target_fl->l_len);
5773     __get_user(fl->l_pid, &target_fl->l_pid);
5774     unlock_user_struct(target_fl, target_flock_addr, 0);
5775     return 0;
5776 }
5777 
5778 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
5779                                             const struct flock64 *fl)
5780 {
5781     struct target_flock64 *target_fl;
5782     short l_type;
5783 
5784     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5785         return -TARGET_EFAULT;
5786     }
5787 
5788     l_type = host_to_target_flock(fl->l_type);
5789     __put_user(l_type, &target_fl->l_type);
5790     __put_user(fl->l_whence, &target_fl->l_whence);
5791     __put_user(fl->l_start, &target_fl->l_start);
5792     __put_user(fl->l_len, &target_fl->l_len);
5793     __put_user(fl->l_pid, &target_fl->l_pid);
5794     unlock_user_struct(target_fl, target_flock_addr, 1);
5795     return 0;
5796 }
5797 
5798 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
5799 {
5800     struct flock64 fl64;
5801 #ifdef F_GETOWN_EX
5802     struct f_owner_ex fox;
5803     struct target_f_owner_ex *target_fox;
5804 #endif
5805     abi_long ret;
5806     int host_cmd = target_to_host_fcntl_cmd(cmd);
5807 
5808     if (host_cmd == -TARGET_EINVAL)
5809 	    return host_cmd;
5810 
5811     switch(cmd) {
5812     case TARGET_F_GETLK:
5813         ret = copy_from_user_flock(&fl64, arg);
5814         if (ret) {
5815             return ret;
5816         }
5817         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5818         if (ret == 0) {
5819             ret = copy_to_user_flock(arg, &fl64);
5820         }
5821         break;
5822 
5823     case TARGET_F_SETLK:
5824     case TARGET_F_SETLKW:
5825         ret = copy_from_user_flock(&fl64, arg);
5826         if (ret) {
5827             return ret;
5828         }
5829         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5830         break;
5831 
5832     case TARGET_F_GETLK64:
5833         ret = copy_from_user_flock64(&fl64, arg);
5834         if (ret) {
5835             return ret;
5836         }
5837         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5838         if (ret == 0) {
5839             ret = copy_to_user_flock64(arg, &fl64);
5840         }
5841         break;
5842     case TARGET_F_SETLK64:
5843     case TARGET_F_SETLKW64:
5844         ret = copy_from_user_flock64(&fl64, arg);
5845         if (ret) {
5846             return ret;
5847         }
5848         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5849         break;
5850 
5851     case TARGET_F_GETFL:
5852         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5853         if (ret >= 0) {
5854             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
5855         }
5856         break;
5857 
5858     case TARGET_F_SETFL:
5859         ret = get_errno(safe_fcntl(fd, host_cmd,
5860                                    target_to_host_bitmask(arg,
5861                                                           fcntl_flags_tbl)));
5862         break;
5863 
5864 #ifdef F_GETOWN_EX
5865     case TARGET_F_GETOWN_EX:
5866         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5867         if (ret >= 0) {
5868             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
5869                 return -TARGET_EFAULT;
5870             target_fox->type = tswap32(fox.type);
5871             target_fox->pid = tswap32(fox.pid);
5872             unlock_user_struct(target_fox, arg, 1);
5873         }
5874         break;
5875 #endif
5876 
5877 #ifdef F_SETOWN_EX
5878     case TARGET_F_SETOWN_EX:
5879         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
5880             return -TARGET_EFAULT;
5881         fox.type = tswap32(target_fox->type);
5882         fox.pid = tswap32(target_fox->pid);
5883         unlock_user_struct(target_fox, arg, 0);
5884         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5885         break;
5886 #endif
5887 
5888     case TARGET_F_SETOWN:
5889     case TARGET_F_GETOWN:
5890     case TARGET_F_SETSIG:
5891     case TARGET_F_GETSIG:
5892     case TARGET_F_SETLEASE:
5893     case TARGET_F_GETLEASE:
5894     case TARGET_F_SETPIPE_SZ:
5895     case TARGET_F_GETPIPE_SZ:
5896         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5897         break;
5898 
5899     default:
5900         ret = get_errno(safe_fcntl(fd, cmd, arg));
5901         break;
5902     }
5903     return ret;
5904 }
5905 
5906 #ifdef USE_UID16
5907 
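/* Helpers for targets with 16-bit uid/gid syscalls: host ids that do not fit
 * in 16 bits are reported to the guest as 65534, and a guest id of -1 is
 * widened so the "leave unchanged" convention of the set*id() calls survives.
 */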
5908 static inline int high2lowuid(int uid)
5909 {
5910     if (uid > 65535)
5911         return 65534;
5912     else
5913         return uid;
5914 }
5915 
5916 static inline int high2lowgid(int gid)
5917 {
5918     if (gid > 65535)
5919         return 65534;
5920     else
5921         return gid;
5922 }
5923 
5924 static inline int low2highuid(int uid)
5925 {
5926     if ((int16_t)uid == -1)
5927         return -1;
5928     else
5929         return uid;
5930 }
5931 
5932 static inline int low2highgid(int gid)
5933 {
5934     if ((int16_t)gid == -1)
5935         return -1;
5936     else
5937         return gid;
5938 }
5939 static inline int tswapid(int id)
5940 {
5941     return tswap16(id);
5942 }
5943 
5944 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5945 
5946 #else /* !USE_UID16 */
5947 static inline int high2lowuid(int uid)
5948 {
5949     return uid;
5950 }
5951 static inline int high2lowgid(int gid)
5952 {
5953     return gid;
5954 }
5955 static inline int low2highuid(int uid)
5956 {
5957     return uid;
5958 }
5959 static inline int low2highgid(int gid)
5960 {
5961     return gid;
5962 }
5963 static inline int tswapid(int id)
5964 {
5965     return tswap32(id);
5966 }
5967 
5968 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5969 
5970 #endif /* USE_UID16 */
5971 
5972 /* We must do direct syscalls for setting UID/GID, because we want to
5973  * implement the Linux system call semantics of "change only for this thread",
5974  * not the libc/POSIX semantics of "change for all threads in process".
5975  * (See http://ewontfix.com/17/ for more details.)
5976  * We use the 32-bit version of the syscalls if present; if it is not
5977  * then either the host architecture supports 32-bit UIDs natively with
5978  * the standard syscall, or the 16-bit UID is the best we can do.
5979  */
5980 #ifdef __NR_setuid32
5981 #define __NR_sys_setuid __NR_setuid32
5982 #else
5983 #define __NR_sys_setuid __NR_setuid
5984 #endif
5985 #ifdef __NR_setgid32
5986 #define __NR_sys_setgid __NR_setgid32
5987 #else
5988 #define __NR_sys_setgid __NR_setgid
5989 #endif
5990 #ifdef __NR_setresuid32
5991 #define __NR_sys_setresuid __NR_setresuid32
5992 #else
5993 #define __NR_sys_setresuid __NR_setresuid
5994 #endif
5995 #ifdef __NR_setresgid32
5996 #define __NR_sys_setresgid __NR_setresgid32
5997 #else
5998 #define __NR_sys_setresgid __NR_setresgid
5999 #endif
6000 
6001 _syscall1(int, sys_setuid, uid_t, uid)
6002 _syscall1(int, sys_setgid, gid_t, gid)
6003 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6004 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6005 
6006 void syscall_init(void)
6007 {
6008     IOCTLEntry *ie;
6009     const argtype *arg_type;
6010     int size;
6011     int i;
6012 
6013     thunk_init(STRUCT_MAX);
6014 
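    /* Register the host and target layouts of every structure listed in
       syscall_types.h with the thunk machinery used by thunk_convert().  */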
6015 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6016 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6017 #include "syscall_types.h"
6018 #undef STRUCT
6019 #undef STRUCT_SPECIAL
6020 
6021     /* Build the target_to_host_errno_table[] from
6022      * host_to_target_errno_table[]. */
6023     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6024         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6025     }
6026 
6027     /* We patch the ioctl size if necessary. We rely on the fact that
6028        no ioctl has all bits set to '1' in the size field. */
6029     ie = ioctl_entries;
6030     while (ie->target_cmd != 0) {
6031         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6032             TARGET_IOC_SIZEMASK) {
6033             arg_type = ie->arg_type;
6034             if (arg_type[0] != TYPE_PTR) {
6035                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6036                         ie->target_cmd);
6037                 exit(1);
6038             }
6039             arg_type++;
6040             size = thunk_type_size(arg_type, 0);
6041             ie->target_cmd = (ie->target_cmd &
6042                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6043                 (size << TARGET_IOC_SIZESHIFT);
6044         }
6045 
6046         /* Automatic consistency check when host and target use the same arch */
6047 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6048     (defined(__x86_64__) && defined(TARGET_X86_64))
6049         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6050             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6051                     ie->name, ie->target_cmd, ie->host_cmd);
6052         }
6053 #endif
6054         ie++;
6055     }
6056 }
6057 
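/* On 32-bit ABIs a 64-bit file offset arrives as a pair of registers whose
 * order depends on the target's endianness; target_offset64() reassembles it.
 * Some targets additionally align such register pairs, which the
 * truncate64/ftruncate64 wrappers below detect via regpairs_aligned().
 */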
6058 #if TARGET_ABI_BITS == 32
6059 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6060 {
6061 #ifdef TARGET_WORDS_BIGENDIAN
6062     return ((uint64_t)word0 << 32) | word1;
6063 #else
6064     return ((uint64_t)word1 << 32) | word0;
6065 #endif
6066 }
6067 #else /* TARGET_ABI_BITS == 32 */
6068 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6069 {
6070     return word0;
6071 }
6072 #endif /* TARGET_ABI_BITS != 32 */
6073 
6074 #ifdef TARGET_NR_truncate64
6075 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6076                                          abi_long arg2,
6077                                          abi_long arg3,
6078                                          abi_long arg4)
6079 {
6080     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6081         arg2 = arg3;
6082         arg3 = arg4;
6083     }
6084     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6085 }
6086 #endif
6087 
6088 #ifdef TARGET_NR_ftruncate64
6089 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6090                                           abi_long arg2,
6091                                           abi_long arg3,
6092                                           abi_long arg4)
6093 {
6094     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6095         arg2 = arg3;
6096         arg3 = arg4;
6097     }
6098     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6099 }
6100 #endif
6101 
6102 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6103                                                abi_ulong target_addr)
6104 {
6105     struct target_timespec *target_ts;
6106 
6107     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6108         return -TARGET_EFAULT;
6109     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6110     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6111     unlock_user_struct(target_ts, target_addr, 0);
6112     return 0;
6113 }
6114 
6115 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6116                                                struct timespec *host_ts)
6117 {
6118     struct target_timespec *target_ts;
6119 
6120     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6121         return -TARGET_EFAULT;
6122     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6123     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6124     unlock_user_struct(target_ts, target_addr, 1);
6125     return 0;
6126 }
6127 
6128 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6129                                                  abi_ulong target_addr)
6130 {
6131     struct target_itimerspec *target_itspec;
6132 
6133     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6134         return -TARGET_EFAULT;
6135     }
6136 
6137     host_itspec->it_interval.tv_sec =
6138                             tswapal(target_itspec->it_interval.tv_sec);
6139     host_itspec->it_interval.tv_nsec =
6140                             tswapal(target_itspec->it_interval.tv_nsec);
6141     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6142     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6143 
6144     unlock_user_struct(target_itspec, target_addr, 1);
6145     return 0;
6146 }
6147 
6148 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6149                                                struct itimerspec *host_its)
6150 {
6151     struct target_itimerspec *target_itspec;
6152 
6153     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6154         return -TARGET_EFAULT;
6155     }
6156 
6157     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6158     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6159 
6160     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6161     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6162 
6163     unlock_user_struct(target_itspec, target_addr, 0);
6164     return 0;
6165 }
6166 
6167 static inline abi_long target_to_host_timex(struct timex *host_tx,
6168                                             abi_long target_addr)
6169 {
6170     struct target_timex *target_tx;
6171 
6172     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6173         return -TARGET_EFAULT;
6174     }
6175 
6176     __get_user(host_tx->modes, &target_tx->modes);
6177     __get_user(host_tx->offset, &target_tx->offset);
6178     __get_user(host_tx->freq, &target_tx->freq);
6179     __get_user(host_tx->maxerror, &target_tx->maxerror);
6180     __get_user(host_tx->esterror, &target_tx->esterror);
6181     __get_user(host_tx->status, &target_tx->status);
6182     __get_user(host_tx->constant, &target_tx->constant);
6183     __get_user(host_tx->precision, &target_tx->precision);
6184     __get_user(host_tx->tolerance, &target_tx->tolerance);
6185     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6186     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6187     __get_user(host_tx->tick, &target_tx->tick);
6188     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6189     __get_user(host_tx->jitter, &target_tx->jitter);
6190     __get_user(host_tx->shift, &target_tx->shift);
6191     __get_user(host_tx->stabil, &target_tx->stabil);
6192     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6193     __get_user(host_tx->calcnt, &target_tx->calcnt);
6194     __get_user(host_tx->errcnt, &target_tx->errcnt);
6195     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6196     __get_user(host_tx->tai, &target_tx->tai);
6197 
6198     unlock_user_struct(target_tx, target_addr, 0);
6199     return 0;
6200 }
6201 
6202 static inline abi_long host_to_target_timex(abi_long target_addr,
6203                                             struct timex *host_tx)
6204 {
6205     struct target_timex *target_tx;
6206 
6207     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6208         return -TARGET_EFAULT;
6209     }
6210 
6211     __put_user(host_tx->modes, &target_tx->modes);
6212     __put_user(host_tx->offset, &target_tx->offset);
6213     __put_user(host_tx->freq, &target_tx->freq);
6214     __put_user(host_tx->maxerror, &target_tx->maxerror);
6215     __put_user(host_tx->esterror, &target_tx->esterror);
6216     __put_user(host_tx->status, &target_tx->status);
6217     __put_user(host_tx->constant, &target_tx->constant);
6218     __put_user(host_tx->precision, &target_tx->precision);
6219     __put_user(host_tx->tolerance, &target_tx->tolerance);
6220     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6221     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6222     __put_user(host_tx->tick, &target_tx->tick);
6223     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6224     __put_user(host_tx->jitter, &target_tx->jitter);
6225     __put_user(host_tx->shift, &target_tx->shift);
6226     __put_user(host_tx->stabil, &target_tx->stabil);
6227     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6228     __put_user(host_tx->calcnt, &target_tx->calcnt);
6229     __put_user(host_tx->errcnt, &target_tx->errcnt);
6230     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6231     __put_user(host_tx->tai, &target_tx->tai);
6232 
6233     unlock_user_struct(target_tx, target_addr, 1);
6234     return 0;
6235 }
6236 
6237 
6238 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6239                                                abi_ulong target_addr)
6240 {
6241     struct target_sigevent *target_sevp;
6242 
6243     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6244         return -TARGET_EFAULT;
6245     }
6246 
6247     /* This union is awkward on 64 bit systems because it has a 32 bit
6248      * integer and a pointer in it; we follow the conversion approach
6249      * used for handling sigval types in signal.c so the guest should get
6250      * the correct value back even if we did a 64 bit byteswap and it's
6251      * using the 32 bit integer.
6252      */
6253     host_sevp->sigev_value.sival_ptr =
6254         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6255     host_sevp->sigev_signo =
6256         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6257     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6258     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6259 
6260     unlock_user_struct(target_sevp, target_addr, 1);
6261     return 0;
6262 }
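
/*
 * Editor's note (illustrative): as with the sigval handling in signal.c,
 * the sigev_value union above is converted as one abi_ulong-sized slot, so
 * the guest reads back the same bits whether it later interprets the field
 * as sigev_value.sival_int or as sigev_value.sival_ptr.
 */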
6263 
6264 #if defined(TARGET_NR_mlockall)
6265 static inline int target_to_host_mlockall_arg(int arg)
6266 {
6267     int result = 0;
6268 
6269     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6270         result |= MCL_CURRENT;
6271     }
6272     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6273         result |= MCL_FUTURE;
6274     }
6275     return result;
6276 }
6277 #endif
6278 
6279 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6280      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6281      defined(TARGET_NR_newfstatat))
6282 static inline abi_long host_to_target_stat64(void *cpu_env,
6283                                              abi_ulong target_addr,
6284                                              struct stat *host_st)
6285 {
6286 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6287     if (((CPUARMState *)cpu_env)->eabi) {
6288         struct target_eabi_stat64 *target_st;
6289 
6290         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6291             return -TARGET_EFAULT;
6292         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6293         __put_user(host_st->st_dev, &target_st->st_dev);
6294         __put_user(host_st->st_ino, &target_st->st_ino);
6295 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6296         __put_user(host_st->st_ino, &target_st->__st_ino);
6297 #endif
6298         __put_user(host_st->st_mode, &target_st->st_mode);
6299         __put_user(host_st->st_nlink, &target_st->st_nlink);
6300         __put_user(host_st->st_uid, &target_st->st_uid);
6301         __put_user(host_st->st_gid, &target_st->st_gid);
6302         __put_user(host_st->st_rdev, &target_st->st_rdev);
6303         __put_user(host_st->st_size, &target_st->st_size);
6304         __put_user(host_st->st_blksize, &target_st->st_blksize);
6305         __put_user(host_st->st_blocks, &target_st->st_blocks);
6306         __put_user(host_st->st_atime, &target_st->target_st_atime);
6307         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6308         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6309         unlock_user_struct(target_st, target_addr, 1);
6310     } else
6311 #endif
6312     {
6313 #if defined(TARGET_HAS_STRUCT_STAT64)
6314         struct target_stat64 *target_st;
6315 #else
6316         struct target_stat *target_st;
6317 #endif
6318 
6319         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6320             return -TARGET_EFAULT;
6321         memset(target_st, 0, sizeof(*target_st));
6322         __put_user(host_st->st_dev, &target_st->st_dev);
6323         __put_user(host_st->st_ino, &target_st->st_ino);
6324 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6325         __put_user(host_st->st_ino, &target_st->__st_ino);
6326 #endif
6327         __put_user(host_st->st_mode, &target_st->st_mode);
6328         __put_user(host_st->st_nlink, &target_st->st_nlink);
6329         __put_user(host_st->st_uid, &target_st->st_uid);
6330         __put_user(host_st->st_gid, &target_st->st_gid);
6331         __put_user(host_st->st_rdev, &target_st->st_rdev);
6332         /* XXX: better use of kernel struct */
6333         __put_user(host_st->st_size, &target_st->st_size);
6334         __put_user(host_st->st_blksize, &target_st->st_blksize);
6335         __put_user(host_st->st_blocks, &target_st->st_blocks);
6336         __put_user(host_st->st_atime, &target_st->target_st_atime);
6337         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6338         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6339         unlock_user_struct(target_st, target_addr, 1);
6340     }
6341 
6342     return 0;
6343 }
6344 #endif
6345 
6346 /* ??? Using host futex calls even when target atomic operations
6347    are not really atomic probably breaks things.  However, implementing
6348    futexes locally would make futexes shared between multiple processes
6349    tricky.  Then again, they are probably useless anyway, because guest
6350    atomic operations will not work either.  */
6351 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6352                     target_ulong uaddr2, int val3)
6353 {
6354     struct timespec ts, *pts;
6355     int base_op;
6356 
6357     /* ??? We assume FUTEX_* constants are the same on both host
6358        and target.  */
6359 #ifdef FUTEX_CMD_MASK
6360     base_op = op & FUTEX_CMD_MASK;
6361 #else
6362     base_op = op;
6363 #endif
6364     switch (base_op) {
6365     case FUTEX_WAIT:
6366     case FUTEX_WAIT_BITSET:
6367         if (timeout) {
6368             pts = &ts;
6369             if (target_to_host_timespec(pts, timeout)) { return -TARGET_EFAULT; }
6370         } else {
6371             pts = NULL;
6372         }
6373         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6374                          pts, NULL, val3));
6375     case FUTEX_WAKE:
6376         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6377     case FUTEX_FD:
6378         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6379     case FUTEX_REQUEUE:
6380     case FUTEX_CMP_REQUEUE:
6381     case FUTEX_WAKE_OP:
6382         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6383            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6384            But the prototype takes a `struct timespec *'; insert casts
6385            to satisfy the compiler.  We do not need to tswap TIMEOUT
6386            since it's not compared to guest memory.  */
6387         pts = (struct timespec *)(uintptr_t) timeout;
6388         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6389                                     g2h(uaddr2),
6390                                     (base_op == FUTEX_CMP_REQUEUE
6391                                      ? tswap32(val3)
6392                                      : val3)));
6393     default:
6394         return -TARGET_ENOSYS;
6395     }
6396 }
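
/*
 * Editor's note (illustrative, not part of the original source): for
 * FUTEX_WAIT the val argument is byte-swapped because the kernel compares
 * it against the 32-bit futex word in guest memory, which is stored in
 * guest byte order, and g2h() turns the guest address of that word into
 * the host pointer the kernel expects.  A guest call such as
 *
 *     futex(&word, FUTEX_WAIT, 1, &ts, NULL, 0);
 *
 * therefore reaches the host as roughly
 *
 *     safe_futex(g2h(guest_word_addr), FUTEX_WAIT, tswap32(1),
 *                &host_ts, NULL, 0);
 */
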
6397 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6398 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6399                                      abi_long handle, abi_long mount_id,
6400                                      abi_long flags)
6401 {
6402     struct file_handle *target_fh;
6403     struct file_handle *fh;
6404     int mid = 0;
6405     abi_long ret;
6406     char *name;
6407     unsigned int size, total_size;
6408 
6409     if (get_user_s32(size, handle)) {
6410         return -TARGET_EFAULT;
6411     }
6412 
6413     name = lock_user_string(pathname);
6414     if (!name) {
6415         return -TARGET_EFAULT;
6416     }
6417 
6418     total_size = sizeof(struct file_handle) + size;
6419     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6420     if (!target_fh) {
6421         unlock_user(name, pathname, 0);
6422         return -TARGET_EFAULT;
6423     }
6424 
6425     fh = g_malloc0(total_size);
6426     fh->handle_bytes = size;
6427 
6428     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6429     unlock_user(name, pathname, 0);
6430 
6431     /* man name_to_handle_at(2):
6432      * Other than the use of the handle_bytes field, the caller should treat
6433      * the file_handle structure as an opaque data type
6434      */
6435 
6436     memcpy(target_fh, fh, total_size);
6437     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6438     target_fh->handle_type = tswap32(fh->handle_type);
6439     g_free(fh);
6440     unlock_user(target_fh, handle, total_size);
6441 
6442     if (put_user_s32(mid, mount_id)) {
6443         return -TARGET_EFAULT;
6444     }
6445 
6446     return ret;
6447 
6448 }
6449 #endif
6450 
6451 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6452 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6453                                      abi_long flags)
6454 {
6455     struct file_handle *target_fh;
6456     struct file_handle *fh;
6457     unsigned int size, total_size;
6458     abi_long ret;
6459 
6460     if (get_user_s32(size, handle)) {
6461         return -TARGET_EFAULT;
6462     }
6463 
6464     total_size = sizeof(struct file_handle) + size;
6465     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6466     if (!target_fh) {
6467         return -TARGET_EFAULT;
6468     }
6469 
6470     fh = g_memdup(target_fh, total_size);
6471     fh->handle_bytes = size;
6472     fh->handle_type = tswap32(target_fh->handle_type);
6473 
6474     ret = get_errno(open_by_handle_at(mount_fd, fh,
6475                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6476 
6477     g_free(fh);
6478 
6479     unlock_user(target_fh, handle, total_size);
6480 
6481     return ret;
6482 }
6483 #endif
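
/*
 * Editor's note (illustrative): struct file_handle is a variable-length
 * object -- a fixed header (handle_bytes, handle_type) followed by
 * handle_bytes of opaque data -- so both helpers above size their buffers
 * as sizeof(struct file_handle) + handle_bytes.  Only the two header
 * fields are byte-swapped for the guest; the opaque payload is copied
 * verbatim, since name_to_handle_at(2) tells callers to treat it as an
 * opaque blob.
 */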
6484 
6485 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6486 
6487 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6488 {
6489     int host_flags;
6490     target_sigset_t *target_mask;
6491     sigset_t host_mask;
6492     abi_long ret;
6493 
6494     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6495         return -TARGET_EINVAL;
6496     }
6497     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6498         return -TARGET_EFAULT;
6499     }
6500 
6501     target_to_host_sigset(&host_mask, target_mask);
6502 
6503     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6504 
6505     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6506     if (ret >= 0) {
6507         fd_trans_register(ret, &target_signalfd_trans);
6508     }
6509 
6510     unlock_user_struct(target_mask, mask, 0);
6511 
6512     return ret;
6513 }
6514 #endif
6515 
6516 /* Map host to target signal numbers for the wait family of syscalls.
6517    Assume all other status bits are the same.  */
6518 int host_to_target_waitstatus(int status)
6519 {
6520     if (WIFSIGNALED(status)) {
6521         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6522     }
6523     if (WIFSTOPPED(status)) {
6524         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6525                | (status & 0xff);
6526     }
6527     return status;
6528 }
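
/*
 * Editor's note (illustrative): in the wait status word the low 7 bits
 * carry the terminating signal (WTERMSIG) and bits 8-15 carry the stopping
 * signal (WSTOPSIG), so only those fields need remapping for targets whose
 * signal numbering differs from the host's (e.g. Alpha or MIPS); the core
 * dump flag and a normal exit status are passed through unchanged.
 */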
6529 
6530 static int open_self_cmdline(void *cpu_env, int fd)
6531 {
6532     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6533     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6534     int i;
6535 
6536     for (i = 0; i < bprm->argc; i++) {
6537         size_t len = strlen(bprm->argv[i]) + 1;
6538 
6539         if (write(fd, bprm->argv[i], len) != len) {
6540             return -1;
6541         }
6542     }
6543 
6544     return 0;
6545 }
6546 
6547 static int open_self_maps(void *cpu_env, int fd)
6548 {
6549     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6550     TaskState *ts = cpu->opaque;
6551     FILE *fp;
6552     char *line = NULL;
6553     size_t len = 0;
6554     ssize_t read;
6555 
6556     fp = fopen("/proc/self/maps", "r");
6557     if (fp == NULL) {
6558         return -1;
6559     }
6560 
6561     while ((read = getline(&line, &len, fp)) != -1) {
6562         int fields, dev_maj, dev_min, inode;
6563         uint64_t min, max, offset;
6564         char flag_r, flag_w, flag_x, flag_p;
6565         char path[513] = "";  /* room for the "%512s" field plus its NUL */
6566         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6567                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6568                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6569 
6570         if ((fields < 10) || (fields > 11)) {
6571             continue;
6572         }
6573         if (h2g_valid(min)) {
6574             int flags = page_get_flags(h2g(min));
6575             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6576             if (page_check_range(h2g(min), max - min, flags) == -1) {
6577                 continue;
6578             }
6579             if (h2g(min) == ts->info->stack_limit) {
6580                 pstrcpy(path, sizeof(path), "      [stack]");
6581             }
6582             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6583                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6584                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6585                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6586                     path[0] ? "         " : "", path);
6587         }
6588     }
6589 
6590     free(line);
6591     fclose(fp);
6592 
6593     return 0;
6594 }
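
/*
 * Editor's note (illustrative): the host's /proc/self/maps cannot be shown
 * to the guest verbatim, because it lists QEMU's own mappings using host
 * virtual addresses.  The loop above therefore keeps only ranges that
 * translate back into guest space (h2g_valid()) and rewrites the address
 * columns with h2g(), so a host line such as
 *
 *     7f1200000000-7f1200010000 rw-p 00000000 00:00 0
 *
 * is re-emitted with the corresponding guest virtual addresses, and the
 * range starting at the guest stack limit is labelled "[stack]".
 */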
6595 
6596 static int open_self_stat(void *cpu_env, int fd)
6597 {
6598     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6599     TaskState *ts = cpu->opaque;
6600     abi_ulong start_stack = ts->info->start_stack;
6601     int i;
6602 
6603     for (i = 0; i < 44; i++) {
6604       char buf[128];
6605       int len;
6606       uint64_t val = 0;
6607 
6608       if (i == 0) {
6609         /* pid */
6610         val = getpid();
6611         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6612       } else if (i == 1) {
6613         /* app name */
6614         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6615       } else if (i == 27) {
6616         /* stack bottom */
6617         val = start_stack;
6618         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6619       } else {
6620         /* for the rest of the fields, just report 0 */
6621         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6622       }
6623 
6624       len = strlen(buf);
6625       if (write(fd, buf, len) != len) {
6626           return -1;
6627       }
6628     }
6629 
6630     return 0;
6631 }
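
/*
 * Editor's note (illustrative): only three of the 44 fields of
 * /proc/self/stat are synthesized here -- the pid (field 1), the command
 * name (field 2) and the start of the stack (field 28); every other field
 * is written as a literal 0, which is sufficient for readers that only
 * want the process name or the stack base.
 */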
6632 
6633 static int open_self_auxv(void *cpu_env, int fd)
6634 {
6635     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6636     TaskState *ts = cpu->opaque;
6637     abi_ulong auxv = ts->info->saved_auxv;
6638     abi_ulong len = ts->info->auxv_len;
6639     char *ptr;
6640 
6641     /*
6642      * The auxiliary vector is stored on the target process's stack.
6643      * Read the whole auxv vector and copy it out to the file.
6644      */
6645     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6646     if (ptr != NULL) {
6647         while (len > 0) {
6648             ssize_t r;
6649             r = write(fd, ptr, len);
6650             if (r <= 0) {
6651                 break;
6652             }
6653             len -= r;
6654             ptr += r;
6655         }
6656         lseek(fd, 0, SEEK_SET);
6657         unlock_user(ptr, auxv, len);
6658     }
6659 
6660     return 0;
6661 }
6662 
6663 static int is_proc_myself(const char *filename, const char *entry)
6664 {
6665     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6666         filename += strlen("/proc/");
6667         if (!strncmp(filename, "self/", strlen("self/"))) {
6668             filename += strlen("self/");
6669         } else if (*filename >= '1' && *filename <= '9') {
6670             char myself[80];
6671             snprintf(myself, sizeof(myself), "%d/", getpid());
6672             if (!strncmp(filename, myself, strlen(myself))) {
6673                 filename += strlen(myself);
6674             } else {
6675                 return 0;
6676             }
6677         } else {
6678             return 0;
6679         }
6680         if (!strcmp(filename, entry)) {
6681             return 1;
6682         }
6683     }
6684     return 0;
6685 }
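
/*
 * Editor's note (illustrative): is_proc_myself() accepts both the "self"
 * spelling and the numeric-pid spelling of the calling process, so with
 * getpid() == 1234:
 *
 *     is_proc_myself("/proc/self/maps", "maps")  -> 1
 *     is_proc_myself("/proc/1234/maps", "maps")  -> 1
 *     is_proc_myself("/proc/999/maps",  "maps")  -> 0
 *     is_proc_myself("/etc/passwd",     "maps")  -> 0
 */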
6686 
6687 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6688 static int is_proc(const char *filename, const char *entry)
6689 {
6690     return strcmp(filename, entry) == 0;
6691 }
6692 
6693 static int open_net_route(void *cpu_env, int fd)
6694 {
6695     FILE *fp;
6696     char *line = NULL;
6697     size_t len = 0;
6698     ssize_t read;
6699 
6700     fp = fopen("/proc/net/route", "r");
6701     if (fp == NULL) {
6702         return -1;
6703     }
6704 
6705     /* read header */
6706 
6707     read = getline(&line, &len, fp);
6708     dprintf(fd, "%s", line);
6709 
6710     /* read routes */
6711 
6712     while ((read = getline(&line, &len, fp)) != -1) {
6713         char iface[16];
6714         uint32_t dest, gw, mask;
6715         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6716         sscanf(line, "%15s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6717                      iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6718                      &mask, &mtu, &window, &irtt);
6719         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6720                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6721                 metric, tswap32(mask), mtu, window, irtt);
6722     }
6723 
6724     free(line);
6725     fclose(fp);
6726 
6727     return 0;
6728 }
6729 #endif
6730 
6731 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
6732 {
6733     struct fake_open {
6734         const char *filename;
6735         int (*fill)(void *cpu_env, int fd);
6736         int (*cmp)(const char *s1, const char *s2);
6737     };
6738     const struct fake_open *fake_open;
6739     static const struct fake_open fakes[] = {
6740         { "maps", open_self_maps, is_proc_myself },
6741         { "stat", open_self_stat, is_proc_myself },
6742         { "auxv", open_self_auxv, is_proc_myself },
6743         { "cmdline", open_self_cmdline, is_proc_myself },
6744 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6745         { "/proc/net/route", open_net_route, is_proc },
6746 #endif
6747         { NULL, NULL, NULL }
6748     };
6749 
6750     if (is_proc_myself(pathname, "exe")) {
6751         int execfd = qemu_getauxval(AT_EXECFD);
6752         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
6753     }
6754 
6755     for (fake_open = fakes; fake_open->filename; fake_open++) {
6756         if (fake_open->cmp(pathname, fake_open->filename)) {
6757             break;
6758         }
6759     }
6760 
6761     if (fake_open->filename) {
6762         const char *tmpdir;
6763         char filename[PATH_MAX];
6764         int fd, r;
6765 
6766         /* create a temporary file to hold the synthesized contents */
6767         tmpdir = getenv("TMPDIR");
6768         if (!tmpdir)
6769             tmpdir = "/tmp";
6770         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
6771         fd = mkstemp(filename);
6772         if (fd < 0) {
6773             return fd;
6774         }
6775         unlink(filename);
6776 
6777         if ((r = fake_open->fill(cpu_env, fd))) {
6778             int e = errno;
6779             close(fd);
6780             errno = e;
6781             return r;
6782         }
6783         lseek(fd, 0, SEEK_SET);
6784 
6785         return fd;
6786     }
6787 
6788     return safe_openat(dirfd, path(pathname), flags, mode);
6789 }
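
/*
 * Editor's note (illustrative): do_openat() intercepts the handful of
 * /proc paths whose contents must reflect the guest's view rather than the
 * host's.  A guest open("/proc/self/maps", O_RDONLY) is satisfied by
 * creating an anonymous temporary file (mkstemp() followed by unlink()),
 * filling it through open_self_maps(), rewinding it and returning that
 * descriptor; any path that matches no fake_open entry falls through to a
 * normal safe_openat() on the host.
 */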
6790 
6791 #define TIMER_MAGIC 0x0caf0000
6792 #define TIMER_MAGIC_MASK 0xffff0000
6793 
6794 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
6795 static target_timer_t get_timer_id(abi_long arg)
6796 {
6797     target_timer_t timerid = arg;
6798 
6799     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6800         return -TARGET_EINVAL;
6801     }
6802 
6803     timerid &= 0xffff;
6804 
6805     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6806         return -TARGET_EINVAL;
6807     }
6808 
6809     return timerid;
6810 }
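
/*
 * Editor's note (illustrative): timer IDs handed out to the guest are the
 * index into g_posix_timers with TIMER_MAGIC or'd into the upper 16 bits,
 * so slot 3 is presented to the guest as 0x0caf0003.  get_timer_id()
 * reverses that encoding and rejects anything without the magic (assuming
 * g_posix_timers has at least four slots):
 *
 *     get_timer_id(0x0caf0003) -> 3
 *     get_timer_id(0x12340003) -> -TARGET_EINVAL
 */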
6811 
6812 static int target_to_host_cpu_mask(unsigned long *host_mask,
6813                                    size_t host_size,
6814                                    abi_ulong target_addr,
6815                                    size_t target_size)
6816 {
6817     unsigned target_bits = sizeof(abi_ulong) * 8;
6818     unsigned host_bits = sizeof(*host_mask) * 8;
6819     abi_ulong *target_mask;
6820     unsigned i, j;
6821 
6822     assert(host_size >= target_size);
6823 
6824     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
6825     if (!target_mask) {
6826         return -TARGET_EFAULT;
6827     }
6828     memset(host_mask, 0, host_size);
6829 
6830     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6831         unsigned bit = i * target_bits;
6832         abi_ulong val;
6833 
6834         __get_user(val, &target_mask[i]);
6835         for (j = 0; j < target_bits; j++, bit++) {
6836             if (val & (1UL << j)) {
6837                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
6838             }
6839         }
6840     }
6841 
6842     unlock_user(target_mask, target_addr, 0);
6843     return 0;
6844 }
6845 
6846 static int host_to_target_cpu_mask(const unsigned long *host_mask,
6847                                    size_t host_size,
6848                                    abi_ulong target_addr,
6849                                    size_t target_size)
6850 {
6851     unsigned target_bits = sizeof(abi_ulong) * 8;
6852     unsigned host_bits = sizeof(*host_mask) * 8;
6853     abi_ulong *target_mask;
6854     unsigned i, j;
6855 
6856     assert(host_size >= target_size);
6857 
6858     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
6859     if (!target_mask) {
6860         return -TARGET_EFAULT;
6861     }
6862 
6863     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6864         unsigned bit = i * target_bits;
6865         abi_ulong val = 0;
6866 
6867         for (j = 0; j < target_bits; j++, bit++) {
6868             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
6869                 val |= 1UL << j;
6870             }
6871         }
6872         __put_user(val, &target_mask[i]);
6873     }
6874 
6875     unlock_user(target_mask, target_addr, target_size);
6876     return 0;
6877 }
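
/*
 * Editor's note (illustrative): these two helpers repack the CPU affinity
 * bitmap between guest words (abi_ulong) and host words (unsigned long)
 * one bit at a time, which copes with differing word sizes.  For a 32-bit
 * guest on a 64-bit host, guest words 0 and 1 become the low and high
 * halves of host word 0, e.g. a guest mask of { 0x0000000f, 0x00000001 }
 * turns into the single host word 0x000000010000000fULL.
 */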
6878 
6879 /* This is an internal helper for do_syscall so that it is easier
6880  * to have a single return point at which actions, such as logging
6881  * of syscall results, can be performed.
6882  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
6883  */
6884 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
6885                             abi_long arg2, abi_long arg3, abi_long arg4,
6886                             abi_long arg5, abi_long arg6, abi_long arg7,
6887                             abi_long arg8)
6888 {
6889     CPUState *cpu = ENV_GET_CPU(cpu_env);
6890     abi_long ret;
6891 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
6892     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
6893     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
6894     struct stat st;
6895 #endif
6896 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
6897     || defined(TARGET_NR_fstatfs)
6898     struct statfs stfs;
6899 #endif
6900     void *p;
6901 
6902     switch(num) {
6903     case TARGET_NR_exit:
6904         /* In old applications this may be used to implement _exit(2).
6905            However in threaded applications it is used for thread termination,
6906            and _exit_group is used for application termination.
6907            Do thread termination if we have more than one thread.  */
6908 
6909         if (block_signals()) {
6910             return -TARGET_ERESTARTSYS;
6911         }
6912 
6913         cpu_list_lock();
6914 
6915         if (CPU_NEXT(first_cpu)) {
6916             TaskState *ts;
6917 
6918             /* Remove the CPU from the list.  */
6919             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
6920 
6921             cpu_list_unlock();
6922 
6923             ts = cpu->opaque;
6924             if (ts->child_tidptr) {
6925                 put_user_u32(0, ts->child_tidptr);
6926                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
6927                           NULL, NULL, 0);
6928             }
6929             thread_cpu = NULL;
6930             object_unref(OBJECT(cpu));
6931             g_free(ts);
6932             rcu_unregister_thread();
6933             pthread_exit(NULL);
6934         }
6935 
6936         cpu_list_unlock();
6937         preexit_cleanup(cpu_env, arg1);
6938         _exit(arg1);
6939         return 0; /* avoid warning */
6940     case TARGET_NR_read:
6941         if (arg3 == 0) {
6942             return 0;
6943         } else {
6944             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6945                 return -TARGET_EFAULT;
6946             ret = get_errno(safe_read(arg1, p, arg3));
6947             if (ret >= 0 &&
6948                 fd_trans_host_to_target_data(arg1)) {
6949                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
6950             }
6951             unlock_user(p, arg2, ret);
6952         }
6953         return ret;
6954     case TARGET_NR_write:
6955         if (arg2 == 0 && arg3 == 0) {
6956             return get_errno(safe_write(arg1, 0, 0));
6957         }
6958         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6959             return -TARGET_EFAULT;
6960         if (fd_trans_target_to_host_data(arg1)) {
6961             void *copy = g_malloc(arg3);
6962             memcpy(copy, p, arg3);
6963             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
6964             if (ret >= 0) {
6965                 ret = get_errno(safe_write(arg1, copy, ret));
6966             }
6967             g_free(copy);
6968         } else {
6969             ret = get_errno(safe_write(arg1, p, arg3));
6970         }
6971         unlock_user(p, arg2, 0);
6972         return ret;
6973 
6974 #ifdef TARGET_NR_open
6975     case TARGET_NR_open:
6976         if (!(p = lock_user_string(arg1)))
6977             return -TARGET_EFAULT;
6978         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
6979                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
6980                                   arg3));
6981         fd_trans_unregister(ret);
6982         unlock_user(p, arg1, 0);
6983         return ret;
6984 #endif
6985     case TARGET_NR_openat:
6986         if (!(p = lock_user_string(arg2)))
6987             return -TARGET_EFAULT;
6988         ret = get_errno(do_openat(cpu_env, arg1, p,
6989                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
6990                                   arg4));
6991         fd_trans_unregister(ret);
6992         unlock_user(p, arg2, 0);
6993         return ret;
6994 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6995     case TARGET_NR_name_to_handle_at:
6996         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
6997         return ret;
6998 #endif
6999 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7000     case TARGET_NR_open_by_handle_at:
7001         ret = do_open_by_handle_at(arg1, arg2, arg3);
7002         fd_trans_unregister(ret);
7003         return ret;
7004 #endif
7005     case TARGET_NR_close:
7006         fd_trans_unregister(arg1);
7007         return get_errno(close(arg1));
7008 
7009     case TARGET_NR_brk:
7010         return do_brk(arg1);
7011 #ifdef TARGET_NR_fork
7012     case TARGET_NR_fork:
7013         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7014 #endif
7015 #ifdef TARGET_NR_waitpid
7016     case TARGET_NR_waitpid:
7017         {
7018             int status;
7019             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7020             if (!is_error(ret) && arg2 && ret
7021                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7022                 return -TARGET_EFAULT;
7023         }
7024         return ret;
7025 #endif
7026 #ifdef TARGET_NR_waitid
7027     case TARGET_NR_waitid:
7028         {
7029             siginfo_t info;
7030             info.si_pid = 0;
7031             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7032             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7033                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7034                     return -TARGET_EFAULT;
7035                 host_to_target_siginfo(p, &info);
7036                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7037             }
7038         }
7039         return ret;
7040 #endif
7041 #ifdef TARGET_NR_creat /* not on alpha */
7042     case TARGET_NR_creat:
7043         if (!(p = lock_user_string(arg1)))
7044             return -TARGET_EFAULT;
7045         ret = get_errno(creat(p, arg2));
7046         fd_trans_unregister(ret);
7047         unlock_user(p, arg1, 0);
7048         return ret;
7049 #endif
7050 #ifdef TARGET_NR_link
7051     case TARGET_NR_link:
7052         {
7053             void * p2;
7054             p = lock_user_string(arg1);
7055             p2 = lock_user_string(arg2);
7056             if (!p || !p2)
7057                 ret = -TARGET_EFAULT;
7058             else
7059                 ret = get_errno(link(p, p2));
7060             unlock_user(p2, arg2, 0);
7061             unlock_user(p, arg1, 0);
7062         }
7063         return ret;
7064 #endif
7065 #if defined(TARGET_NR_linkat)
7066     case TARGET_NR_linkat:
7067         {
7068             void * p2 = NULL;
7069             if (!arg2 || !arg4)
7070                 return -TARGET_EFAULT;
7071             p  = lock_user_string(arg2);
7072             p2 = lock_user_string(arg4);
7073             if (!p || !p2)
7074                 ret = -TARGET_EFAULT;
7075             else
7076                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7077             unlock_user(p, arg2, 0);
7078             unlock_user(p2, arg4, 0);
7079         }
7080         return ret;
7081 #endif
7082 #ifdef TARGET_NR_unlink
7083     case TARGET_NR_unlink:
7084         if (!(p = lock_user_string(arg1)))
7085             return -TARGET_EFAULT;
7086         ret = get_errno(unlink(p));
7087         unlock_user(p, arg1, 0);
7088         return ret;
7089 #endif
7090 #if defined(TARGET_NR_unlinkat)
7091     case TARGET_NR_unlinkat:
7092         if (!(p = lock_user_string(arg2)))
7093             return -TARGET_EFAULT;
7094         ret = get_errno(unlinkat(arg1, p, arg3));
7095         unlock_user(p, arg2, 0);
7096         return ret;
7097 #endif
7098     case TARGET_NR_execve:
7099         {
7100             char **argp, **envp;
7101             int argc, envc;
7102             abi_ulong gp;
7103             abi_ulong guest_argp;
7104             abi_ulong guest_envp;
7105             abi_ulong addr;
7106             char **q;
7107             int total_size = 0;
7108 
7109             argc = 0;
7110             guest_argp = arg2;
7111             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7112                 if (get_user_ual(addr, gp))
7113                     return -TARGET_EFAULT;
7114                 if (!addr)
7115                     break;
7116                 argc++;
7117             }
7118             envc = 0;
7119             guest_envp = arg3;
7120             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7121                 if (get_user_ual(addr, gp))
7122                     return -TARGET_EFAULT;
7123                 if (!addr)
7124                     break;
7125                 envc++;
7126             }
7127 
7128             argp = g_new0(char *, argc + 1);
7129             envp = g_new0(char *, envc + 1);
7130 
7131             for (gp = guest_argp, q = argp; gp;
7132                   gp += sizeof(abi_ulong), q++) {
7133                 if (get_user_ual(addr, gp))
7134                     goto execve_efault;
7135                 if (!addr)
7136                     break;
7137                 if (!(*q = lock_user_string(addr)))
7138                     goto execve_efault;
7139                 total_size += strlen(*q) + 1;
7140             }
7141             *q = NULL;
7142 
7143             for (gp = guest_envp, q = envp; gp;
7144                   gp += sizeof(abi_ulong), q++) {
7145                 if (get_user_ual(addr, gp))
7146                     goto execve_efault;
7147                 if (!addr)
7148                     break;
7149                 if (!(*q = lock_user_string(addr)))
7150                     goto execve_efault;
7151                 total_size += strlen(*q) + 1;
7152             }
7153             *q = NULL;
7154 
7155             if (!(p = lock_user_string(arg1)))
7156                 goto execve_efault;
7157             /* Although execve() is not an interruptible syscall it is
7158              * a special case where we must use the safe_syscall wrapper:
7159              * if we allow a signal to happen before we make the host
7160              * syscall then we will 'lose' it, because at the point of
7161              * execve the process leaves QEMU's control. So we use the
7162              * safe syscall wrapper to ensure that we either take the
7163              * signal as a guest signal, or else it does not happen
7164              * before the execve completes and makes it the other
7165              * program's problem.
7166              */
7167             ret = get_errno(safe_execve(p, argp, envp));
7168             unlock_user(p, arg1, 0);
7169 
7170             goto execve_end;
7171 
7172         execve_efault:
7173             ret = -TARGET_EFAULT;
7174 
7175         execve_end:
7176             for (gp = guest_argp, q = argp; *q;
7177                   gp += sizeof(abi_ulong), q++) {
7178                 if (get_user_ual(addr, gp)
7179                     || !addr)
7180                     break;
7181                 unlock_user(*q, addr, 0);
7182             }
7183             for (gp = guest_envp, q = envp; *q;
7184                   gp += sizeof(abi_ulong), q++) {
7185                 if (get_user_ual(addr, gp)
7186                     || !addr)
7187                     break;
7188                 unlock_user(*q, addr, 0);
7189             }
7190 
7191             g_free(argp);
7192             g_free(envp);
7193         }
7194         return ret;
7195     case TARGET_NR_chdir:
7196         if (!(p = lock_user_string(arg1)))
7197             return -TARGET_EFAULT;
7198         ret = get_errno(chdir(p));
7199         unlock_user(p, arg1, 0);
7200         return ret;
7201 #ifdef TARGET_NR_time
7202     case TARGET_NR_time:
7203         {
7204             time_t host_time;
7205             ret = get_errno(time(&host_time));
7206             if (!is_error(ret)
7207                 && arg1
7208                 && put_user_sal(host_time, arg1))
7209                 return -TARGET_EFAULT;
7210         }
7211         return ret;
7212 #endif
7213 #ifdef TARGET_NR_mknod
7214     case TARGET_NR_mknod:
7215         if (!(p = lock_user_string(arg1)))
7216             return -TARGET_EFAULT;
7217         ret = get_errno(mknod(p, arg2, arg3));
7218         unlock_user(p, arg1, 0);
7219         return ret;
7220 #endif
7221 #if defined(TARGET_NR_mknodat)
7222     case TARGET_NR_mknodat:
7223         if (!(p = lock_user_string(arg2)))
7224             return -TARGET_EFAULT;
7225         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7226         unlock_user(p, arg2, 0);
7227         return ret;
7228 #endif
7229 #ifdef TARGET_NR_chmod
7230     case TARGET_NR_chmod:
7231         if (!(p = lock_user_string(arg1)))
7232             return -TARGET_EFAULT;
7233         ret = get_errno(chmod(p, arg2));
7234         unlock_user(p, arg1, 0);
7235         return ret;
7236 #endif
7237 #ifdef TARGET_NR_lseek
7238     case TARGET_NR_lseek:
7239         return get_errno(lseek(arg1, arg2, arg3));
7240 #endif
7241 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7242     /* Alpha specific */
7243     case TARGET_NR_getxpid:
7244         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7245         return get_errno(getpid());
7246 #endif
7247 #ifdef TARGET_NR_getpid
7248     case TARGET_NR_getpid:
7249         return get_errno(getpid());
7250 #endif
7251     case TARGET_NR_mount:
7252         {
7253             /* need to look at the data field */
7254             void *p2, *p3;
7255 
7256             if (arg1) {
7257                 p = lock_user_string(arg1);
7258                 if (!p) {
7259                     return -TARGET_EFAULT;
7260                 }
7261             } else {
7262                 p = NULL;
7263             }
7264 
7265             p2 = lock_user_string(arg2);
7266             if (!p2) {
7267                 if (arg1) {
7268                     unlock_user(p, arg1, 0);
7269                 }
7270                 return -TARGET_EFAULT;
7271             }
7272 
7273             if (arg3) {
7274                 p3 = lock_user_string(arg3);
7275                 if (!p3) {
7276                     if (arg1) {
7277                         unlock_user(p, arg1, 0);
7278                     }
7279                     unlock_user(p2, arg2, 0);
7280                     return -TARGET_EFAULT;
7281                 }
7282             } else {
7283                 p3 = NULL;
7284             }
7285 
7286             /* FIXME - arg5 should be locked, but it isn't clear how to
7287              * do that since it's not guaranteed to be a NULL-terminated
7288              * string.
7289              */
7290             if (!arg5) {
7291                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7292             } else {
7293                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7294             }
7295             ret = get_errno(ret);
7296 
7297             if (arg1) {
7298                 unlock_user(p, arg1, 0);
7299             }
7300             unlock_user(p2, arg2, 0);
7301             if (arg3) {
7302                 unlock_user(p3, arg3, 0);
7303             }
7304         }
7305         return ret;
7306 #ifdef TARGET_NR_umount
7307     case TARGET_NR_umount:
7308         if (!(p = lock_user_string(arg1)))
7309             return -TARGET_EFAULT;
7310         ret = get_errno(umount(p));
7311         unlock_user(p, arg1, 0);
7312         return ret;
7313 #endif
7314 #ifdef TARGET_NR_stime /* not on alpha */
7315     case TARGET_NR_stime:
7316         {
7317             time_t host_time;
7318             if (get_user_sal(host_time, arg1))
7319                 return -TARGET_EFAULT;
7320             return get_errno(stime(&host_time));
7321         }
7322 #endif
7323 #ifdef TARGET_NR_alarm /* not on alpha */
7324     case TARGET_NR_alarm:
7325         return alarm(arg1);
7326 #endif
7327 #ifdef TARGET_NR_pause /* not on alpha */
7328     case TARGET_NR_pause:
7329         if (!block_signals()) {
7330             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7331         }
7332         return -TARGET_EINTR;
7333 #endif
7334 #ifdef TARGET_NR_utime
7335     case TARGET_NR_utime:
7336         {
7337             struct utimbuf tbuf, *host_tbuf;
7338             struct target_utimbuf *target_tbuf;
7339             if (arg2) {
7340                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7341                     return -TARGET_EFAULT;
7342                 tbuf.actime = tswapal(target_tbuf->actime);
7343                 tbuf.modtime = tswapal(target_tbuf->modtime);
7344                 unlock_user_struct(target_tbuf, arg2, 0);
7345                 host_tbuf = &tbuf;
7346             } else {
7347                 host_tbuf = NULL;
7348             }
7349             if (!(p = lock_user_string(arg1)))
7350                 return -TARGET_EFAULT;
7351             ret = get_errno(utime(p, host_tbuf));
7352             unlock_user(p, arg1, 0);
7353         }
7354         return ret;
7355 #endif
7356 #ifdef TARGET_NR_utimes
7357     case TARGET_NR_utimes:
7358         {
7359             struct timeval *tvp, tv[2];
7360             if (arg2) {
7361                 if (copy_from_user_timeval(&tv[0], arg2)
7362                     || copy_from_user_timeval(&tv[1],
7363                                               arg2 + sizeof(struct target_timeval)))
7364                     return -TARGET_EFAULT;
7365                 tvp = tv;
7366             } else {
7367                 tvp = NULL;
7368             }
7369             if (!(p = lock_user_string(arg1)))
7370                 return -TARGET_EFAULT;
7371             ret = get_errno(utimes(p, tvp));
7372             unlock_user(p, arg1, 0);
7373         }
7374         return ret;
7375 #endif
7376 #if defined(TARGET_NR_futimesat)
7377     case TARGET_NR_futimesat:
7378         {
7379             struct timeval *tvp, tv[2];
7380             if (arg3) {
7381                 if (copy_from_user_timeval(&tv[0], arg3)
7382                     || copy_from_user_timeval(&tv[1],
7383                                               arg3 + sizeof(struct target_timeval)))
7384                     return -TARGET_EFAULT;
7385                 tvp = tv;
7386             } else {
7387                 tvp = NULL;
7388             }
7389             if (!(p = lock_user_string(arg2))) {
7390                 return -TARGET_EFAULT;
7391             }
7392             ret = get_errno(futimesat(arg1, path(p), tvp));
7393             unlock_user(p, arg2, 0);
7394         }
7395         return ret;
7396 #endif
7397 #ifdef TARGET_NR_access
7398     case TARGET_NR_access:
7399         if (!(p = lock_user_string(arg1))) {
7400             return -TARGET_EFAULT;
7401         }
7402         ret = get_errno(access(path(p), arg2));
7403         unlock_user(p, arg1, 0);
7404         return ret;
7405 #endif
7406 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7407     case TARGET_NR_faccessat:
7408         if (!(p = lock_user_string(arg2))) {
7409             return -TARGET_EFAULT;
7410         }
7411         ret = get_errno(faccessat(arg1, p, arg3, 0));
7412         unlock_user(p, arg2, 0);
7413         return ret;
7414 #endif
7415 #ifdef TARGET_NR_nice /* not on alpha */
7416     case TARGET_NR_nice:
7417         return get_errno(nice(arg1));
7418 #endif
7419     case TARGET_NR_sync:
7420         sync();
7421         return 0;
7422 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7423     case TARGET_NR_syncfs:
7424         return get_errno(syncfs(arg1));
7425 #endif
7426     case TARGET_NR_kill:
7427         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7428 #ifdef TARGET_NR_rename
7429     case TARGET_NR_rename:
7430         {
7431             void *p2;
7432             p = lock_user_string(arg1);
7433             p2 = lock_user_string(arg2);
7434             if (!p || !p2)
7435                 ret = -TARGET_EFAULT;
7436             else
7437                 ret = get_errno(rename(p, p2));
7438             unlock_user(p2, arg2, 0);
7439             unlock_user(p, arg1, 0);
7440         }
7441         return ret;
7442 #endif
7443 #if defined(TARGET_NR_renameat)
7444     case TARGET_NR_renameat:
7445         {
7446             void *p2;
7447             p  = lock_user_string(arg2);
7448             p2 = lock_user_string(arg4);
7449             if (!p || !p2)
7450                 ret = -TARGET_EFAULT;
7451             else
7452                 ret = get_errno(renameat(arg1, p, arg3, p2));
7453             unlock_user(p2, arg4, 0);
7454             unlock_user(p, arg2, 0);
7455         }
7456         return ret;
7457 #endif
7458 #if defined(TARGET_NR_renameat2)
7459     case TARGET_NR_renameat2:
7460         {
7461             void *p2;
7462             p  = lock_user_string(arg2);
7463             p2 = lock_user_string(arg4);
7464             if (!p || !p2) {
7465                 ret = -TARGET_EFAULT;
7466             } else {
7467                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7468             }
7469             unlock_user(p2, arg4, 0);
7470             unlock_user(p, arg2, 0);
7471         }
7472         return ret;
7473 #endif
7474 #ifdef TARGET_NR_mkdir
7475     case TARGET_NR_mkdir:
7476         if (!(p = lock_user_string(arg1)))
7477             return -TARGET_EFAULT;
7478         ret = get_errno(mkdir(p, arg2));
7479         unlock_user(p, arg1, 0);
7480         return ret;
7481 #endif
7482 #if defined(TARGET_NR_mkdirat)
7483     case TARGET_NR_mkdirat:
7484         if (!(p = lock_user_string(arg2)))
7485             return -TARGET_EFAULT;
7486         ret = get_errno(mkdirat(arg1, p, arg3));
7487         unlock_user(p, arg2, 0);
7488         return ret;
7489 #endif
7490 #ifdef TARGET_NR_rmdir
7491     case TARGET_NR_rmdir:
7492         if (!(p = lock_user_string(arg1)))
7493             return -TARGET_EFAULT;
7494         ret = get_errno(rmdir(p));
7495         unlock_user(p, arg1, 0);
7496         return ret;
7497 #endif
7498     case TARGET_NR_dup:
7499         ret = get_errno(dup(arg1));
7500         if (ret >= 0) {
7501             fd_trans_dup(arg1, ret);
7502         }
7503         return ret;
7504 #ifdef TARGET_NR_pipe
7505     case TARGET_NR_pipe:
7506         return do_pipe(cpu_env, arg1, 0, 0);
7507 #endif
7508 #ifdef TARGET_NR_pipe2
7509     case TARGET_NR_pipe2:
7510         return do_pipe(cpu_env, arg1,
7511                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7512 #endif
7513     case TARGET_NR_times:
7514         {
7515             struct target_tms *tmsp;
7516             struct tms tms;
7517             ret = get_errno(times(&tms));
7518             if (arg1) {
7519                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7520                 if (!tmsp)
7521                     return -TARGET_EFAULT;
7522                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7523                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7524                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7525                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7526             }
7527             if (!is_error(ret))
7528                 ret = host_to_target_clock_t(ret);
7529         }
7530         return ret;
7531     case TARGET_NR_acct:
7532         if (arg1 == 0) {
7533             ret = get_errno(acct(NULL));
7534         } else {
7535             if (!(p = lock_user_string(arg1))) {
7536                 return -TARGET_EFAULT;
7537             }
7538             ret = get_errno(acct(path(p)));
7539             unlock_user(p, arg1, 0);
7540         }
7541         return ret;
7542 #ifdef TARGET_NR_umount2
7543     case TARGET_NR_umount2:
7544         if (!(p = lock_user_string(arg1)))
7545             return -TARGET_EFAULT;
7546         ret = get_errno(umount2(p, arg2));
7547         unlock_user(p, arg1, 0);
7548         return ret;
7549 #endif
7550     case TARGET_NR_ioctl:
7551         return do_ioctl(arg1, arg2, arg3);
7552 #ifdef TARGET_NR_fcntl
7553     case TARGET_NR_fcntl:
7554         return do_fcntl(arg1, arg2, arg3);
7555 #endif
7556     case TARGET_NR_setpgid:
7557         return get_errno(setpgid(arg1, arg2));
7558     case TARGET_NR_umask:
7559         return get_errno(umask(arg1));
7560     case TARGET_NR_chroot:
7561         if (!(p = lock_user_string(arg1)))
7562             return -TARGET_EFAULT;
7563         ret = get_errno(chroot(p));
7564         unlock_user(p, arg1, 0);
7565         return ret;
7566 #ifdef TARGET_NR_dup2
7567     case TARGET_NR_dup2:
7568         ret = get_errno(dup2(arg1, arg2));
7569         if (ret >= 0) {
7570             fd_trans_dup(arg1, arg2);
7571         }
7572         return ret;
7573 #endif
7574 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7575     case TARGET_NR_dup3:
7576     {
7577         int host_flags;
7578 
7579         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7580             return -TARGET_EINVAL;
7581         }
7582         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7583         ret = get_errno(dup3(arg1, arg2, host_flags));
7584         if (ret >= 0) {
7585             fd_trans_dup(arg1, arg2);
7586         }
7587         return ret;
7588     }
7589 #endif
7590 #ifdef TARGET_NR_getppid /* not on alpha */
7591     case TARGET_NR_getppid:
7592         return get_errno(getppid());
7593 #endif
7594 #ifdef TARGET_NR_getpgrp
7595     case TARGET_NR_getpgrp:
7596         return get_errno(getpgrp());
7597 #endif
7598     case TARGET_NR_setsid:
7599         return get_errno(setsid());
7600 #ifdef TARGET_NR_sigaction
7601     case TARGET_NR_sigaction:
7602         {
7603 #if defined(TARGET_ALPHA)
7604             struct target_sigaction act, oact, *pact = 0;
7605             struct target_old_sigaction *old_act;
7606             if (arg2) {
7607                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7608                     return -TARGET_EFAULT;
7609                 act._sa_handler = old_act->_sa_handler;
7610                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7611                 act.sa_flags = old_act->sa_flags;
7612                 act.sa_restorer = 0;
7613                 unlock_user_struct(old_act, arg2, 0);
7614                 pact = &act;
7615             }
7616             ret = get_errno(do_sigaction(arg1, pact, &oact));
7617             if (!is_error(ret) && arg3) {
7618                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7619                     return -TARGET_EFAULT;
7620                 old_act->_sa_handler = oact._sa_handler;
7621                 old_act->sa_mask = oact.sa_mask.sig[0];
7622                 old_act->sa_flags = oact.sa_flags;
7623                 unlock_user_struct(old_act, arg3, 1);
7624             }
7625 #elif defined(TARGET_MIPS)
7626             struct target_sigaction act, oact, *pact, *old_act;
7627 
7628             if (arg2) {
7629                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7630                     return -TARGET_EFAULT;
7631                 act._sa_handler = old_act->_sa_handler;
7632                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7633                 act.sa_flags = old_act->sa_flags;
7634                 unlock_user_struct(old_act, arg2, 0);
7635                 pact = &act;
7636             } else {
7637                 pact = NULL;
7638             }
7639 
7640             ret = get_errno(do_sigaction(arg1, pact, &oact));
7641 
7642             if (!is_error(ret) && arg3) {
7643                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7644                     return -TARGET_EFAULT;
7645                 old_act->_sa_handler = oact._sa_handler;
7646                 old_act->sa_flags = oact.sa_flags;
7647                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7648                 old_act->sa_mask.sig[1] = 0;
7649                 old_act->sa_mask.sig[2] = 0;
7650                 old_act->sa_mask.sig[3] = 0;
7651                 unlock_user_struct(old_act, arg3, 1);
7652             }
7653 #else
7654             struct target_old_sigaction *old_act;
7655             struct target_sigaction act, oact, *pact;
7656             if (arg2) {
7657                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7658                     return -TARGET_EFAULT;
7659                 act._sa_handler = old_act->_sa_handler;
7660                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7661                 act.sa_flags = old_act->sa_flags;
7662                 act.sa_restorer = old_act->sa_restorer;
7663 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7664                 act.ka_restorer = 0;
7665 #endif
7666                 unlock_user_struct(old_act, arg2, 0);
7667                 pact = &act;
7668             } else {
7669                 pact = NULL;
7670             }
7671             ret = get_errno(do_sigaction(arg1, pact, &oact));
7672             if (!is_error(ret) && arg3) {
7673                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7674                     return -TARGET_EFAULT;
7675                 old_act->_sa_handler = oact._sa_handler;
7676                 old_act->sa_mask = oact.sa_mask.sig[0];
7677                 old_act->sa_flags = oact.sa_flags;
7678                 old_act->sa_restorer = oact.sa_restorer;
7679                 unlock_user_struct(old_act, arg3, 1);
7680             }
7681 #endif
7682         }
7683         return ret;
7684 #endif
7685     case TARGET_NR_rt_sigaction:
7686         {
7687 #if defined(TARGET_ALPHA)
7688             /* For Alpha and SPARC this is a 5 argument syscall, with
7689              * a 'restorer' parameter which must be copied into the
7690              * sa_restorer field of the sigaction struct.
7691              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7692              * and arg5 is the sigsetsize.
7693              * Alpha also has a separate rt_sigaction struct that it uses
7694              * here; SPARC uses the usual sigaction struct.
7695              */
7696             struct target_rt_sigaction *rt_act;
7697             struct target_sigaction act, oact, *pact = 0;
7698 
7699             if (arg4 != sizeof(target_sigset_t)) {
7700                 return -TARGET_EINVAL;
7701             }
7702             if (arg2) {
7703                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7704                     return -TARGET_EFAULT;
7705                 act._sa_handler = rt_act->_sa_handler;
7706                 act.sa_mask = rt_act->sa_mask;
7707                 act.sa_flags = rt_act->sa_flags;
7708                 act.sa_restorer = arg5;
7709                 unlock_user_struct(rt_act, arg2, 0);
7710                 pact = &act;
7711             }
7712             ret = get_errno(do_sigaction(arg1, pact, &oact));
7713             if (!is_error(ret) && arg3) {
7714                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7715                     return -TARGET_EFAULT;
7716                 rt_act->_sa_handler = oact._sa_handler;
7717                 rt_act->sa_mask = oact.sa_mask;
7718                 rt_act->sa_flags = oact.sa_flags;
7719                 unlock_user_struct(rt_act, arg3, 1);
7720             }
7721 #else
7722 #ifdef TARGET_SPARC
7723             target_ulong restorer = arg4;
7724             target_ulong sigsetsize = arg5;
7725 #else
7726             target_ulong sigsetsize = arg4;
7727 #endif
7728             struct target_sigaction *act;
7729             struct target_sigaction *oact;
7730 
7731             if (sigsetsize != sizeof(target_sigset_t)) {
7732                 return -TARGET_EINVAL;
7733             }
7734             if (arg2) {
7735                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
7736                     return -TARGET_EFAULT;
7737                 }
7738 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7739                 act->ka_restorer = restorer;
7740 #endif
7741             } else {
7742                 act = NULL;
7743             }
7744             if (arg3) {
7745                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7746                     ret = -TARGET_EFAULT;
7747                     goto rt_sigaction_fail;
7748                 }
7749             } else
7750                 oact = NULL;
7751             ret = get_errno(do_sigaction(arg1, act, oact));
7752     rt_sigaction_fail:
7753             if (act)
7754                 unlock_user_struct(act, arg2, 0);
7755             if (oact)
7756                 unlock_user_struct(oact, arg3, 1);
7757 #endif
7758         }
7759         return ret;
7760 #ifdef TARGET_NR_sgetmask /* not on alpha */
7761     case TARGET_NR_sgetmask:
7762         {
7763             sigset_t cur_set;
7764             abi_ulong target_set;
7765             ret = do_sigprocmask(0, NULL, &cur_set);
7766             if (!ret) {
7767                 host_to_target_old_sigset(&target_set, &cur_set);
7768                 ret = target_set;
7769             }
7770         }
7771         return ret;
7772 #endif
7773 #ifdef TARGET_NR_ssetmask /* not on alpha */
7774     case TARGET_NR_ssetmask:
7775         {
7776             sigset_t set, oset;
7777             abi_ulong target_set = arg1;
7778             target_to_host_old_sigset(&set, &target_set);
7779             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7780             if (!ret) {
7781                 host_to_target_old_sigset(&target_set, &oset);
7782                 ret = target_set;
7783             }
7784         }
7785         return ret;
7786 #endif
7787 #ifdef TARGET_NR_sigprocmask
7788     case TARGET_NR_sigprocmask:
7789         {
7790 #if defined(TARGET_ALPHA)
7791             sigset_t set, oldset;
7792             abi_ulong mask;
7793             int how;
7794 
7795             switch (arg1) {
7796             case TARGET_SIG_BLOCK:
7797                 how = SIG_BLOCK;
7798                 break;
7799             case TARGET_SIG_UNBLOCK:
7800                 how = SIG_UNBLOCK;
7801                 break;
7802             case TARGET_SIG_SETMASK:
7803                 how = SIG_SETMASK;
7804                 break;
7805             default:
7806                 return -TARGET_EINVAL;
7807             }
7808             mask = arg2;
7809             target_to_host_old_sigset(&set, &mask);
7810 
7811             ret = do_sigprocmask(how, &set, &oldset);
7812             if (!is_error(ret)) {
7813                 host_to_target_old_sigset(&mask, &oldset);
7814                 ret = mask;
7815                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7816             }
7817 #else
7818             sigset_t set, oldset, *set_ptr;
7819             int how;
7820 
7821             if (arg2) {
7822                 switch (arg1) {
7823                 case TARGET_SIG_BLOCK:
7824                     how = SIG_BLOCK;
7825                     break;
7826                 case TARGET_SIG_UNBLOCK:
7827                     how = SIG_UNBLOCK;
7828                     break;
7829                 case TARGET_SIG_SETMASK:
7830                     how = SIG_SETMASK;
7831                     break;
7832                 default:
7833                     return -TARGET_EINVAL;
7834                 }
7835                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7836                     return -TARGET_EFAULT;
7837                 target_to_host_old_sigset(&set, p);
7838                 unlock_user(p, arg2, 0);
7839                 set_ptr = &set;
7840             } else {
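                /* No new mask is being supplied, so sigprocmask() will ignore
                 * 'how'; pass a harmless 0. */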
7841                 how = 0;
7842                 set_ptr = NULL;
7843             }
7844             ret = do_sigprocmask(how, set_ptr, &oldset);
7845             if (!is_error(ret) && arg3) {
7846                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7847                     return -TARGET_EFAULT;
7848                 host_to_target_old_sigset(p, &oldset);
7849                 unlock_user(p, arg3, sizeof(target_sigset_t));
7850             }
7851 #endif
7852         }
7853         return ret;
7854 #endif
7855     case TARGET_NR_rt_sigprocmask:
7856         {
7857             int how = arg1;
7858             sigset_t set, oldset, *set_ptr;
7859 
7860             if (arg4 != sizeof(target_sigset_t)) {
7861                 return -TARGET_EINVAL;
7862             }
7863 
7864             if (arg2) {
7865                 switch(how) {
7866                 case TARGET_SIG_BLOCK:
7867                     how = SIG_BLOCK;
7868                     break;
7869                 case TARGET_SIG_UNBLOCK:
7870                     how = SIG_UNBLOCK;
7871                     break;
7872                 case TARGET_SIG_SETMASK:
7873                     how = SIG_SETMASK;
7874                     break;
7875                 default:
7876                     return -TARGET_EINVAL;
7877                 }
7878                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7879                     return -TARGET_EFAULT;
7880                 target_to_host_sigset(&set, p);
7881                 unlock_user(p, arg2, 0);
7882                 set_ptr = &set;
7883             } else {
7884                 how = 0;
7885                 set_ptr = NULL;
7886             }
7887             ret = do_sigprocmask(how, set_ptr, &oldset);
7888             if (!is_error(ret) && arg3) {
7889                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7890                     return -TARGET_EFAULT;
7891                 host_to_target_sigset(p, &oldset);
7892                 unlock_user(p, arg3, sizeof(target_sigset_t));
7893             }
7894         }
7895         return ret;
7896 #ifdef TARGET_NR_sigpending
7897     case TARGET_NR_sigpending:
7898         {
7899             sigset_t set;
7900             ret = get_errno(sigpending(&set));
7901             if (!is_error(ret)) {
7902                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7903                     return -TARGET_EFAULT;
7904                 host_to_target_old_sigset(p, &set);
7905                 unlock_user(p, arg1, sizeof(target_sigset_t));
7906             }
7907         }
7908         return ret;
7909 #endif
7910     case TARGET_NR_rt_sigpending:
7911         {
7912             sigset_t set;
7913 
7914             /* Yes, this check is >, not != like most. We follow the kernel's
7915              * logic and it does it like this because it implements
7916              * NR_sigpending through the same code path, and in that case
7917              * the old_sigset_t is smaller in size.
7918              */
7919             if (arg2 > sizeof(target_sigset_t)) {
7920                 return -TARGET_EINVAL;
7921             }
7922 
7923             ret = get_errno(sigpending(&set));
7924             if (!is_error(ret)) {
7925                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7926                     return -TARGET_EFAULT;
7927                 host_to_target_sigset(p, &set);
7928                 unlock_user(p, arg1, sizeof(target_sigset_t));
7929             }
7930         }
7931         return ret;
7932 #ifdef TARGET_NR_sigsuspend
7933     case TARGET_NR_sigsuspend:
7934         {
7935             TaskState *ts = cpu->opaque;
7936 #if defined(TARGET_ALPHA)
7937             abi_ulong mask = arg1;
7938             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
7939 #else
7940             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7941                 return -TARGET_EFAULT;
7942             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
7943             unlock_user(p, arg1, 0);
7944 #endif
7945             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7946                                                SIGSET_T_SIZE));
7947             if (ret != -TARGET_ERESTARTSYS) {
7948                 ts->in_sigsuspend = 1;
7949             }
7950         }
7951         return ret;
7952 #endif
7953     case TARGET_NR_rt_sigsuspend:
7954         {
7955             TaskState *ts = cpu->opaque;
7956 
7957             if (arg2 != sizeof(target_sigset_t)) {
7958                 return -TARGET_EINVAL;
7959             }
7960             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7961                 return -TARGET_EFAULT;
7962             target_to_host_sigset(&ts->sigsuspend_mask, p);
7963             unlock_user(p, arg1, 0);
7964             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7965                                                SIGSET_T_SIZE));
7966             if (ret != -TARGET_ERESTARTSYS) {
7967                 ts->in_sigsuspend = 1;
7968             }
7969         }
7970         return ret;
7971     case TARGET_NR_rt_sigtimedwait:
7972         {
7973             sigset_t set;
7974             struct timespec uts, *puts;
7975             siginfo_t uinfo;
7976 
7977             if (arg4 != sizeof(target_sigset_t)) {
7978                 return -TARGET_EINVAL;
7979             }
7980 
7981             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7982                 return -TARGET_EFAULT;
7983             target_to_host_sigset(&set, p);
7984             unlock_user(p, arg1, 0);
7985             if (arg3) {
7986                 puts = &uts;
7987                 target_to_host_timespec(puts, arg3);
7988             } else {
7989                 puts = NULL;
7990             }
7991             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
7992                                                  SIGSET_T_SIZE));
7993             if (!is_error(ret)) {
7994                 if (arg2) {
7995                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
7996                                   0);
7997                     if (!p) {
7998                         return -TARGET_EFAULT;
7999                     }
8000                     host_to_target_siginfo(p, &uinfo);
8001                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8002                 }
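                /* On success the host call returns the number of the signal
                 * that was received; convert it to the target's numbering. */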
8003                 ret = host_to_target_signal(ret);
8004             }
8005         }
8006         return ret;
8007     case TARGET_NR_rt_sigqueueinfo:
8008         {
8009             siginfo_t uinfo;
8010 
8011             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8012             if (!p) {
8013                 return -TARGET_EFAULT;
8014             }
8015             target_to_host_siginfo(&uinfo, p);
8016             unlock_user(p, arg3, 0);
8017             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8018         }
8019         return ret;
8020     case TARGET_NR_rt_tgsigqueueinfo:
8021         {
8022             siginfo_t uinfo;
8023 
8024             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8025             if (!p) {
8026                 return -TARGET_EFAULT;
8027             }
8028             target_to_host_siginfo(&uinfo, p);
8029             unlock_user(p, arg4, 0);
8030             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8031         }
8032         return ret;
8033 #ifdef TARGET_NR_sigreturn
8034     case TARGET_NR_sigreturn:
8035         if (block_signals()) {
8036             return -TARGET_ERESTARTSYS;
8037         }
8038         return do_sigreturn(cpu_env);
8039 #endif
8040     case TARGET_NR_rt_sigreturn:
8041         if (block_signals()) {
8042             return -TARGET_ERESTARTSYS;
8043         }
8044         return do_rt_sigreturn(cpu_env);
8045     case TARGET_NR_sethostname:
8046         if (!(p = lock_user_string(arg1)))
8047             return -TARGET_EFAULT;
8048         ret = get_errno(sethostname(p, arg2));
8049         unlock_user(p, arg1, 0);
8050         return ret;
8051 #ifdef TARGET_NR_setrlimit
8052     case TARGET_NR_setrlimit:
8053         {
8054             int resource = target_to_host_resource(arg1);
8055             struct target_rlimit *target_rlim;
8056             struct rlimit rlim;
8057             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8058                 return -TARGET_EFAULT;
8059             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8060             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8061             unlock_user_struct(target_rlim, arg2, 0);
8062             /*
8063              * If we just passed through resource limit settings for memory then
8064              * they would also apply to QEMU's own allocations, and QEMU will
8065              * crash or hang or die if its allocations fail. Ideally we would
8066              * track the guest allocations in QEMU and apply the limits ourselves.
8067              * For now, just tell the guest the call succeeded but don't actually
8068              * limit anything.
8069              */
8070             if (resource != RLIMIT_AS &&
8071                 resource != RLIMIT_DATA &&
8072                 resource != RLIMIT_STACK) {
8073                 return get_errno(setrlimit(resource, &rlim));
8074             } else {
8075                 return 0;
8076             }
8077         }
8078 #endif
8079 #ifdef TARGET_NR_getrlimit
8080     case TARGET_NR_getrlimit:
8081         {
8082             int resource = target_to_host_resource(arg1);
8083             struct target_rlimit *target_rlim;
8084             struct rlimit rlim;
8085 
8086             ret = get_errno(getrlimit(resource, &rlim));
8087             if (!is_error(ret)) {
8088                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8089                     return -TARGET_EFAULT;
8090                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8091                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8092                 unlock_user_struct(target_rlim, arg2, 1);
8093             }
8094         }
8095         return ret;
8096 #endif
8097     case TARGET_NR_getrusage:
8098         {
8099             struct rusage rusage;
8100             ret = get_errno(getrusage(arg1, &rusage));
8101             if (!is_error(ret)) {
8102                 ret = host_to_target_rusage(arg2, &rusage);
8103             }
8104         }
8105         return ret;
8106     case TARGET_NR_gettimeofday:
8107         {
8108             struct timeval tv;
8109             ret = get_errno(gettimeofday(&tv, NULL));
8110             if (!is_error(ret)) {
8111                 if (copy_to_user_timeval(arg1, &tv))
8112                     return -TARGET_EFAULT;
8113             }
8114         }
8115         return ret;
8116     case TARGET_NR_settimeofday:
8117         {
8118             struct timeval tv, *ptv = NULL;
8119             struct timezone tz, *ptz = NULL;
8120 
8121             if (arg1) {
8122                 if (copy_from_user_timeval(&tv, arg1)) {
8123                     return -TARGET_EFAULT;
8124                 }
8125                 ptv = &tv;
8126             }
8127 
8128             if (arg2) {
8129                 if (copy_from_user_timezone(&tz, arg2)) {
8130                     return -TARGET_EFAULT;
8131                 }
8132                 ptz = &tz;
8133             }
8134 
8135             return get_errno(settimeofday(ptv, ptz));
8136         }
8137 #if defined(TARGET_NR_select)
8138     case TARGET_NR_select:
8139 #if defined(TARGET_WANT_NI_OLD_SELECT)
8140         /* Some architectures used to have old_select here,
8141          * but now return ENOSYS for it.
8142          */
8143         ret = -TARGET_ENOSYS;
8144 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8145         ret = do_old_select(arg1);
8146 #else
8147         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8148 #endif
8149         return ret;
8150 #endif
8151 #ifdef TARGET_NR_pselect6
8152     case TARGET_NR_pselect6:
8153         {
8154             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8155             fd_set rfds, wfds, efds;
8156             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8157             struct timespec ts, *ts_ptr;
8158 
8159             /*
8160              * The 6th arg is actually two args smashed together,
8161              * so we cannot use the C library.
8162              */
8163             sigset_t set;
8164             struct {
8165                 sigset_t *set;
8166                 size_t size;
8167             } sig, *sig_ptr;
8168 
8169             abi_ulong arg_sigset, arg_sigsize, *arg7;
8170             target_sigset_t *target_sigset;
8171 
8172             n = arg1;
8173             rfd_addr = arg2;
8174             wfd_addr = arg3;
8175             efd_addr = arg4;
8176             ts_addr = arg5;
8177 
8178             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8179             if (ret) {
8180                 return ret;
8181             }
8182             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8183             if (ret) {
8184                 return ret;
8185             }
8186             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8187             if (ret) {
8188                 return ret;
8189             }
8190 
8191             /*
8192              * This takes a timespec, and not a timeval, so we cannot
8193              * use the do_select() helper ...
8194              */
8195             if (ts_addr) {
8196                 if (target_to_host_timespec(&ts, ts_addr)) {
8197                     return -TARGET_EFAULT;
8198                 }
8199                 ts_ptr = &ts;
8200             } else {
8201                 ts_ptr = NULL;
8202             }
8203 
8204             /* Extract the two packed args for the sigset */
8205             if (arg6) {
8206                 sig_ptr = &sig;
8207                 sig.size = SIGSET_T_SIZE;
8208 
8209                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8210                 if (!arg7) {
8211                     return -TARGET_EFAULT;
8212                 }
8213                 arg_sigset = tswapal(arg7[0]);
8214                 arg_sigsize = tswapal(arg7[1]);
8215                 unlock_user(arg7, arg6, 0);
8216 
8217                 if (arg_sigset) {
8218                     sig.set = &set;
8219                     if (arg_sigsize != sizeof(*target_sigset)) {
8220                         /* Like the kernel, we enforce correct size sigsets */
8221                         return -TARGET_EINVAL;
8222                     }
8223                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8224                                               sizeof(*target_sigset), 1);
8225                     if (!target_sigset) {
8226                         return -TARGET_EFAULT;
8227                     }
8228                     target_to_host_sigset(&set, target_sigset);
8229                     unlock_user(target_sigset, arg_sigset, 0);
8230                 } else {
8231                     sig.set = NULL;
8232                 }
8233             } else {
8234                 sig_ptr = NULL;
8235             }
8236 
8237             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8238                                           ts_ptr, sig_ptr));
8239 
8240             if (!is_error(ret)) {
8241                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8242                     return -TARGET_EFAULT;
8243                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8244                     return -TARGET_EFAULT;
8245                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8246                     return -TARGET_EFAULT;
8247 
8248                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8249                     return -TARGET_EFAULT;
8250             }
8251         }
8252         return ret;
8253 #endif
8254 #ifdef TARGET_NR_symlink
8255     case TARGET_NR_symlink:
8256         {
8257             void *p2;
8258             p = lock_user_string(arg1);
8259             p2 = lock_user_string(arg2);
8260             if (!p || !p2)
8261                 ret = -TARGET_EFAULT;
8262             else
8263                 ret = get_errno(symlink(p, p2));
8264             unlock_user(p2, arg2, 0);
8265             unlock_user(p, arg1, 0);
8266         }
8267         return ret;
8268 #endif
8269 #if defined(TARGET_NR_symlinkat)
8270     case TARGET_NR_symlinkat:
8271         {
8272             void *p2;
8273             p  = lock_user_string(arg1);
8274             p2 = lock_user_string(arg3);
8275             if (!p || !p2)
8276                 ret = -TARGET_EFAULT;
8277             else
8278                 ret = get_errno(symlinkat(p, arg2, p2));
8279             unlock_user(p2, arg3, 0);
8280             unlock_user(p, arg1, 0);
8281         }
8282         return ret;
8283 #endif
8284 #ifdef TARGET_NR_readlink
8285     case TARGET_NR_readlink:
8286         {
8287             void *p2;
8288             p = lock_user_string(arg1);
8289             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8290             if (!p || !p2) {
8291                 ret = -TARGET_EFAULT;
8292             } else if (!arg3) {
8293                 /* Short circuit this for the magic exe check. */
8294                 ret = -TARGET_EINVAL;
8295             } else if (is_proc_myself((const char *)p, "exe")) {
8296                 char real[PATH_MAX], *temp;
8297                 temp = realpath(exec_path, real);
8298                 /* Return value is # of bytes that we wrote to the buffer. */
8299                 if (temp == NULL) {
8300                     ret = get_errno(-1);
8301                 } else {
8302                     /* Don't worry about sign mismatch as earlier mapping
8303                      * logic would have thrown a bad address error. */
8304                     ret = MIN(strlen(real), arg3);
8305                     /* We cannot NUL terminate the string. */
8306                     memcpy(p2, real, ret);
8307                 }
8308             } else {
8309                 ret = get_errno(readlink(path(p), p2, arg3));
8310             }
8311             unlock_user(p2, arg2, ret);
8312             unlock_user(p, arg1, 0);
8313         }
8314         return ret;
8315 #endif
8316 #if defined(TARGET_NR_readlinkat)
8317     case TARGET_NR_readlinkat:
8318         {
8319             void *p2;
8320             p  = lock_user_string(arg2);
8321             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8322             if (!p || !p2) {
8323                 ret = -TARGET_EFAULT;
8324             } else if (is_proc_myself((const char *)p, "exe")) {
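                /* As in the readlink case above: readlinkat of /proc/self/exe
                 * reports the guest executable's path (exec_path) rather than
                 * QEMU's own binary. */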
8325                 char real[PATH_MAX], *temp;
8326                 temp = realpath(exec_path, real);
8327                 ret = temp == NULL ? get_errno(-1) : strlen(real);
8328                 snprintf((char *)p2, arg4, "%s", real);
8329             } else {
8330                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8331             }
8332             unlock_user(p2, arg3, ret);
8333             unlock_user(p, arg2, 0);
8334         }
8335         return ret;
8336 #endif
8337 #ifdef TARGET_NR_swapon
8338     case TARGET_NR_swapon:
8339         if (!(p = lock_user_string(arg1)))
8340             return -TARGET_EFAULT;
8341         ret = get_errno(swapon(p, arg2));
8342         unlock_user(p, arg1, 0);
8343         return ret;
8344 #endif
8345     case TARGET_NR_reboot:
8346         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8347            /* arg4 must be ignored in all other cases */
8348            p = lock_user_string(arg4);
8349            if (!p) {
8350                return -TARGET_EFAULT;
8351            }
8352            ret = get_errno(reboot(arg1, arg2, arg3, p));
8353            unlock_user(p, arg4, 0);
8354         } else {
8355            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8356         }
8357         return ret;
8358 #ifdef TARGET_NR_mmap
8359     case TARGET_NR_mmap:
8360 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8361     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8362     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8363     || defined(TARGET_S390X)
8364         {
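            /*
             * These targets pass the six mmap arguments indirectly: arg1
             * points at a block of six abi_ulong values in guest memory
             * holding addr, length, prot, flags, fd and offset. Read them
             * out, byte-swapping as needed, before calling target_mmap().
             */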
8365             abi_ulong *v;
8366             abi_ulong v1, v2, v3, v4, v5, v6;
8367             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8368                 return -TARGET_EFAULT;
8369             v1 = tswapal(v[0]);
8370             v2 = tswapal(v[1]);
8371             v3 = tswapal(v[2]);
8372             v4 = tswapal(v[3]);
8373             v5 = tswapal(v[4]);
8374             v6 = tswapal(v[5]);
8375             unlock_user(v, arg1, 0);
8376             ret = get_errno(target_mmap(v1, v2, v3,
8377                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8378                                         v5, v6));
8379         }
8380 #else
8381         ret = get_errno(target_mmap(arg1, arg2, arg3,
8382                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8383                                     arg5,
8384                                     arg6));
8385 #endif
8386         return ret;
8387 #endif
8388 #ifdef TARGET_NR_mmap2
8389     case TARGET_NR_mmap2:
8390 #ifndef MMAP_SHIFT
8391 #define MMAP_SHIFT 12
8392 #endif
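        /* mmap2 passes the file offset in units of 2^MMAP_SHIFT-byte
         * (4096-byte) pages, so shift it up to the byte offset that
         * target_mmap() expects. */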
8393         ret = target_mmap(arg1, arg2, arg3,
8394                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8395                           arg5, arg6 << MMAP_SHIFT);
8396         return get_errno(ret);
8397 #endif
8398     case TARGET_NR_munmap:
8399         return get_errno(target_munmap(arg1, arg2));
8400     case TARGET_NR_mprotect:
8401         {
8402             TaskState *ts = cpu->opaque;
8403             /* Special hack to detect libc making the stack executable.  */
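            /* If so, drop PROT_GROWSDOWN and instead apply the protection
             * change explicitly from the guest's stack limit up to the
             * requested range. */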
8404             if ((arg3 & PROT_GROWSDOWN)
8405                 && arg1 >= ts->info->stack_limit
8406                 && arg1 <= ts->info->start_stack) {
8407                 arg3 &= ~PROT_GROWSDOWN;
8408                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8409                 arg1 = ts->info->stack_limit;
8410             }
8411         }
8412         return get_errno(target_mprotect(arg1, arg2, arg3));
8413 #ifdef TARGET_NR_mremap
8414     case TARGET_NR_mremap:
8415         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8416 #endif
8417         /* ??? msync/mlock/munlock are broken for softmmu.  */
8418 #ifdef TARGET_NR_msync
8419     case TARGET_NR_msync:
8420         return get_errno(msync(g2h(arg1), arg2, arg3));
8421 #endif
8422 #ifdef TARGET_NR_mlock
8423     case TARGET_NR_mlock:
8424         return get_errno(mlock(g2h(arg1), arg2));
8425 #endif
8426 #ifdef TARGET_NR_munlock
8427     case TARGET_NR_munlock:
8428         return get_errno(munlock(g2h(arg1), arg2));
8429 #endif
8430 #ifdef TARGET_NR_mlockall
8431     case TARGET_NR_mlockall:
8432         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8433 #endif
8434 #ifdef TARGET_NR_munlockall
8435     case TARGET_NR_munlockall:
8436         return get_errno(munlockall());
8437 #endif
8438 #ifdef TARGET_NR_truncate
8439     case TARGET_NR_truncate:
8440         if (!(p = lock_user_string(arg1)))
8441             return -TARGET_EFAULT;
8442         ret = get_errno(truncate(p, arg2));
8443         unlock_user(p, arg1, 0);
8444         return ret;
8445 #endif
8446 #ifdef TARGET_NR_ftruncate
8447     case TARGET_NR_ftruncate:
8448         return get_errno(ftruncate(arg1, arg2));
8449 #endif
8450     case TARGET_NR_fchmod:
8451         return get_errno(fchmod(arg1, arg2));
8452 #if defined(TARGET_NR_fchmodat)
8453     case TARGET_NR_fchmodat:
8454         if (!(p = lock_user_string(arg2)))
8455             return -TARGET_EFAULT;
8456         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8457         unlock_user(p, arg2, 0);
8458         return ret;
8459 #endif
8460     case TARGET_NR_getpriority:
8461         /* Note that negative values are valid for getpriority, so we must
8462            differentiate based on errno settings.  */
8463         errno = 0;
8464         ret = getpriority(arg1, arg2);
8465         if (ret == -1 && errno != 0) {
8466             return -host_to_target_errno(errno);
8467         }
8468 #ifdef TARGET_ALPHA
8469         /* Return value is the unbiased priority.  Signal no error.  */
8470         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8471 #else
8472         /* Return value is a biased priority to avoid negative numbers.  */
8473         ret = 20 - ret;
8474 #endif
8475         return ret;
8476     case TARGET_NR_setpriority:
8477         return get_errno(setpriority(arg1, arg2, arg3));
8478 #ifdef TARGET_NR_statfs
8479     case TARGET_NR_statfs:
8480         if (!(p = lock_user_string(arg1))) {
8481             return -TARGET_EFAULT;
8482         }
8483         ret = get_errno(statfs(path(p), &stfs));
8484         unlock_user(p, arg1, 0);
8485     convert_statfs:
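        /* Shared conversion path: TARGET_NR_fstatfs jumps here with 'ret'
         * and 'stfs' already filled in. */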
8486         if (!is_error(ret)) {
8487             struct target_statfs *target_stfs;
8488 
8489             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8490                 return -TARGET_EFAULT;
8491             __put_user(stfs.f_type, &target_stfs->f_type);
8492             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8493             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8494             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8495             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8496             __put_user(stfs.f_files, &target_stfs->f_files);
8497             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8498             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8499             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8500             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8501             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8502 #ifdef _STATFS_F_FLAGS
8503             __put_user(stfs.f_flags, &target_stfs->f_flags);
8504 #else
8505             __put_user(0, &target_stfs->f_flags);
8506 #endif
8507             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8508             unlock_user_struct(target_stfs, arg2, 1);
8509         }
8510         return ret;
8511 #endif
8512 #ifdef TARGET_NR_fstatfs
8513     case TARGET_NR_fstatfs:
8514         ret = get_errno(fstatfs(arg1, &stfs));
8515         goto convert_statfs;
8516 #endif
8517 #ifdef TARGET_NR_statfs64
8518     case TARGET_NR_statfs64:
8519         if (!(p = lock_user_string(arg1))) {
8520             return -TARGET_EFAULT;
8521         }
8522         ret = get_errno(statfs(path(p), &stfs));
8523         unlock_user(p, arg1, 0);
8524     convert_statfs64:
8525         if (!is_error(ret)) {
8526             struct target_statfs64 *target_stfs;
8527 
8528             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8529                 return -TARGET_EFAULT;
8530             __put_user(stfs.f_type, &target_stfs->f_type);
8531             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8532             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8533             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8534             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8535             __put_user(stfs.f_files, &target_stfs->f_files);
8536             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8537             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8538             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8539             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8540             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8541             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8542             unlock_user_struct(target_stfs, arg3, 1);
8543         }
8544         return ret;
8545     case TARGET_NR_fstatfs64:
8546         ret = get_errno(fstatfs(arg1, &stfs));
8547         goto convert_statfs64;
8548 #endif
8549 #ifdef TARGET_NR_socketcall
8550     case TARGET_NR_socketcall:
8551         return do_socketcall(arg1, arg2);
8552 #endif
8553 #ifdef TARGET_NR_accept
8554     case TARGET_NR_accept:
8555         return do_accept4(arg1, arg2, arg3, 0);
8556 #endif
8557 #ifdef TARGET_NR_accept4
8558     case TARGET_NR_accept4:
8559         return do_accept4(arg1, arg2, arg3, arg4);
8560 #endif
8561 #ifdef TARGET_NR_bind
8562     case TARGET_NR_bind:
8563         return do_bind(arg1, arg2, arg3);
8564 #endif
8565 #ifdef TARGET_NR_connect
8566     case TARGET_NR_connect:
8567         return do_connect(arg1, arg2, arg3);
8568 #endif
8569 #ifdef TARGET_NR_getpeername
8570     case TARGET_NR_getpeername:
8571         return do_getpeername(arg1, arg2, arg3);
8572 #endif
8573 #ifdef TARGET_NR_getsockname
8574     case TARGET_NR_getsockname:
8575         return do_getsockname(arg1, arg2, arg3);
8576 #endif
8577 #ifdef TARGET_NR_getsockopt
8578     case TARGET_NR_getsockopt:
8579         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8580 #endif
8581 #ifdef TARGET_NR_listen
8582     case TARGET_NR_listen:
8583         return get_errno(listen(arg1, arg2));
8584 #endif
8585 #ifdef TARGET_NR_recv
8586     case TARGET_NR_recv:
8587         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8588 #endif
8589 #ifdef TARGET_NR_recvfrom
8590     case TARGET_NR_recvfrom:
8591         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8592 #endif
8593 #ifdef TARGET_NR_recvmsg
8594     case TARGET_NR_recvmsg:
8595         return do_sendrecvmsg(arg1, arg2, arg3, 0);
8596 #endif
8597 #ifdef TARGET_NR_send
8598     case TARGET_NR_send:
8599         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8600 #endif
8601 #ifdef TARGET_NR_sendmsg
8602     case TARGET_NR_sendmsg:
8603         return do_sendrecvmsg(arg1, arg2, arg3, 1);
8604 #endif
8605 #ifdef TARGET_NR_sendmmsg
8606     case TARGET_NR_sendmmsg:
8607         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8608     case TARGET_NR_recvmmsg:
8609         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8610 #endif
8611 #ifdef TARGET_NR_sendto
8612     case TARGET_NR_sendto:
8613         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8614 #endif
8615 #ifdef TARGET_NR_shutdown
8616     case TARGET_NR_shutdown:
8617         return get_errno(shutdown(arg1, arg2));
8618 #endif
8619 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8620     case TARGET_NR_getrandom:
8621         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8622         if (!p) {
8623             return -TARGET_EFAULT;
8624         }
8625         ret = get_errno(getrandom(p, arg2, arg3));
8626         unlock_user(p, arg1, ret);
8627         return ret;
8628 #endif
8629 #ifdef TARGET_NR_socket
8630     case TARGET_NR_socket:
8631         return do_socket(arg1, arg2, arg3);
8632 #endif
8633 #ifdef TARGET_NR_socketpair
8634     case TARGET_NR_socketpair:
8635         return do_socketpair(arg1, arg2, arg3, arg4);
8636 #endif
8637 #ifdef TARGET_NR_setsockopt
8638     case TARGET_NR_setsockopt:
8639         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8640 #endif
8641 #if defined(TARGET_NR_syslog)
8642     case TARGET_NR_syslog:
8643         {
8644             int len = arg3;    /* length argument, used by the READ* actions */
8645 
8646             switch (arg1) {
8647             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
8648             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
8649             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
8650             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
8651             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
8652             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8653             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
8654             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
8655                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8656             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
8657             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
8658             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
8659                 {
8660                     if (len < 0) {
8661                         return -TARGET_EINVAL;
8662                     }
8663                     if (len == 0) {
8664                         return 0;
8665                     }
8666                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8667                     if (!p) {
8668                         return -TARGET_EFAULT;
8669                     }
8670                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8671                     unlock_user(p, arg2, arg3);
8672                 }
8673                 return ret;
8674             default:
8675                 return -TARGET_EINVAL;
8676             }
8677         }
8678         break;
8679 #endif
8680     case TARGET_NR_setitimer:
8681         {
8682             struct itimerval value, ovalue, *pvalue;
8683 
8684             if (arg2) {
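                /* The guest's struct itimerval is two target_timevals back
                 * to back: it_interval followed by it_value. */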
8685                 pvalue = &value;
8686                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8687                     || copy_from_user_timeval(&pvalue->it_value,
8688                                               arg2 + sizeof(struct target_timeval)))
8689                     return -TARGET_EFAULT;
8690             } else {
8691                 pvalue = NULL;
8692             }
8693             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8694             if (!is_error(ret) && arg3) {
8695                 if (copy_to_user_timeval(arg3,
8696                                          &ovalue.it_interval)
8697                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8698                                             &ovalue.it_value))
8699                     return -TARGET_EFAULT;
8700             }
8701         }
8702         return ret;
8703     case TARGET_NR_getitimer:
8704         {
8705             struct itimerval value;
8706 
8707             ret = get_errno(getitimer(arg1, &value));
8708             if (!is_error(ret) && arg2) {
8709                 if (copy_to_user_timeval(arg2,
8710                                          &value.it_interval)
8711                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8712                                             &value.it_value))
8713                     return -TARGET_EFAULT;
8714             }
8715         }
8716         return ret;
8717 #ifdef TARGET_NR_stat
8718     case TARGET_NR_stat:
8719         if (!(p = lock_user_string(arg1))) {
8720             return -TARGET_EFAULT;
8721         }
8722         ret = get_errno(stat(path(p), &st));
8723         unlock_user(p, arg1, 0);
8724         goto do_stat;
8725 #endif
8726 #ifdef TARGET_NR_lstat
8727     case TARGET_NR_lstat:
8728         if (!(p = lock_user_string(arg1))) {
8729             return -TARGET_EFAULT;
8730         }
8731         ret = get_errno(lstat(path(p), &st));
8732         unlock_user(p, arg1, 0);
8733         goto do_stat;
8734 #endif
8735 #ifdef TARGET_NR_fstat
8736     case TARGET_NR_fstat:
8737         {
8738             ret = get_errno(fstat(arg1, &st));
8739 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8740         do_stat:
8741 #endif
8742             if (!is_error(ret)) {
8743                 struct target_stat *target_st;
8744 
8745                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8746                     return -TARGET_EFAULT;
8747                 memset(target_st, 0, sizeof(*target_st));
8748                 __put_user(st.st_dev, &target_st->st_dev);
8749                 __put_user(st.st_ino, &target_st->st_ino);
8750                 __put_user(st.st_mode, &target_st->st_mode);
8751                 __put_user(st.st_uid, &target_st->st_uid);
8752                 __put_user(st.st_gid, &target_st->st_gid);
8753                 __put_user(st.st_nlink, &target_st->st_nlink);
8754                 __put_user(st.st_rdev, &target_st->st_rdev);
8755                 __put_user(st.st_size, &target_st->st_size);
8756                 __put_user(st.st_blksize, &target_st->st_blksize);
8757                 __put_user(st.st_blocks, &target_st->st_blocks);
8758                 __put_user(st.st_atime, &target_st->target_st_atime);
8759                 __put_user(st.st_mtime, &target_st->target_st_mtime);
8760                 __put_user(st.st_ctime, &target_st->target_st_ctime);
8761                 unlock_user_struct(target_st, arg2, 1);
8762             }
8763         }
8764         return ret;
8765 #endif
8766     case TARGET_NR_vhangup:
8767         return get_errno(vhangup());
8768 #ifdef TARGET_NR_syscall
8769     case TARGET_NR_syscall:
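        /* Indirect syscall: the number of the syscall to run is passed in
         * arg1 and the remaining arguments shift down by one position. */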
8770         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8771                           arg6, arg7, arg8, 0);
8772 #endif
8773     case TARGET_NR_wait4:
8774         {
8775             int status;
8776             abi_long status_ptr = arg2;
8777             struct rusage rusage, *rusage_ptr;
8778             abi_ulong target_rusage = arg4;
8779             abi_long rusage_err;
8780             if (target_rusage)
8781                 rusage_ptr = &rusage;
8782             else
8783                 rusage_ptr = NULL;
8784             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8785             if (!is_error(ret)) {
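                /* ret is the pid of the reaped child (0 if e.g. WNOHANG found
                 * nothing), so only copy the status back when it is non-zero. */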
8786                 if (status_ptr && ret) {
8787                     status = host_to_target_waitstatus(status);
8788                     if (put_user_s32(status, status_ptr))
8789                         return -TARGET_EFAULT;
8790                 }
8791                 if (target_rusage) {
8792                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
8793                     if (rusage_err) {
8794                         ret = rusage_err;
8795                     }
8796                 }
8797             }
8798         }
8799         return ret;
8800 #ifdef TARGET_NR_swapoff
8801     case TARGET_NR_swapoff:
8802         if (!(p = lock_user_string(arg1)))
8803             return -TARGET_EFAULT;
8804         ret = get_errno(swapoff(p));
8805         unlock_user(p, arg1, 0);
8806         return ret;
8807 #endif
8808     case TARGET_NR_sysinfo:
8809         {
8810             struct target_sysinfo *target_value;
8811             struct sysinfo value;
8812             ret = get_errno(sysinfo(&value));
8813             if (!is_error(ret) && arg1)
8814             {
8815                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8816                     return -TARGET_EFAULT;
8817                 __put_user(value.uptime, &target_value->uptime);
8818                 __put_user(value.loads[0], &target_value->loads[0]);
8819                 __put_user(value.loads[1], &target_value->loads[1]);
8820                 __put_user(value.loads[2], &target_value->loads[2]);
8821                 __put_user(value.totalram, &target_value->totalram);
8822                 __put_user(value.freeram, &target_value->freeram);
8823                 __put_user(value.sharedram, &target_value->sharedram);
8824                 __put_user(value.bufferram, &target_value->bufferram);
8825                 __put_user(value.totalswap, &target_value->totalswap);
8826                 __put_user(value.freeswap, &target_value->freeswap);
8827                 __put_user(value.procs, &target_value->procs);
8828                 __put_user(value.totalhigh, &target_value->totalhigh);
8829                 __put_user(value.freehigh, &target_value->freehigh);
8830                 __put_user(value.mem_unit, &target_value->mem_unit);
8831                 unlock_user_struct(target_value, arg1, 1);
8832             }
8833         }
8834         return ret;
8835 #ifdef TARGET_NR_ipc
8836     case TARGET_NR_ipc:
8837         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
8838 #endif
8839 #ifdef TARGET_NR_semget
8840     case TARGET_NR_semget:
8841         return get_errno(semget(arg1, arg2, arg3));
8842 #endif
8843 #ifdef TARGET_NR_semop
8844     case TARGET_NR_semop:
8845         return do_semop(arg1, arg2, arg3);
8846 #endif
8847 #ifdef TARGET_NR_semctl
8848     case TARGET_NR_semctl:
8849         return do_semctl(arg1, arg2, arg3, arg4);
8850 #endif
8851 #ifdef TARGET_NR_msgctl
8852     case TARGET_NR_msgctl:
8853         return do_msgctl(arg1, arg2, arg3);
8854 #endif
8855 #ifdef TARGET_NR_msgget
8856     case TARGET_NR_msgget:
8857         return get_errno(msgget(arg1, arg2));
8858 #endif
8859 #ifdef TARGET_NR_msgrcv
8860     case TARGET_NR_msgrcv:
8861         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8862 #endif
8863 #ifdef TARGET_NR_msgsnd
8864     case TARGET_NR_msgsnd:
8865         return do_msgsnd(arg1, arg2, arg3, arg4);
8866 #endif
8867 #ifdef TARGET_NR_shmget
8868     case TARGET_NR_shmget:
8869         return get_errno(shmget(arg1, arg2, arg3));
8870 #endif
8871 #ifdef TARGET_NR_shmctl
8872     case TARGET_NR_shmctl:
8873         return do_shmctl(arg1, arg2, arg3);
8874 #endif
8875 #ifdef TARGET_NR_shmat
8876     case TARGET_NR_shmat:
8877         return do_shmat(cpu_env, arg1, arg2, arg3);
8878 #endif
8879 #ifdef TARGET_NR_shmdt
8880     case TARGET_NR_shmdt:
8881         return do_shmdt(arg1);
8882 #endif
8883     case TARGET_NR_fsync:
8884         return get_errno(fsync(arg1));
8885     case TARGET_NR_clone:
8886         /* Linux manages to have three different orderings for its
8887          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8888          * match the kernel's CONFIG_CLONE_* settings.
8889          * Microblaze is further special in that it uses a sixth
8890          * implicit argument to clone for the TLS pointer.
8891          */
8892 #if defined(TARGET_MICROBLAZE)
8893         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
8894 #elif defined(TARGET_CLONE_BACKWARDS)
8895         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
8896 #elif defined(TARGET_CLONE_BACKWARDS2)
8897         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
8898 #else
8899         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
8900 #endif
8901         return ret;
8902 #ifdef __NR_exit_group
8903         /* new thread calls */
8904     case TARGET_NR_exit_group:
8905         preexit_cleanup(cpu_env, arg1);
8906         return get_errno(exit_group(arg1));
8907 #endif
8908     case TARGET_NR_setdomainname:
8909         if (!(p = lock_user_string(arg1)))
8910             return -TARGET_EFAULT;
8911         ret = get_errno(setdomainname(p, arg2));
8912         unlock_user(p, arg1, 0);
8913         return ret;
8914     case TARGET_NR_uname:
8915         /* no need to transcode because we use the linux syscall */
8916         {
8917             struct new_utsname * buf;
8918 
8919             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
8920                 return -TARGET_EFAULT;
8921             ret = get_errno(sys_uname(buf));
8922             if (!is_error(ret)) {
8923                 /* Overwrite the native machine name with whatever is being
8924                    emulated. */
8925                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
8926                           sizeof(buf->machine));
8927                 /* Allow the user to override the reported release.  */
8928                 if (qemu_uname_release && *qemu_uname_release) {
8929                     g_strlcpy(buf->release, qemu_uname_release,
8930                               sizeof(buf->release));
8931                 }
8932             }
8933             unlock_user_struct(buf, arg1, 1);
8934         }
8935         return ret;
8936 #ifdef TARGET_I386
8937     case TARGET_NR_modify_ldt:
8938         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
8939 #if !defined(TARGET_X86_64)
8940     case TARGET_NR_vm86:
8941         return do_vm86(cpu_env, arg1, arg2);
8942 #endif
8943 #endif
8944     case TARGET_NR_adjtimex:
8945         {
8946             struct timex host_buf;
8947 
8948             if (target_to_host_timex(&host_buf, arg1) != 0) {
8949                 return -TARGET_EFAULT;
8950             }
8951             ret = get_errno(adjtimex(&host_buf));
8952             if (!is_error(ret)) {
8953                 if (host_to_target_timex(arg1, &host_buf) != 0) {
8954                     return -TARGET_EFAULT;
8955                 }
8956             }
8957         }
8958         return ret;
8959 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
8960     case TARGET_NR_clock_adjtime:
8961         {
8962             struct timex htx, *phtx = &htx;
8963 
8964             if (target_to_host_timex(phtx, arg2) != 0) {
8965                 return -TARGET_EFAULT;
8966             }
8967             ret = get_errno(clock_adjtime(arg1, phtx));
8968             if (!is_error(ret) && phtx) {
8969                 if (host_to_target_timex(arg2, phtx) != 0) {
8970                     return -TARGET_EFAULT;
8971                 }
8972             }
8973         }
8974         return ret;
8975 #endif
8976     case TARGET_NR_getpgid:
8977         return get_errno(getpgid(arg1));
8978     case TARGET_NR_fchdir:
8979         return get_errno(fchdir(arg1));
8980     case TARGET_NR_personality:
8981         return get_errno(personality(arg1));
8982 #ifdef TARGET_NR__llseek /* Not on alpha */
8983     case TARGET_NR__llseek:
8984         {
8985             int64_t res;
8986 #if !defined(__NR_llseek)
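            /* No host _llseek syscall: combine the two 32-bit halves into a
             * 64-bit offset and call lseek() directly; the result is written
             * back through arg4 below. */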
8987             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
8988             if (res == -1) {
8989                 ret = get_errno(res);
8990             } else {
8991                 ret = 0;
8992             }
8993 #else
8994             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
8995 #endif
8996             if ((ret == 0) && put_user_s64(res, arg4)) {
8997                 return -TARGET_EFAULT;
8998             }
8999         }
9000         return ret;
9001 #endif
9002 #ifdef TARGET_NR_getdents
9003     case TARGET_NR_getdents:
9004 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9005 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9006         {
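            /*
             * 32-bit guest on a 64-bit host: the host's linux_dirent records
             * use long-sized d_ino/d_off, so read them into a bounce buffer
             * and repack each record into the smaller target_dirent layout
             * in the guest buffer, adjusting d_reclen to match.
             */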
9007             struct target_dirent *target_dirp;
9008             struct linux_dirent *dirp;
9009             abi_long count = arg3;
9010 
9011             dirp = g_try_malloc(count);
9012             if (!dirp) {
9013                 return -TARGET_ENOMEM;
9014             }
9015 
9016             ret = get_errno(sys_getdents(arg1, dirp, count));
9017             if (!is_error(ret)) {
9018                 struct linux_dirent *de;
9019                 struct target_dirent *tde;
9020                 int len = ret;
9021                 int reclen, treclen;
9022                 int count1, tnamelen;
9023 
9024                 count1 = 0;
9025                 de = dirp;
9026                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9027                     return -TARGET_EFAULT;
9028                 tde = target_dirp;
9029                 while (len > 0) {
9030                     reclen = de->d_reclen;
9031                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9032                     assert(tnamelen >= 0);
9033                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9034                     assert(count1 + treclen <= count);
9035                     tde->d_reclen = tswap16(treclen);
9036                     tde->d_ino = tswapal(de->d_ino);
9037                     tde->d_off = tswapal(de->d_off);
9038                     memcpy(tde->d_name, de->d_name, tnamelen);
9039                     de = (struct linux_dirent *)((char *)de + reclen);
9040                     len -= reclen;
9041                     tde = (struct target_dirent *)((char *)tde + treclen);
9042                     count1 += treclen;
9043                 }
9044                 ret = count1;
9045                 unlock_user(target_dirp, arg2, ret);
9046             }
9047             g_free(dirp);
9048         }
9049 #else
9050         {
9051             struct linux_dirent *dirp;
9052             abi_long count = arg3;
9053 
9054             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9055                 return -TARGET_EFAULT;
9056             ret = get_errno(sys_getdents(arg1, dirp, count));
9057             if (!is_error(ret)) {
9058                 struct linux_dirent *de;
9059                 int len = ret;
9060                 int reclen;
9061                 de = dirp;
9062                 while (len > 0) {
9063                     reclen = de->d_reclen;
9064                     if (reclen > len)
9065                         break;
9066                     de->d_reclen = tswap16(reclen);
9067                     tswapls(&de->d_ino);
9068                     tswapls(&de->d_off);
9069                     de = (struct linux_dirent *)((char *)de + reclen);
9070                     len -= reclen;
9071                 }
9072             }
9073             unlock_user(dirp, arg2, ret);
9074         }
9075 #endif
9076 #else
9077         /* Implement getdents in terms of getdents64 */
9078         {
9079             struct linux_dirent64 *dirp;
9080             abi_long count = arg3;
9081 
9082             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9083             if (!dirp) {
9084                 return -TARGET_EFAULT;
9085             }
9086             ret = get_errno(sys_getdents64(arg1, dirp, count));
9087             if (!is_error(ret)) {
9088                 /* Convert the dirent64 structs to target dirent.  We do this
9089                  * in-place, since we can guarantee that a target_dirent is no
9090                  * larger than a dirent64; however this means we have to be
9091                  * careful to read everything before writing in the new format.
9092                  */
9093                 struct linux_dirent64 *de;
9094                 struct target_dirent *tde;
9095                 int len = ret;
9096                 int tlen = 0;
9097 
9098                 de = dirp;
9099                 tde = (struct target_dirent *)dirp;
9100                 while (len > 0) {
9101                     int namelen, treclen;
9102                     int reclen = de->d_reclen;
9103                     uint64_t ino = de->d_ino;
9104                     int64_t off = de->d_off;
9105                     uint8_t type = de->d_type;
9106 
9107                     namelen = strlen(de->d_name);
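                    /* The converted record needs room for the name, its
                     * trailing NUL and the d_type byte stored after it
                     * (hence the "+ 2"), rounded up to abi_long alignment. */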
9108                     treclen = offsetof(struct target_dirent, d_name)
9109                         + namelen + 2;
9110                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9111 
9112                     memmove(tde->d_name, de->d_name, namelen + 1);
9113                     tde->d_ino = tswapal(ino);
9114                     tde->d_off = tswapal(off);
9115                     tde->d_reclen = tswap16(treclen);
9116                     /* The target_dirent type is in what was formerly a padding
9117                      * byte at the end of the structure:
9118                      */
9119                     *(((char *)tde) + treclen - 1) = type;
9120 
9121                     de = (struct linux_dirent64 *)((char *)de + reclen);
9122                     tde = (struct target_dirent *)((char *)tde + treclen);
9123                     len -= reclen;
9124                     tlen += treclen;
9125                 }
9126                 ret = tlen;
9127             }
9128             unlock_user(dirp, arg2, ret);
9129         }
9130 #endif
9131         return ret;
9132 #endif /* TARGET_NR_getdents */
9133 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9134     case TARGET_NR_getdents64:
9135         {
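            /* The host getdents64 fills the guest buffer directly; the
             * linux_dirent64 layout is the same on host and target, so only
             * the multi-byte fields need to be byteswapped in place below.
             */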
9136             struct linux_dirent64 *dirp;
9137             abi_long count = arg3;
9138             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9139                 return -TARGET_EFAULT;
9140             ret = get_errno(sys_getdents64(arg1, dirp, count));
9141             if (!is_error(ret)) {
9142                 struct linux_dirent64 *de;
9143                 int len = ret;
9144                 int reclen;
9145                 de = dirp;
9146                 while (len > 0) {
9147                     reclen = de->d_reclen;
9148                     if (reclen > len)
9149                         break;
9150                     de->d_reclen = tswap16(reclen);
9151                     tswap64s((uint64_t *)&de->d_ino);
9152                     tswap64s((uint64_t *)&de->d_off);
9153                     de = (struct linux_dirent64 *)((char *)de + reclen);
9154                     len -= reclen;
9155                 }
9156             }
9157             unlock_user(dirp, arg2, ret);
9158         }
9159         return ret;
9160 #endif /* TARGET_NR_getdents64 */
9161 #if defined(TARGET_NR__newselect)
9162     case TARGET_NR__newselect:
9163         return do_select(arg1, arg2, arg3, arg4, arg5);
9164 #endif
9165 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9166 # ifdef TARGET_NR_poll
9167     case TARGET_NR_poll:
9168 # endif
9169 # ifdef TARGET_NR_ppoll
9170     case TARGET_NR_ppoll:
9171 # endif
9172         {
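            /* poll and ppoll share the pollfd handling: copy the guest
             * pollfd array into host format, issue the syscall, and write
             * the revents fields back to the guest on success.
             */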
9173             struct target_pollfd *target_pfd;
9174             unsigned int nfds = arg2;
9175             struct pollfd *pfd;
9176             unsigned int i;
9177 
9178             pfd = NULL;
9179             target_pfd = NULL;
9180             if (nfds) {
9181                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9182                     return -TARGET_EINVAL;
9183                 }
9184 
9185                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9186                                        sizeof(struct target_pollfd) * nfds, 1);
9187                 if (!target_pfd) {
9188                     return -TARGET_EFAULT;
9189                 }
9190 
9191                 pfd = alloca(sizeof(struct pollfd) * nfds);
9192                 for (i = 0; i < nfds; i++) {
9193                     pfd[i].fd = tswap32(target_pfd[i].fd);
9194                     pfd[i].events = tswap16(target_pfd[i].events);
9195                 }
9196             }
9197 
9198             switch (num) {
9199 # ifdef TARGET_NR_ppoll
9200             case TARGET_NR_ppoll:
9201             {
9202                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9203                 target_sigset_t *target_set;
9204                 sigset_t _set, *set = &_set;
9205 
9206                 if (arg3) {
9207                     if (target_to_host_timespec(timeout_ts, arg3)) {
9208                         unlock_user(target_pfd, arg1, 0);
9209                         return -TARGET_EFAULT;
9210                     }
9211                 } else {
9212                     timeout_ts = NULL;
9213                 }
9214 
9215                 if (arg4) {
9216                     if (arg5 != sizeof(target_sigset_t)) {
9217                         unlock_user(target_pfd, arg1, 0);
9218                         return -TARGET_EINVAL;
9219                     }
9220 
9221                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9222                     if (!target_set) {
9223                         unlock_user(target_pfd, arg1, 0);
9224                         return -TARGET_EFAULT;
9225                     }
9226                     target_to_host_sigset(set, target_set);
9227                 } else {
9228                     set = NULL;
9229                 }
9230 
9231                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9232                                            set, SIGSET_T_SIZE));
9233 
9234                 if (!is_error(ret) && arg3) {
9235                     host_to_target_timespec(arg3, timeout_ts);
9236                 }
9237                 if (arg4) {
9238                     unlock_user(target_set, arg4, 0);
9239                 }
9240                 break;
9241             }
9242 # endif
9243 # ifdef TARGET_NR_poll
9244             case TARGET_NR_poll:
9245             {
9246                 struct timespec ts, *pts;
9247 
9248                 if (arg3 >= 0) {
9249                     /* Convert ms to secs, ns */
9250                     ts.tv_sec = arg3 / 1000;
9251                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9252                     pts = &ts;
9253                 } else {
9254                     /* A negative poll() timeout means "infinite" */
9255                     pts = NULL;
9256                 }
9257                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9258                 break;
9259             }
9260 # endif
9261             default:
9262                 g_assert_not_reached();
9263             }
9264 
9265             if (!is_error(ret)) {
9266                 for (i = 0; i < nfds; i++) {
9267                     target_pfd[i].revents = tswap16(pfd[i].revents);
9268                 }
9269             }
9270             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9271         }
9272         return ret;
9273 #endif
9274     case TARGET_NR_flock:
9275         /* NOTE: the flock constant seems to be the same for every
9276            Linux platform */
9277         return get_errno(safe_flock(arg1, arg2));
9278     case TARGET_NR_readv:
9279         {
9280             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9281             if (vec != NULL) {
9282                 ret = get_errno(safe_readv(arg1, vec, arg3));
9283                 unlock_iovec(vec, arg2, arg3, 1);
9284             } else {
9285                 ret = -host_to_target_errno(errno);
9286             }
9287         }
9288         return ret;
9289     case TARGET_NR_writev:
9290         {
9291             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9292             if (vec != NULL) {
9293                 ret = get_errno(safe_writev(arg1, vec, arg3));
9294                 unlock_iovec(vec, arg2, arg3, 0);
9295             } else {
9296                 ret = -host_to_target_errno(errno);
9297             }
9298         }
9299         return ret;
9300 #if defined(TARGET_NR_preadv)
9301     case TARGET_NR_preadv:
9302         {
9303             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9304             if (vec != NULL) {
9305                 unsigned long low, high;
9306 
9307                 target_to_host_low_high(arg4, arg5, &low, &high);
9308                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9309                 unlock_iovec(vec, arg2, arg3, 1);
9310             } else {
9311                 ret = -host_to_target_errno(errno);
9312             }
9313         }
9314         return ret;
9315 #endif
9316 #if defined(TARGET_NR_pwritev)
9317     case TARGET_NR_pwritev:
9318         {
9319             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9320             if (vec != NULL) {
9321                 unsigned long low, high;
9322 
9323                 target_to_host_low_high(arg4, arg5, &low, &high);
9324                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9325                 unlock_iovec(vec, arg2, arg3, 0);
9326             } else {
9327                 ret = -host_to_target_errno(errno);
9328             }
9329         }
9330         return ret;
9331 #endif
9332     case TARGET_NR_getsid:
9333         return get_errno(getsid(arg1));
9334 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9335     case TARGET_NR_fdatasync:
9336         return get_errno(fdatasync(arg1));
9337 #endif
9338 #ifdef TARGET_NR__sysctl
9339     case TARGET_NR__sysctl:
9340         /* We don't implement this, but ENOTDIR is always a safe
9341            return value. */
9342         return -TARGET_ENOTDIR;
9343 #endif
9344     case TARGET_NR_sched_getaffinity:
9345         {
9346             unsigned int mask_size;
9347             unsigned long *mask;
9348 
9349             /*
9350              * sched_getaffinity needs multiples of ulong, so we need to take
9351              * care of mismatches between target ulong and host ulong sizes.
9352              */
9353             if (arg2 & (sizeof(abi_ulong) - 1)) {
9354                 return -TARGET_EINVAL;
9355             }
9356             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9357 
9358             mask = alloca(mask_size);
9359             memset(mask, 0, mask_size);
9360             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9361 
9362             if (!is_error(ret)) {
9363                 if (ret > arg2) {
9364                     /* More data returned than fits in the caller's buffer.
9365                      * This only happens if sizeof(abi_long) < sizeof(long)
9366                      * and the caller passed us a buffer holding an odd number
9367                      * of abi_longs. If the host kernel is actually using the
9368                      * extra 4 bytes then fail EINVAL; otherwise we can just
9369                      * ignore them and only copy the interesting part.
9370                      */
9371                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9372                     if (numcpus > arg2 * 8) {
9373                         return -TARGET_EINVAL;
9374                     }
9375                     ret = arg2;
9376                 }
9377 
9378                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9379                     return -TARGET_EFAULT;
9380                 }
9381             }
9382         }
9383         return ret;
9384     case TARGET_NR_sched_setaffinity:
9385         {
9386             unsigned int mask_size;
9387             unsigned long *mask;
9388 
9389             /*
9390              * sched_setaffinity needs multiples of ulong, so we need to take
9391              * care of mismatches between target ulong and host ulong sizes.
9392              */
9393             if (arg2 & (sizeof(abi_ulong) - 1)) {
9394                 return -TARGET_EINVAL;
9395             }
9396             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9397             mask = alloca(mask_size);
9398 
9399             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9400             if (ret) {
9401                 return ret;
9402             }
9403 
9404             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9405         }
9406     case TARGET_NR_getcpu:
9407         {
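            /* The third getcpu argument (tcache) is unused by modern
             * kernels and is always passed as NULL here; cpu and node are
             * only copied back if the guest supplied pointers for them.
             */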
9408             unsigned cpu, node;
9409             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9410                                        arg2 ? &node : NULL,
9411                                        NULL));
9412             if (is_error(ret)) {
9413                 return ret;
9414             }
9415             if (arg1 && put_user_u32(cpu, arg1)) {
9416                 return -TARGET_EFAULT;
9417             }
9418             if (arg2 && put_user_u32(node, arg2)) {
9419                 return -TARGET_EFAULT;
9420             }
9421         }
9422         return ret;
9423     case TARGET_NR_sched_setparam:
9424         {
9425             struct sched_param *target_schp;
9426             struct sched_param schp;
9427 
9428             if (arg2 == 0) {
9429                 return -TARGET_EINVAL;
9430             }
9431             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9432                 return -TARGET_EFAULT;
9433             schp.sched_priority = tswap32(target_schp->sched_priority);
9434             unlock_user_struct(target_schp, arg2, 0);
9435             return get_errno(sched_setparam(arg1, &schp));
9436         }
9437     case TARGET_NR_sched_getparam:
9438         {
9439             struct sched_param *target_schp;
9440             struct sched_param schp;
9441 
9442             if (arg2 == 0) {
9443                 return -TARGET_EINVAL;
9444             }
9445             ret = get_errno(sched_getparam(arg1, &schp));
9446             if (!is_error(ret)) {
9447                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9448                     return -TARGET_EFAULT;
9449                 target_schp->sched_priority = tswap32(schp.sched_priority);
9450                 unlock_user_struct(target_schp, arg2, 1);
9451             }
9452         }
9453         return ret;
9454     case TARGET_NR_sched_setscheduler:
9455         {
9456             struct sched_param *target_schp;
9457             struct sched_param schp;
9458             if (arg3 == 0) {
9459                 return -TARGET_EINVAL;
9460             }
9461             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9462                 return -TARGET_EFAULT;
9463             schp.sched_priority = tswap32(target_schp->sched_priority);
9464             unlock_user_struct(target_schp, arg3, 0);
9465             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9466         }
9467     case TARGET_NR_sched_getscheduler:
9468         return get_errno(sched_getscheduler(arg1));
9469     case TARGET_NR_sched_yield:
9470         return get_errno(sched_yield());
9471     case TARGET_NR_sched_get_priority_max:
9472         return get_errno(sched_get_priority_max(arg1));
9473     case TARGET_NR_sched_get_priority_min:
9474         return get_errno(sched_get_priority_min(arg1));
9475     case TARGET_NR_sched_rr_get_interval:
9476         {
9477             struct timespec ts;
9478             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9479             if (!is_error(ret)) {
9480                 ret = host_to_target_timespec(arg2, &ts);
9481             }
9482         }
9483         return ret;
9484     case TARGET_NR_nanosleep:
9485         {
9486             struct timespec req, rem;
9487             if (target_to_host_timespec(&req, arg1))
9488                 return -TARGET_EFAULT;
9489             ret = get_errno(safe_nanosleep(&req, &rem));
9490             if (is_error(ret) && arg2)
9491                 host_to_target_timespec(arg2, &rem);
9492         }
9493         return ret;
9494     case TARGET_NR_prctl:
9495         switch (arg1) {
9496         case PR_GET_PDEATHSIG:
9497         {
9498             int deathsig;
9499             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9500             if (!is_error(ret) && arg2
9501                 && put_user_ual(deathsig, arg2)) {
9502                 return -TARGET_EFAULT;
9503             }
9504             return ret;
9505         }
9506 #ifdef PR_GET_NAME
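        /* The prctl name buffer is 16 bytes (the kernel's TASK_COMM_LEN),
         * including the trailing NUL, hence the fixed-size lock_user calls.
         */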
9507         case PR_GET_NAME:
9508         {
9509             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9510             if (!name) {
9511                 return -TARGET_EFAULT;
9512             }
9513             ret = get_errno(prctl(arg1, (unsigned long)name,
9514                                   arg3, arg4, arg5));
9515             unlock_user(name, arg2, 16);
9516             return ret;
9517         }
9518         case PR_SET_NAME:
9519         {
9520             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9521             if (!name) {
9522                 return -TARGET_EFAULT;
9523             }
9524             ret = get_errno(prctl(arg1, (unsigned long)name,
9525                                   arg3, arg4, arg5));
9526             unlock_user(name, arg2, 0);
9527             return ret;
9528         }
9529 #endif
9530 #ifdef TARGET_MIPS
9531         case TARGET_PR_GET_FP_MODE:
9532         {
9533             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9534             ret = 0;
9535             if (env->CP0_Status & (1 << CP0St_FR)) {
9536                 ret |= TARGET_PR_FP_MODE_FR;
9537             }
9538             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9539                 ret |= TARGET_PR_FP_MODE_FRE;
9540             }
9541             return ret;
9542         }
9543         case TARGET_PR_SET_FP_MODE:
9544         {
9545             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9546             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9547             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9548             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9549 
9550             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9551                 /* FR1 is not supported */
9552                 return -TARGET_EOPNOTSUPP;
9553             }
9554             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9555                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9556                 /* cannot set FR=0 */
9557                 return -TARGET_EOPNOTSUPP;
9558             }
9559             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9560                 /* Cannot set FRE=1 */
9561                 return -TARGET_EOPNOTSUPP;
9562             }
9563 
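            /* Changing the FR mode changes where the odd-numbered single
             * precision registers live, so move their contents between the
             * upper half of the even 64-bit register and the low half of
             * the odd register accordingly.
             */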
9564             int i;
9565             fpr_t *fpr = env->active_fpu.fpr;
9566             for (i = 0; i < 32 ; i += 2) {
9567                 if (!old_fr && new_fr) {
9568                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9569                 } else if (old_fr && !new_fr) {
9570                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
9571                 }
9572             }
9573 
9574             if (new_fr) {
9575                 env->CP0_Status |= (1 << CP0St_FR);
9576                 env->hflags |= MIPS_HFLAG_F64;
9577             } else {
9578                 env->CP0_Status &= ~(1 << CP0St_FR);
9579             }
9580             if (new_fre) {
9581                 env->CP0_Config5 |= (1 << CP0C5_FRE);
9582                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
9583                     env->hflags |= MIPS_HFLAG_FRE;
9584                 }
9585             } else {
9586                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
9587             }
9588 
9589             return 0;
9590         }
9591 #endif /* MIPS */
9592 #ifdef TARGET_AARCH64
9593         case TARGET_PR_SVE_SET_VL:
9594             /*
9595              * We cannot support either PR_SVE_SET_VL_ONEXEC or
9596              * PR_SVE_VL_INHERIT.  Note the kernel definition
9597              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9598              * even though the current architectural maximum is VQ=16.
9599              */
9600             ret = -TARGET_EINVAL;
9601             if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env))
9602                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9603                 CPUARMState *env = cpu_env;
9604                 ARMCPU *cpu = arm_env_get_cpu(env);
9605                 uint32_t vq, old_vq;
9606 
9607                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9608                 vq = MAX(arg2 / 16, 1);
9609                 vq = MIN(vq, cpu->sve_max_vq);
9610 
9611                 if (vq < old_vq) {
9612                     aarch64_sve_narrow_vq(env, vq);
9613                 }
9614                 env->vfp.zcr_el[1] = vq - 1;
9615                 ret = vq * 16;
9616             }
9617             return ret;
9618         case TARGET_PR_SVE_GET_VL:
9619             ret = -TARGET_EINVAL;
9620             {
9621                 ARMCPU *cpu = arm_env_get_cpu(cpu_env);
9622                 if (cpu_isar_feature(aa64_sve, cpu)) {
9623                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9624                 }
9625             }
9626             return ret;
9627 #endif /* AARCH64 */
9628         case PR_GET_SECCOMP:
9629         case PR_SET_SECCOMP:
9630             /* Disable seccomp to prevent the target from disabling
9631              * syscalls that we need. */
9632             return -TARGET_EINVAL;
9633         default:
9634             /* Most prctl options have no pointer arguments */
9635             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9636         }
9637         break;
9638 #ifdef TARGET_NR_arch_prctl
9639     case TARGET_NR_arch_prctl:
9640 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9641         return do_arch_prctl(cpu_env, arg1, arg2);
9642 #else
9643 #error unreachable
9644 #endif
9645 #endif
9646 #ifdef TARGET_NR_pread64
9647     case TARGET_NR_pread64:
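        /* On ABIs that pass 64-bit syscall arguments in aligned register
         * pairs the offset starts one argument slot later, so shift the
         * arguments down before assembling the 64-bit offset.
         */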
9648         if (regpairs_aligned(cpu_env, num)) {
9649             arg4 = arg5;
9650             arg5 = arg6;
9651         }
9652         if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9653             return -TARGET_EFAULT;
9654         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9655         unlock_user(p, arg2, ret);
9656         return ret;
9657     case TARGET_NR_pwrite64:
9658         if (regpairs_aligned(cpu_env, num)) {
9659             arg4 = arg5;
9660             arg5 = arg6;
9661         }
9662         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9663             return -TARGET_EFAULT;
9664         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9665         unlock_user(p, arg2, 0);
9666         return ret;
9667 #endif
9668     case TARGET_NR_getcwd:
9669         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9670             return -TARGET_EFAULT;
9671         ret = get_errno(sys_getcwd1(p, arg2));
9672         unlock_user(p, arg1, ret);
9673         return ret;
9674     case TARGET_NR_capget:
9675     case TARGET_NR_capset:
9676     {
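        /* Translate the capability header and data between guest and host
         * layouts.  Capability version 1 uses a single data struct, while
         * versions 2 and 3 use two (covering capabilities 0-31 and 32-63).
         */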
9677         struct target_user_cap_header *target_header;
9678         struct target_user_cap_data *target_data = NULL;
9679         struct __user_cap_header_struct header;
9680         struct __user_cap_data_struct data[2];
9681         struct __user_cap_data_struct *dataptr = NULL;
9682         int i, target_datalen;
9683         int data_items = 1;
9684 
9685         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9686             return -TARGET_EFAULT;
9687         }
9688         header.version = tswap32(target_header->version);
9689         header.pid = tswap32(target_header->pid);
9690 
9691         if (header.version != _LINUX_CAPABILITY_VERSION) {
9692             /* Versions 2 and up take a pointer to two user_data structs */
9693             data_items = 2;
9694         }
9695 
9696         target_datalen = sizeof(*target_data) * data_items;
9697 
9698         if (arg2) {
9699             if (num == TARGET_NR_capget) {
9700                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9701             } else {
9702                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9703             }
9704             if (!target_data) {
9705                 unlock_user_struct(target_header, arg1, 0);
9706                 return -TARGET_EFAULT;
9707             }
9708 
9709             if (num == TARGET_NR_capset) {
9710                 for (i = 0; i < data_items; i++) {
9711                     data[i].effective = tswap32(target_data[i].effective);
9712                     data[i].permitted = tswap32(target_data[i].permitted);
9713                     data[i].inheritable = tswap32(target_data[i].inheritable);
9714                 }
9715             }
9716 
9717             dataptr = data;
9718         }
9719 
9720         if (num == TARGET_NR_capget) {
9721             ret = get_errno(capget(&header, dataptr));
9722         } else {
9723             ret = get_errno(capset(&header, dataptr));
9724         }
9725 
9726         /* The kernel always updates version for both capget and capset */
9727         target_header->version = tswap32(header.version);
9728         unlock_user_struct(target_header, arg1, 1);
9729 
9730         if (arg2) {
9731             if (num == TARGET_NR_capget) {
9732                 for (i = 0; i < data_items; i++) {
9733                     target_data[i].effective = tswap32(data[i].effective);
9734                     target_data[i].permitted = tswap32(data[i].permitted);
9735                     target_data[i].inheritable = tswap32(data[i].inheritable);
9736                 }
9737                 unlock_user(target_data, arg2, target_datalen);
9738             } else {
9739                 unlock_user(target_data, arg2, 0);
9740             }
9741         }
9742         return ret;
9743     }
9744     case TARGET_NR_sigaltstack:
9745         return do_sigaltstack(arg1, arg2,
9746                               get_sp_from_cpustate((CPUArchState *)cpu_env));
9747 
9748 #ifdef CONFIG_SENDFILE
9749 #ifdef TARGET_NR_sendfile
9750     case TARGET_NR_sendfile:
9751     {
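        /* sendfile updates the offset through its pointer argument, so the
         * guest value is copied in beforehand and written back afterwards.
         */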
9752         off_t *offp = NULL;
9753         off_t off;
9754         if (arg3) {
9755             ret = get_user_sal(off, arg3);
9756             if (is_error(ret)) {
9757                 return ret;
9758             }
9759             offp = &off;
9760         }
9761         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9762         if (!is_error(ret) && arg3) {
9763             abi_long ret2 = put_user_sal(off, arg3);
9764             if (is_error(ret2)) {
9765                 ret = ret2;
9766             }
9767         }
9768         return ret;
9769     }
9770 #endif
9771 #ifdef TARGET_NR_sendfile64
9772     case TARGET_NR_sendfile64:
9773     {
9774         off_t *offp = NULL;
9775         off_t off;
9776         if (arg3) {
9777             ret = get_user_s64(off, arg3);
9778             if (is_error(ret)) {
9779                 return ret;
9780             }
9781             offp = &off;
9782         }
9783         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9784         if (!is_error(ret) && arg3) {
9785             abi_long ret2 = put_user_s64(off, arg3);
9786             if (is_error(ret2)) {
9787                 ret = ret2;
9788             }
9789         }
9790         return ret;
9791     }
9792 #endif
9793 #endif
9794 #ifdef TARGET_NR_vfork
9795     case TARGET_NR_vfork:
9796         return get_errno(do_fork(cpu_env,
9797                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
9798                          0, 0, 0, 0));
9799 #endif
9800 #ifdef TARGET_NR_ugetrlimit
9801     case TARGET_NR_ugetrlimit:
9802     {
9803         struct rlimit rlim;
9804         int resource = target_to_host_resource(arg1);
9805         ret = get_errno(getrlimit(resource, &rlim));
9806         if (!is_error(ret)) {
9807             struct target_rlimit *target_rlim;
9808             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9809                 return -TARGET_EFAULT;
9810             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9811             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9812             unlock_user_struct(target_rlim, arg2, 1);
9813         }
9814         return ret;
9815     }
9816 #endif
9817 #ifdef TARGET_NR_truncate64
9818     case TARGET_NR_truncate64:
9819         if (!(p = lock_user_string(arg1)))
9820             return -TARGET_EFAULT;
9821         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
9822         unlock_user(p, arg1, 0);
9823         return ret;
9824 #endif
9825 #ifdef TARGET_NR_ftruncate64
9826     case TARGET_NR_ftruncate64:
9827         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
9828 #endif
9829 #ifdef TARGET_NR_stat64
9830     case TARGET_NR_stat64:
9831         if (!(p = lock_user_string(arg1))) {
9832             return -TARGET_EFAULT;
9833         }
9834         ret = get_errno(stat(path(p), &st));
9835         unlock_user(p, arg1, 0);
9836         if (!is_error(ret))
9837             ret = host_to_target_stat64(cpu_env, arg2, &st);
9838         return ret;
9839 #endif
9840 #ifdef TARGET_NR_lstat64
9841     case TARGET_NR_lstat64:
9842         if (!(p = lock_user_string(arg1))) {
9843             return -TARGET_EFAULT;
9844         }
9845         ret = get_errno(lstat(path(p), &st));
9846         unlock_user(p, arg1, 0);
9847         if (!is_error(ret))
9848             ret = host_to_target_stat64(cpu_env, arg2, &st);
9849         return ret;
9850 #endif
9851 #ifdef TARGET_NR_fstat64
9852     case TARGET_NR_fstat64:
9853         ret = get_errno(fstat(arg1, &st));
9854         if (!is_error(ret))
9855             ret = host_to_target_stat64(cpu_env, arg2, &st);
9856         return ret;
9857 #endif
9858 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9859 #ifdef TARGET_NR_fstatat64
9860     case TARGET_NR_fstatat64:
9861 #endif
9862 #ifdef TARGET_NR_newfstatat
9863     case TARGET_NR_newfstatat:
9864 #endif
9865         if (!(p = lock_user_string(arg2))) {
9866             return -TARGET_EFAULT;
9867         }
9868         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
9869         unlock_user(p, arg2, 0);
9870         if (!is_error(ret))
9871             ret = host_to_target_stat64(cpu_env, arg3, &st);
9872         return ret;
9873 #endif
9874 #ifdef TARGET_NR_lchown
9875     case TARGET_NR_lchown:
9876         if (!(p = lock_user_string(arg1)))
9877             return -TARGET_EFAULT;
9878         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
9879         unlock_user(p, arg1, 0);
9880         return ret;
9881 #endif
9882 #ifdef TARGET_NR_getuid
9883     case TARGET_NR_getuid:
9884         return get_errno(high2lowuid(getuid()));
9885 #endif
9886 #ifdef TARGET_NR_getgid
9887     case TARGET_NR_getgid:
9888         return get_errno(high2lowgid(getgid()));
9889 #endif
9890 #ifdef TARGET_NR_geteuid
9891     case TARGET_NR_geteuid:
9892         return get_errno(high2lowuid(geteuid()));
9893 #endif
9894 #ifdef TARGET_NR_getegid
9895     case TARGET_NR_getegid:
9896         return get_errno(high2lowgid(getegid()));
9897 #endif
9898     case TARGET_NR_setreuid:
9899         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
9900     case TARGET_NR_setregid:
9901         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
9902     case TARGET_NR_getgroups:
9903         {
9904             int gidsetsize = arg1;
9905             target_id *target_grouplist;
9906             gid_t *grouplist;
9907             int i;
9908 
9909             grouplist = alloca(gidsetsize * sizeof(gid_t));
9910             ret = get_errno(getgroups(gidsetsize, grouplist));
9911             if (gidsetsize == 0)
9912                 return ret;
9913             if (!is_error(ret)) {
9914                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
9915                 if (!target_grouplist)
9916                     return -TARGET_EFAULT;
9917                 for (i = 0; i < ret; i++)
9918                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
9919                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
9920             }
9921         }
9922         return ret;
9923     case TARGET_NR_setgroups:
9924         {
9925             int gidsetsize = arg1;
9926             target_id *target_grouplist;
9927             gid_t *grouplist = NULL;
9928             int i;
9929             if (gidsetsize) {
9930                 grouplist = alloca(gidsetsize * sizeof(gid_t));
9931                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
9932                 if (!target_grouplist) {
9933                     return -TARGET_EFAULT;
9934                 }
9935                 for (i = 0; i < gidsetsize; i++) {
9936                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
9937                 }
9938                 unlock_user(target_grouplist, arg2, 0);
9939             }
9940             return get_errno(setgroups(gidsetsize, grouplist));
9941         }
9942     case TARGET_NR_fchown:
9943         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
9944 #if defined(TARGET_NR_fchownat)
9945     case TARGET_NR_fchownat:
9946         if (!(p = lock_user_string(arg2)))
9947             return -TARGET_EFAULT;
9948         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
9949                                  low2highgid(arg4), arg5));
9950         unlock_user(p, arg2, 0);
9951         return ret;
9952 #endif
9953 #ifdef TARGET_NR_setresuid
9954     case TARGET_NR_setresuid:
9955         return get_errno(sys_setresuid(low2highuid(arg1),
9956                                        low2highuid(arg2),
9957                                        low2highuid(arg3)));
9958 #endif
9959 #ifdef TARGET_NR_getresuid
9960     case TARGET_NR_getresuid:
9961         {
9962             uid_t ruid, euid, suid;
9963             ret = get_errno(getresuid(&ruid, &euid, &suid));
9964             if (!is_error(ret)) {
9965                 if (put_user_id(high2lowuid(ruid), arg1)
9966                     || put_user_id(high2lowuid(euid), arg2)
9967                     || put_user_id(high2lowuid(suid), arg3))
9968                     return -TARGET_EFAULT;
9969             }
9970         }
9971         return ret;
9972 #endif
9973 #ifdef TARGET_NR_getresgid
9974     case TARGET_NR_setresgid:
9975         return get_errno(sys_setresgid(low2highgid(arg1),
9976                                        low2highgid(arg2),
9977                                        low2highgid(arg3)));
9978 #endif
9979 #ifdef TARGET_NR_getresgid
9980     case TARGET_NR_getresgid:
9981         {
9982             gid_t rgid, egid, sgid;
9983             ret = get_errno(getresgid(&rgid, &egid, &sgid));
9984             if (!is_error(ret)) {
9985                 if (put_user_id(high2lowgid(rgid), arg1)
9986                     || put_user_id(high2lowgid(egid), arg2)
9987                     || put_user_id(high2lowgid(sgid), arg3))
9988                     return -TARGET_EFAULT;
9989             }
9990         }
9991         return ret;
9992 #endif
9993 #ifdef TARGET_NR_chown
9994     case TARGET_NR_chown:
9995         if (!(p = lock_user_string(arg1)))
9996             return -TARGET_EFAULT;
9997         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
9998         unlock_user(p, arg1, 0);
9999         return ret;
10000 #endif
10001     case TARGET_NR_setuid:
10002         return get_errno(sys_setuid(low2highuid(arg1)));
10003     case TARGET_NR_setgid:
10004         return get_errno(sys_setgid(low2highgid(arg1)));
10005     case TARGET_NR_setfsuid:
10006         return get_errno(setfsuid(arg1));
10007     case TARGET_NR_setfsgid:
10008         return get_errno(setfsgid(arg1));
10009 
10010 #ifdef TARGET_NR_lchown32
10011     case TARGET_NR_lchown32:
10012         if (!(p = lock_user_string(arg1)))
10013             return -TARGET_EFAULT;
10014         ret = get_errno(lchown(p, arg2, arg3));
10015         unlock_user(p, arg1, 0);
10016         return ret;
10017 #endif
10018 #ifdef TARGET_NR_getuid32
10019     case TARGET_NR_getuid32:
10020         return get_errno(getuid());
10021 #endif
10022 
10023 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10024    /* Alpha specific */
10025     case TARGET_NR_getxuid:
10026          {
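            /* getxuid returns the real uid as the syscall result and the
             * effective uid in register a4, where Alpha's two-result
             * syscalls place their second value.
             */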
10027             uid_t euid;
10028             euid = geteuid();
10029             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10030          }
10031         return get_errno(getuid());
10032 #endif
10033 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10034    /* Alpha specific */
10035     case TARGET_NR_getxgid:
10036          {
10037             gid_t egid;
10038             egid = getegid();
10039             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10040          }
10041         return get_errno(getgid());
10042 #endif
10043 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10044     /* Alpha specific */
10045     case TARGET_NR_osf_getsysinfo:
10046         ret = -TARGET_EOPNOTSUPP;
10047         switch (arg1) {
10048           case TARGET_GSI_IEEE_FP_CONTROL:
10049             {
10050                 uint64_t swcr, fpcr = cpu_alpha_load_fpcr(cpu_env);
10051 
10052                 /* Copied from linux ieee_fpcr_to_swcr.  */
10053                 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10054                 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10055                 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10056                                         | SWCR_TRAP_ENABLE_DZE
10057                                         | SWCR_TRAP_ENABLE_OVF);
10058                 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10059                                         | SWCR_TRAP_ENABLE_INE);
10060                 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10061                 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10062 
10063                 if (put_user_u64(swcr, arg2))
10064                     return -TARGET_EFAULT;
10065                 ret = 0;
10066             }
10067             break;
10068 
10069           /* case GSI_IEEE_STATE_AT_SIGNAL:
10070              -- Not implemented in linux kernel.
10071              case GSI_UACPROC:
10072              -- Retrieves current unaligned access state; not much used.
10073              case GSI_PROC_TYPE:
10074              -- Retrieves implver information; surely not used.
10075              case GSI_GET_HWRPB:
10076              -- Grabs a copy of the HWRPB; surely not used.
10077           */
10078         }
10079         return ret;
10080 #endif
10081 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10082     /* Alpha specific */
10083     case TARGET_NR_osf_setsysinfo:
10084         ret = -TARGET_EOPNOTSUPP;
10085         switch (arg1) {
10086           case TARGET_SSI_IEEE_FP_CONTROL:
10087             {
10088                 uint64_t swcr, fpcr, orig_fpcr;
10089 
10090                 if (get_user_u64(swcr, arg2)) {
10091                     return -TARGET_EFAULT;
10092                 }
10093                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10094                 fpcr = orig_fpcr & FPCR_DYN_MASK;
10095 
10096                 /* Copied from linux ieee_swcr_to_fpcr.  */
10097                 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10098                 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10099                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10100                                   | SWCR_TRAP_ENABLE_DZE
10101                                   | SWCR_TRAP_ENABLE_OVF)) << 48;
10102                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10103                                   | SWCR_TRAP_ENABLE_INE)) << 57;
10104                 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10105                 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10106 
10107                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10108                 ret = 0;
10109             }
10110             break;
10111 
10112           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10113             {
10114                 uint64_t exc, fpcr, orig_fpcr;
10115                 int si_code;
10116 
10117                 if (get_user_u64(exc, arg2)) {
10118                     return -TARGET_EFAULT;
10119                 }
10120 
10121                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10122 
10123                 /* We only add to the exception status here.  */
10124                 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
10125 
10126                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10127                 ret = 0;
10128 
10129                 /* Old exceptions are not signaled.  */
10130                 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
10131 
10132                 /* If any exceptions were set by this call
10133                    and are unmasked, send a signal.  */
10134                 si_code = 0;
10135                 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
10136                     si_code = TARGET_FPE_FLTRES;
10137                 }
10138                 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
10139                     si_code = TARGET_FPE_FLTUND;
10140                 }
10141                 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
10142                     si_code = TARGET_FPE_FLTOVF;
10143                 }
10144                 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
10145                     si_code = TARGET_FPE_FLTDIV;
10146                 }
10147                 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
10148                     si_code = TARGET_FPE_FLTINV;
10149                 }
10150                 if (si_code != 0) {
10151                     target_siginfo_t info;
10152                     info.si_signo = SIGFPE;
10153                     info.si_errno = 0;
10154                     info.si_code = si_code;
10155                     info._sifields._sigfault._addr
10156                         = ((CPUArchState *)cpu_env)->pc;
10157                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10158                                  QEMU_SI_FAULT, &info);
10159                 }
10160             }
10161             break;
10162 
10163           /* case SSI_NVPAIRS:
10164              -- Used with SSIN_UACPROC to enable unaligned accesses.
10165              case SSI_IEEE_STATE_AT_SIGNAL:
10166              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10167              -- Not implemented in linux kernel
10168           */
10169         }
10170         return ret;
10171 #endif
10172 #ifdef TARGET_NR_osf_sigprocmask
10173     /* Alpha specific.  */
10174     case TARGET_NR_osf_sigprocmask:
10175         {
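            /* Unlike the POSIX interface, the OSF/1-style sigprocmask takes
             * the new mask by value and returns the old mask as the syscall
             * result rather than writing it through a pointer.
             */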
10176             abi_ulong mask;
10177             int how;
10178             sigset_t set, oldset;
10179 
10180             switch (arg1) {
10181             case TARGET_SIG_BLOCK:
10182                 how = SIG_BLOCK;
10183                 break;
10184             case TARGET_SIG_UNBLOCK:
10185                 how = SIG_UNBLOCK;
10186                 break;
10187             case TARGET_SIG_SETMASK:
10188                 how = SIG_SETMASK;
10189                 break;
10190             default:
10191                 return -TARGET_EINVAL;
10192             }
10193             mask = arg2;
10194             target_to_host_old_sigset(&set, &mask);
10195             ret = do_sigprocmask(how, &set, &oldset);
10196             if (!ret) {
10197                 host_to_target_old_sigset(&mask, &oldset);
10198                 ret = mask;
10199             }
10200         }
10201         return ret;
10202 #endif
10203 
10204 #ifdef TARGET_NR_getgid32
10205     case TARGET_NR_getgid32:
10206         return get_errno(getgid());
10207 #endif
10208 #ifdef TARGET_NR_geteuid32
10209     case TARGET_NR_geteuid32:
10210         return get_errno(geteuid());
10211 #endif
10212 #ifdef TARGET_NR_getegid32
10213     case TARGET_NR_getegid32:
10214         return get_errno(getegid());
10215 #endif
10216 #ifdef TARGET_NR_setreuid32
10217     case TARGET_NR_setreuid32:
10218         return get_errno(setreuid(arg1, arg2));
10219 #endif
10220 #ifdef TARGET_NR_setregid32
10221     case TARGET_NR_setregid32:
10222         return get_errno(setregid(arg1, arg2));
10223 #endif
10224 #ifdef TARGET_NR_getgroups32
10225     case TARGET_NR_getgroups32:
10226         {
10227             int gidsetsize = arg1;
10228             uint32_t *target_grouplist;
10229             gid_t *grouplist;
10230             int i;
10231 
10232             grouplist = alloca(gidsetsize * sizeof(gid_t));
10233             ret = get_errno(getgroups(gidsetsize, grouplist));
10234             if (gidsetsize == 0)
10235                 return ret;
10236             if (!is_error(ret)) {
10237                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10238                 if (!target_grouplist) {
10239                     return -TARGET_EFAULT;
10240                 }
10241                 for (i = 0; i < ret; i++)
10242                     target_grouplist[i] = tswap32(grouplist[i]);
10243                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10244             }
10245         }
10246         return ret;
10247 #endif
10248 #ifdef TARGET_NR_setgroups32
10249     case TARGET_NR_setgroups32:
10250         {
10251             int gidsetsize = arg1;
10252             uint32_t *target_grouplist;
10253             gid_t *grouplist;
10254             int i;
10255 
10256             grouplist = alloca(gidsetsize * sizeof(gid_t));
10257             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10258             if (!target_grouplist) {
10259                 return -TARGET_EFAULT;
10260             }
10261             for (i = 0; i < gidsetsize; i++)
10262                 grouplist[i] = tswap32(target_grouplist[i]);
10263             unlock_user(target_grouplist, arg2, 0);
10264             return get_errno(setgroups(gidsetsize, grouplist));
10265         }
10266 #endif
10267 #ifdef TARGET_NR_fchown32
10268     case TARGET_NR_fchown32:
10269         return get_errno(fchown(arg1, arg2, arg3));
10270 #endif
10271 #ifdef TARGET_NR_setresuid32
10272     case TARGET_NR_setresuid32:
10273         return get_errno(sys_setresuid(arg1, arg2, arg3));
10274 #endif
10275 #ifdef TARGET_NR_getresuid32
10276     case TARGET_NR_getresuid32:
10277         {
10278             uid_t ruid, euid, suid;
10279             ret = get_errno(getresuid(&ruid, &euid, &suid));
10280             if (!is_error(ret)) {
10281                 if (put_user_u32(ruid, arg1)
10282                     || put_user_u32(euid, arg2)
10283                     || put_user_u32(suid, arg3))
10284                     return -TARGET_EFAULT;
10285             }
10286         }
10287         return ret;
10288 #endif
10289 #ifdef TARGET_NR_setresgid32
10290     case TARGET_NR_setresgid32:
10291         return get_errno(sys_setresgid(arg1, arg2, arg3));
10292 #endif
10293 #ifdef TARGET_NR_getresgid32
10294     case TARGET_NR_getresgid32:
10295         {
10296             gid_t rgid, egid, sgid;
10297             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10298             if (!is_error(ret)) {
10299                 if (put_user_u32(rgid, arg1)
10300                     || put_user_u32(egid, arg2)
10301                     || put_user_u32(sgid, arg3))
10302                     return -TARGET_EFAULT;
10303             }
10304         }
10305         return ret;
10306 #endif
10307 #ifdef TARGET_NR_chown32
10308     case TARGET_NR_chown32:
10309         if (!(p = lock_user_string(arg1)))
10310             return -TARGET_EFAULT;
10311         ret = get_errno(chown(p, arg2, arg3));
10312         unlock_user(p, arg1, 0);
10313         return ret;
10314 #endif
10315 #ifdef TARGET_NR_setuid32
10316     case TARGET_NR_setuid32:
10317         return get_errno(sys_setuid(arg1));
10318 #endif
10319 #ifdef TARGET_NR_setgid32
10320     case TARGET_NR_setgid32:
10321         return get_errno(sys_setgid(arg1));
10322 #endif
10323 #ifdef TARGET_NR_setfsuid32
10324     case TARGET_NR_setfsuid32:
10325         return get_errno(setfsuid(arg1));
10326 #endif
10327 #ifdef TARGET_NR_setfsgid32
10328     case TARGET_NR_setfsgid32:
10329         return get_errno(setfsgid(arg1));
10330 #endif
10331 #ifdef TARGET_NR_mincore
10332     case TARGET_NR_mincore:
10333         {
10334             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10335             if (!a) {
10336                 return -TARGET_ENOMEM;
10337             }
10338             p = lock_user_string(arg3);
10339             if (!p) {
10340                 ret = -TARGET_EFAULT;
10341             } else {
10342                 ret = get_errno(mincore(a, arg2, p));
10343                 unlock_user(p, arg3, ret);
10344             }
10345             unlock_user(a, arg1, 0);
10346         }
10347         return ret;
10348 #endif
10349 #ifdef TARGET_NR_arm_fadvise64_64
10350     case TARGET_NR_arm_fadvise64_64:
10351         /* arm_fadvise64_64 looks like fadvise64_64 but
10352          * with different argument order: fd, advice, offset, len
10353          * rather than the usual fd, offset, len, advice.
10354          * Note that offset and len are both 64-bit so appear as
10355          * pairs of 32-bit registers.
10356          */
10357         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10358                             target_offset64(arg5, arg6), arg2);
10359         return -host_to_target_errno(ret);
10360 #endif
10361 
10362 #if TARGET_ABI_BITS == 32
10363 
10364 #ifdef TARGET_NR_fadvise64_64
10365     case TARGET_NR_fadvise64_64:
10366 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10367         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10368         ret = arg2;
10369         arg2 = arg3;
10370         arg3 = arg4;
10371         arg4 = arg5;
10372         arg5 = arg6;
10373         arg6 = ret;
10374 #else
10375         /* 6 args: fd, offset (high, low), len (high, low), advice */
10376         if (regpairs_aligned(cpu_env, num)) {
10377             /* offset is in (3,4), len in (5,6) and advice in 7 */
10378             arg2 = arg3;
10379             arg3 = arg4;
10380             arg4 = arg5;
10381             arg5 = arg6;
10382             arg6 = arg7;
10383         }
10384 #endif
10385         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10386                             target_offset64(arg4, arg5), arg6);
10387         return -host_to_target_errno(ret);
10388 #endif
10389 
10390 #ifdef TARGET_NR_fadvise64
10391     case TARGET_NR_fadvise64:
10392         /* 5 args: fd, offset (high, low), len, advice */
10393         if (regpairs_aligned(cpu_env, num)) {
10394             /* offset is in (3,4), len in 5 and advice in 6 */
10395             arg2 = arg3;
10396             arg3 = arg4;
10397             arg4 = arg5;
10398             arg5 = arg6;
10399         }
10400         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10401         return -host_to_target_errno(ret);
10402 #endif
10403 
10404 #else /* not a 32-bit ABI */
10405 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10406 #ifdef TARGET_NR_fadvise64_64
10407     case TARGET_NR_fadvise64_64:
10408 #endif
10409 #ifdef TARGET_NR_fadvise64
10410     case TARGET_NR_fadvise64:
10411 #endif
10412 #ifdef TARGET_S390X
10413         switch (arg4) {
10414         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10415         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10416         case 6: arg4 = POSIX_FADV_DONTNEED; break;
10417         case 7: arg4 = POSIX_FADV_NOREUSE; break;
10418         default: break;
10419         }
10420 #endif
10421         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10422 #endif
10423 #endif /* end of 64-bit ABI fadvise handling */
10424 
10425 #ifdef TARGET_NR_madvise
10426     case TARGET_NR_madvise:
10427         /* A straight passthrough may not be safe because qemu sometimes
10428            turns private file-backed mappings into anonymous mappings.
10429            This will break MADV_DONTNEED.
10430            This is a hint, so ignoring and returning success is ok.  */
10431         return 0;
10432 #endif
10433 #if TARGET_ABI_BITS == 32
10434     case TARGET_NR_fcntl64:
10435     {
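        /* fcntl64 exists on 32-bit ABIs to provide 64-bit file locking.
         * The flock64 layout differs for ARM OABI guests, so the copy
         * helpers are chosen at runtime.
         */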
10436         int cmd;
10437         struct flock64 fl;
10438         from_flock64_fn *copyfrom = copy_from_user_flock64;
10439         to_flock64_fn *copyto = copy_to_user_flock64;
10440 
10441 #ifdef TARGET_ARM
10442         if (!((CPUARMState *)cpu_env)->eabi) {
10443             copyfrom = copy_from_user_oabi_flock64;
10444             copyto = copy_to_user_oabi_flock64;
10445         }
10446 #endif
10447 
10448         cmd = target_to_host_fcntl_cmd(arg2);
10449         if (cmd == -TARGET_EINVAL) {
10450             return cmd;
10451         }
10452 
10453         switch (arg2) {
10454         case TARGET_F_GETLK64:
10455             ret = copyfrom(&fl, arg3);
10456             if (ret) {
10457                 break;
10458             }
10459             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10460             if (ret == 0) {
10461                 ret = copyto(arg3, &fl);
10462             }
10463             break;
10464 
10465         case TARGET_F_SETLK64:
10466         case TARGET_F_SETLKW64:
10467             ret = copyfrom(&fl, arg3);
10468             if (ret) {
10469                 break;
10470             }
10471             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10472             break;
10473         default:
10474             ret = do_fcntl(arg1, arg2, arg3);
10475             break;
10476         }
10477         return ret;
10478     }
10479 #endif
10480 #ifdef TARGET_NR_cacheflush
10481     case TARGET_NR_cacheflush:
10482         /* self-modifying code is handled automatically, so nothing needed */
10483         return 0;
10484 #endif
10485 #ifdef TARGET_NR_getpagesize
10486     case TARGET_NR_getpagesize:
10487         return TARGET_PAGE_SIZE;
10488 #endif
10489     case TARGET_NR_gettid:
10490         return get_errno(gettid());
10491 #ifdef TARGET_NR_readahead
10492     case TARGET_NR_readahead:
10493 #if TARGET_ABI_BITS == 32
10494         if (regpairs_aligned(cpu_env, num)) {
10495             arg2 = arg3;
10496             arg3 = arg4;
10497             arg4 = arg5;
10498         }
10499         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
10500 #else
10501         ret = get_errno(readahead(arg1, arg2, arg3));
10502 #endif
10503         return ret;
10504 #endif
10505 #ifdef CONFIG_ATTR
10506 #ifdef TARGET_NR_setxattr
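    /* For the *xattr calls a NULL or zero-length buffer is a valid way of
     * querying the required size, so the value/list buffer is only locked
     * when the guest actually supplied one.
     */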
10507     case TARGET_NR_listxattr:
10508     case TARGET_NR_llistxattr:
10509     {
10510         void *p, *b = 0;
10511         if (arg2) {
10512             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10513             if (!b) {
10514                 return -TARGET_EFAULT;
10515             }
10516         }
10517         p = lock_user_string(arg1);
10518         if (p) {
10519             if (num == TARGET_NR_listxattr) {
10520                 ret = get_errno(listxattr(p, b, arg3));
10521             } else {
10522                 ret = get_errno(llistxattr(p, b, arg3));
10523             }
10524         } else {
10525             ret = -TARGET_EFAULT;
10526         }
10527         unlock_user(p, arg1, 0);
10528         unlock_user(b, arg2, arg3);
10529         return ret;
10530     }
10531     case TARGET_NR_flistxattr:
10532     {
10533         void *b = 0;
10534         if (arg2) {
10535             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10536             if (!b) {
10537                 return -TARGET_EFAULT;
10538             }
10539         }
10540         ret = get_errno(flistxattr(arg1, b, arg3));
10541         unlock_user(b, arg2, arg3);
10542         return ret;
10543     }
10544     case TARGET_NR_setxattr:
10545     case TARGET_NR_lsetxattr:
10546         {
10547             void *p, *n, *v = 0;
10548             if (arg3) {
10549                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10550                 if (!v) {
10551                     return -TARGET_EFAULT;
10552                 }
10553             }
10554             p = lock_user_string(arg1);
10555             n = lock_user_string(arg2);
10556             if (p && n) {
10557                 if (num == TARGET_NR_setxattr) {
10558                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
10559                 } else {
10560                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10561                 }
10562             } else {
10563                 ret = -TARGET_EFAULT;
10564             }
10565             unlock_user(p, arg1, 0);
10566             unlock_user(n, arg2, 0);
10567             unlock_user(v, arg3, 0);
10568         }
10569         return ret;
10570     case TARGET_NR_fsetxattr:
10571         {
10572             void *n, *v = 0;
10573             if (arg3) {
10574                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10575                 if (!v) {
10576                     return -TARGET_EFAULT;
10577                 }
10578             }
10579             n = lock_user_string(arg2);
10580             if (n) {
10581                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10582             } else {
10583                 ret = -TARGET_EFAULT;
10584             }
10585             unlock_user(n, arg2, 0);
10586             unlock_user(v, arg3, 0);
10587         }
10588         return ret;
10589     case TARGET_NR_getxattr:
10590     case TARGET_NR_lgetxattr:
10591         {
10592             void *p, *n, *v = 0;
10593             if (arg3) {
10594                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10595                 if (!v) {
10596                     return -TARGET_EFAULT;
10597                 }
10598             }
10599             p = lock_user_string(arg1);
10600             n = lock_user_string(arg2);
10601             if (p && n) {
10602                 if (num == TARGET_NR_getxattr) {
10603                     ret = get_errno(getxattr(p, n, v, arg4));
10604                 } else {
10605                     ret = get_errno(lgetxattr(p, n, v, arg4));
10606                 }
10607             } else {
10608                 ret = -TARGET_EFAULT;
10609             }
10610             unlock_user(p, arg1, 0);
10611             unlock_user(n, arg2, 0);
10612             unlock_user(v, arg3, arg4);
10613         }
10614         return ret;
10615     case TARGET_NR_fgetxattr:
10616         {
10617             void *n, *v = 0;
10618             if (arg3) {
10619                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10620                 if (!v) {
10621                     return -TARGET_EFAULT;
10622                 }
10623             }
10624             n = lock_user_string(arg2);
10625             if (n) {
10626                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10627             } else {
10628                 ret = -TARGET_EFAULT;
10629             }
10630             unlock_user(n, arg2, 0);
10631             unlock_user(v, arg3, arg4);
10632         }
10633         return ret;
10634     case TARGET_NR_removexattr:
10635     case TARGET_NR_lremovexattr:
10636         {
10637             void *p, *n;
10638             p = lock_user_string(arg1);
10639             n = lock_user_string(arg2);
10640             if (p && n) {
10641                 if (num == TARGET_NR_removexattr) {
10642                     ret = get_errno(removexattr(p, n));
10643                 } else {
10644                     ret = get_errno(lremovexattr(p, n));
10645                 }
10646             } else {
10647                 ret = -TARGET_EFAULT;
10648             }
10649             unlock_user(p, arg1, 0);
10650             unlock_user(n, arg2, 0);
10651         }
10652         return ret;
10653     case TARGET_NR_fremovexattr:
10654         {
10655             void *n;
10656             n = lock_user_string(arg2);
10657             if (n) {
10658                 ret = get_errno(fremovexattr(arg1, n));
10659             } else {
10660                 ret = -TARGET_EFAULT;
10661             }
10662             unlock_user(n, arg2, 0);
10663         }
10664         return ret;
10665 #endif
10666 #endif /* CONFIG_ATTR */
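    /* set_thread_area/get_thread_area: the TLS pointer lives in a
     * target-specific place (CP0_UserLocal on MIPS, PR_PID on CRIS, a
     * descriptor handled by do_set_thread_area() on 32-bit x86, and the
     * TaskState tp_value on m68k).
     */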
10667 #ifdef TARGET_NR_set_thread_area
10668     case TARGET_NR_set_thread_area:
10669 #if defined(TARGET_MIPS)
10670       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10671       return 0;
10672 #elif defined(TARGET_CRIS)
10673       if (arg1 & 0xff) {
10674           ret = -TARGET_EINVAL;
10675       } else {
10676           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10677           ret = 0;
10678       }
10679       return ret;
10680 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10681       return do_set_thread_area(cpu_env, arg1);
10682 #elif defined(TARGET_M68K)
10683       {
10684           TaskState *ts = cpu->opaque;
10685           ts->tp_value = arg1;
10686           return 0;
10687       }
10688 #else
10689       return -TARGET_ENOSYS;
10690 #endif
10691 #endif
10692 #ifdef TARGET_NR_get_thread_area
10693     case TARGET_NR_get_thread_area:
10694 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10695         return do_get_thread_area(cpu_env, arg1);
10696 #elif defined(TARGET_M68K)
10697         {
10698             TaskState *ts = cpu->opaque;
10699             return ts->tp_value;
10700         }
10701 #else
10702         return -TARGET_ENOSYS;
10703 #endif
10704 #endif
10705 #ifdef TARGET_NR_getdomainname
10706     case TARGET_NR_getdomainname:
10707         return -TARGET_ENOSYS;
10708 #endif
10709 
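    /* POSIX clock syscalls: struct timespec differs between guest and host
     * ABIs, so it is converted on the way in and/or out around the host
     * library call.
     */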
10710 #ifdef TARGET_NR_clock_settime
10711     case TARGET_NR_clock_settime:
10712     {
10713         struct timespec ts;
10714 
10715         ret = target_to_host_timespec(&ts, arg2);
10716         if (!is_error(ret)) {
10717             ret = get_errno(clock_settime(arg1, &ts));
10718         }
10719         return ret;
10720     }
10721 #endif
10722 #ifdef TARGET_NR_clock_gettime
10723     case TARGET_NR_clock_gettime:
10724     {
10725         struct timespec ts;
10726         ret = get_errno(clock_gettime(arg1, &ts));
10727         if (!is_error(ret)) {
10728             ret = host_to_target_timespec(arg2, &ts);
10729         }
10730         return ret;
10731     }
10732 #endif
10733 #ifdef TARGET_NR_clock_getres
10734     case TARGET_NR_clock_getres:
10735     {
10736         struct timespec ts;
10737         ret = get_errno(clock_getres(arg1, &ts));
10738         if (!is_error(ret)) {
10739             ret = host_to_target_timespec(arg2, &ts);
10740         }
10741         return ret;
10742     }
10743 #endif
10744 #ifdef TARGET_NR_clock_nanosleep
10745     case TARGET_NR_clock_nanosleep:
10746     {
10747         struct timespec ts;
10748         if (target_to_host_timespec(&ts, arg3)) {
                  return -TARGET_EFAULT;
              }
10749         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
10750                                              &ts, arg4 ? &ts : NULL));
10751         if (arg4)
10752             host_to_target_timespec(arg4, &ts);
10753 
10754 #if defined(TARGET_PPC)
10755         /* clock_nanosleep is odd in that it returns positive errno values.
10756          * On PPC, CR0 bit 3 should be set in such a situation. */
10757         if (ret && ret != -TARGET_ERESTARTSYS) {
10758             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
10759         }
10760 #endif
10761         return ret;
10762     }
10763 #endif
10764 
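    /* set_tid_address can use the guest address directly via g2h();
     * tkill/tgkill pass thread ids through unchanged but translate the
     * guest signal number to the host numbering first.
     */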
10765 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10766     case TARGET_NR_set_tid_address:
10767         return get_errno(set_tid_address((int *)g2h(arg1)));
10768 #endif
10769 
10770     case TARGET_NR_tkill:
10771         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10772 
10773     case TARGET_NR_tgkill:
10774         return get_errno(safe_tgkill((int)arg1, (int)arg2,
10775                          target_to_host_signal(arg3)));
10776 
10777 #ifdef TARGET_NR_set_robust_list
10778     case TARGET_NR_set_robust_list:
10779     case TARGET_NR_get_robust_list:
10780         /* The ABI for supporting robust futexes has userspace pass
10781          * the kernel a pointer to a linked list which is updated by
10782          * userspace after the syscall; the list is walked by the kernel
10783          * when the thread exits. Since the linked list in QEMU guest
10784          * memory isn't a valid linked list for the host and we have
10785          * no way to reliably intercept the thread-death event, we can't
10786          * support these. Silently return ENOSYS so that guest userspace
10787          * falls back to a non-robust futex implementation (which should
10788          * be OK except in the corner case of the guest crashing while
10789          * holding a mutex that is shared with another process via
10790          * shared memory).
10791          */
10792         return -TARGET_ENOSYS;
10793 #endif
10794 
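    /* utimensat: arg3 may point to an array of two target timespecs
     * (atime, mtime); a NULL pointer means "set both to the current time"
     * and is passed through as a NULL host pointer.
     */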
10795 #if defined(TARGET_NR_utimensat)
10796     case TARGET_NR_utimensat:
10797         {
10798             struct timespec *tsp, ts[2];
10799             if (!arg3) {
10800                 tsp = NULL;
10801             } else {
10802                 if (target_to_host_timespec(ts, arg3) ||
10803                     target_to_host_timespec(ts + 1,
                                arg3 + sizeof(struct target_timespec))) {
                          return -TARGET_EFAULT;
                      }
10804                 tsp = ts;
10805             }
10806             if (!arg2) {
10807                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
10808             } else {
10809                 if (!(p = lock_user_string(arg2))) {
10810                     return -TARGET_EFAULT;
10811                 }
10812                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
10813                 unlock_user(p, arg2, 0);
10814             }
10815         }
10816         return ret;
10817 #endif
10818     case TARGET_NR_futex:
10819         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
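    /* inotify: the descriptor returned by inotify_init/inotify_init1 gets
     * an fd translator registered so that event data read from it can be
     * converted for the guest.
     */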
10820 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10821     case TARGET_NR_inotify_init:
10822         ret = get_errno(sys_inotify_init());
10823         if (ret >= 0) {
10824             fd_trans_register(ret, &target_inotify_trans);
10825         }
10826         return ret;
10827 #endif
10828 #ifdef CONFIG_INOTIFY1
10829 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10830     case TARGET_NR_inotify_init1:
10831         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
10832                                           fcntl_flags_tbl)));
10833         if (ret >= 0) {
10834             fd_trans_register(ret, &target_inotify_trans);
10835         }
10836         return ret;
10837 #endif
10838 #endif
10839 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10840     case TARGET_NR_inotify_add_watch:
10841         p = lock_user_string(arg2);
              if (!p) {
                  return -TARGET_EFAULT;
              }
10842         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
10843         unlock_user(p, arg2, 0);
10844         return ret;
10845 #endif
10846 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10847     case TARGET_NR_inotify_rm_watch:
10848         return get_errno(sys_inotify_rm_watch(arg1, arg2));
10849 #endif
10850 
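    /* POSIX message queues: the open flags and mq_attr are translated to
     * the host representation; the timed send/receive variants also
     * convert the timeout timespec in both directions.
     */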
10851 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10852     case TARGET_NR_mq_open:
10853         {
10854             struct mq_attr posix_mq_attr;
10855             struct mq_attr *pposix_mq_attr;
10856             int host_flags;
10857 
10858             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
10859             pposix_mq_attr = NULL;
10860             if (arg4) {
10861                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
10862                     return -TARGET_EFAULT;
10863                 }
10864                 pposix_mq_attr = &posix_mq_attr;
10865             }
10866             p = lock_user_string(arg1);
10867             if (!p) {
10868                 return -TARGET_EFAULT;
10869             }
10870             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
10871             unlock_user(p, arg1, 0);
10872         }
10873         return ret;
10874 
10875     case TARGET_NR_mq_unlink:
10876         p = lock_user_string(arg1);
10877         if (!p) {
10878             return -TARGET_EFAULT;
10879         }
10880         ret = get_errno(mq_unlink(p));
10881         unlock_user(p, arg1, 0);
10882         return ret;
10883 
10884     case TARGET_NR_mq_timedsend:
10885         {
10886             struct timespec ts;
10887 
10888             p = lock_user(VERIFY_READ, arg2, arg3, 1);
                  if (!p) {
                      return -TARGET_EFAULT;
                  }
10889             if (arg5 != 0) {
10890                 target_to_host_timespec(&ts, arg5);
10891                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
10892                 host_to_target_timespec(arg5, &ts);
10893             } else {
10894                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
10895             }
10896             unlock_user(p, arg2, 0);
10897         }
10898         return ret;
10899 
10900     case TARGET_NR_mq_timedreceive:
10901         {
10902             struct timespec ts;
10903             unsigned int prio;
10904 
10905             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                  if (!p) {
                      return -TARGET_EFAULT;
                  }
10906             if (arg5 != 0) {
10907                 target_to_host_timespec(&ts, arg5);
10908                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
10909                                                      &prio, &ts));
10910                 host_to_target_timespec(arg5, &ts);
10911             } else {
10912                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
10913                                                      &prio, NULL));
10914             }
10915             unlock_user(p, arg2, arg3);
10916             if (arg4 != 0) {
10917                 put_user_u32(prio, arg4);
                      }
10918         }
10919         return ret;
10920 
10921     /* Not implemented for now... */
10922 /*     case TARGET_NR_mq_notify: */
10923 /*         break; */
10924 
10925     case TARGET_NR_mq_getsetattr:
10926         {
10927             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
10928             ret = 0;
10929             if (arg2 != 0) {
10930                 if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                          return -TARGET_EFAULT;
                      }
10931                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
10932                                            &posix_mq_attr_out));
10933             } else if (arg3 != 0) {
10934                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
10935             }
10936             if (ret == 0 && arg3 != 0) {
10937                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
10938             }
10939         }
10940         return ret;
10941 #endif
10942 
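    /* tee/splice/vmsplice operate on host file descriptors directly; only
     * the optional 64-bit offsets (splice) and the iovec array (vmsplice)
     * need to be copied between guest and host memory.
     */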
10943 #ifdef CONFIG_SPLICE
10944 #ifdef TARGET_NR_tee
10945     case TARGET_NR_tee:
10946         {
10947             ret = get_errno(tee(arg1, arg2, arg3, arg4));
10948         }
10949         return ret;
10950 #endif
10951 #ifdef TARGET_NR_splice
10952     case TARGET_NR_splice:
10953         {
10954             loff_t loff_in, loff_out;
10955             loff_t *ploff_in = NULL, *ploff_out = NULL;
10956             if (arg2) {
10957                 if (get_user_u64(loff_in, arg2)) {
10958                     return -TARGET_EFAULT;
10959                 }
10960                 ploff_in = &loff_in;
10961             }
10962             if (arg4) {
10963                 if (get_user_u64(loff_out, arg4)) {
10964                     return -TARGET_EFAULT;
10965                 }
10966                 ploff_out = &loff_out;
10967             }
10968             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
10969             if (arg2) {
10970                 if (put_user_u64(loff_in, arg2)) {
10971                     return -TARGET_EFAULT;
10972                 }
10973             }
10974             if (arg4) {
10975                 if (put_user_u64(loff_out, arg4)) {
10976                     return -TARGET_EFAULT;
10977                 }
10978             }
10979         }
10980         return ret;
10981 #endif
10982 #ifdef TARGET_NR_vmsplice
10983     case TARGET_NR_vmsplice:
10984         {
10985             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10986             if (vec != NULL) {
10987                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
10988                 unlock_iovec(vec, arg2, arg3, 0);
10989             } else {
10990                 ret = -host_to_target_errno(errno);
10991             }
10992         }
10993         return ret;
10994 #endif
10995 #endif /* CONFIG_SPLICE */
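    /* eventfd/eventfd2: the new descriptor gets an fd translator registered
     * so that accesses to the 8-byte counter can be adjusted for the guest
     * byte order; eventfd2 additionally maps TARGET_O_NONBLOCK and
     * TARGET_O_CLOEXEC onto the host flag values.
     */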
10996 #ifdef CONFIG_EVENTFD
10997 #if defined(TARGET_NR_eventfd)
10998     case TARGET_NR_eventfd:
10999         ret = get_errno(eventfd(arg1, 0));
11000         if (ret >= 0) {
11001             fd_trans_register(ret, &target_eventfd_trans);
11002         }
11003         return ret;
11004 #endif
11005 #if defined(TARGET_NR_eventfd2)
11006     case TARGET_NR_eventfd2:
11007     {
11008         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11009         if (arg2 & TARGET_O_NONBLOCK) {
11010             host_flags |= O_NONBLOCK;
11011         }
11012         if (arg2 & TARGET_O_CLOEXEC) {
11013             host_flags |= O_CLOEXEC;
11014         }
11015         ret = get_errno(eventfd(arg1, host_flags));
11016         if (ret >= 0) {
11017             fd_trans_register(ret, &target_eventfd_trans);
11018         }
11019         return ret;
11020     }
11021 #endif
11022 #endif /* CONFIG_EVENTFD  */
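    /* fallocate and sync_file_range take 64-bit offsets; on 32-bit ABIs
     * these arrive split across register pairs and are reassembled with
     * target_offset64() (MIPS shifts the arguments by one slot).
     */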
11023 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11024     case TARGET_NR_fallocate:
11025 #if TARGET_ABI_BITS == 32
11026         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11027                                   target_offset64(arg5, arg6)));
11028 #else
11029         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11030 #endif
11031         return ret;
11032 #endif
11033 #if defined(CONFIG_SYNC_FILE_RANGE)
11034 #if defined(TARGET_NR_sync_file_range)
11035     case TARGET_NR_sync_file_range:
11036 #if TARGET_ABI_BITS == 32
11037 #if defined(TARGET_MIPS)
11038         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11039                                         target_offset64(arg5, arg6), arg7));
11040 #else
11041         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11042                                         target_offset64(arg4, arg5), arg6));
11043 #endif /* !TARGET_MIPS */
11044 #else
11045         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11046 #endif
11047         return ret;
11048 #endif
11049 #if defined(TARGET_NR_sync_file_range2)
11050     case TARGET_NR_sync_file_range2:
11051         /* This is like sync_file_range but the arguments are reordered */
11052 #if TARGET_ABI_BITS == 32
11053         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11054                                         target_offset64(arg5, arg6), arg2));
11055 #else
11056         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11057 #endif
11058         return ret;
11059 #endif
11060 #endif
11061 #if defined(TARGET_NR_signalfd4)
11062     case TARGET_NR_signalfd4:
11063         return do_signalfd4(arg1, arg2, arg4);
11064 #endif
11065 #if defined(TARGET_NR_signalfd)
11066     case TARGET_NR_signalfd:
11067         return do_signalfd4(arg1, arg2, 0);
11068 #endif
11069 #if defined(CONFIG_EPOLL)
11070 #if defined(TARGET_NR_epoll_create)
11071     case TARGET_NR_epoll_create:
11072         return get_errno(epoll_create(arg1));
11073 #endif
11074 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11075     case TARGET_NR_epoll_create1:
11076         return get_errno(epoll_create1(arg1));
11077 #endif
11078 #if defined(TARGET_NR_epoll_ctl)
11079     case TARGET_NR_epoll_ctl:
11080     {
11081         struct epoll_event ep;
11082         struct epoll_event *epp = 0;
11083         if (arg4) {
11084             struct target_epoll_event *target_ep;
11085             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11086                 return -TARGET_EFAULT;
11087             }
11088             ep.events = tswap32(target_ep->events);
11089             /* The epoll_data_t union is just opaque data to the kernel,
11090              * so we transfer all 64 bits across and need not worry what
11091              * actual data type it is.
11092              */
11093             ep.data.u64 = tswap64(target_ep->data.u64);
11094             unlock_user_struct(target_ep, arg4, 0);
11095             epp = &ep;
11096         }
11097         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11098     }
11099 #endif
11100 
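    /* epoll_wait and epoll_pwait share one implementation: the event array
     * is bounce-buffered on the host and converted back into guest
     * target_epoll_event entries after the call returns.
     */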
11101 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11102 #if defined(TARGET_NR_epoll_wait)
11103     case TARGET_NR_epoll_wait:
11104 #endif
11105 #if defined(TARGET_NR_epoll_pwait)
11106     case TARGET_NR_epoll_pwait:
11107 #endif
11108     {
11109         struct target_epoll_event *target_ep;
11110         struct epoll_event *ep;
11111         int epfd = arg1;
11112         int maxevents = arg3;
11113         int timeout = arg4;
11114 
11115         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11116             return -TARGET_EINVAL;
11117         }
11118 
11119         target_ep = lock_user(VERIFY_WRITE, arg2,
11120                               maxevents * sizeof(struct target_epoll_event), 1);
11121         if (!target_ep) {
11122             return -TARGET_EFAULT;
11123         }
11124 
11125         ep = g_try_new(struct epoll_event, maxevents);
11126         if (!ep) {
11127             unlock_user(target_ep, arg2, 0);
11128             return -TARGET_ENOMEM;
11129         }
11130 
11131         switch (num) {
11132 #if defined(TARGET_NR_epoll_pwait)
11133         case TARGET_NR_epoll_pwait:
11134         {
11135             target_sigset_t *target_set;
11136             sigset_t _set, *set = &_set;
11137 
11138             if (arg5) {
11139                 if (arg6 != sizeof(target_sigset_t)) {
11140                     ret = -TARGET_EINVAL;
11141                     break;
11142                 }
11143 
11144                 target_set = lock_user(VERIFY_READ, arg5,
11145                                        sizeof(target_sigset_t), 1);
11146                 if (!target_set) {
11147                     ret = -TARGET_EFAULT;
11148                     break;
11149                 }
11150                 target_to_host_sigset(set, target_set);
11151                 unlock_user(target_set, arg5, 0);
11152             } else {
11153                 set = NULL;
11154             }
11155 
11156             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11157                                              set, SIGSET_T_SIZE));
11158             break;
11159         }
11160 #endif
11161 #if defined(TARGET_NR_epoll_wait)
11162         case TARGET_NR_epoll_wait:
11163             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11164                                              NULL, 0));
11165             break;
11166 #endif
11167         default:
11168             ret = -TARGET_ENOSYS;
11169         }
11170         if (!is_error(ret)) {
11171             int i;
11172             for (i = 0; i < ret; i++) {
11173                 target_ep[i].events = tswap32(ep[i].events);
11174                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11175             }
11176             unlock_user(target_ep, arg2,
11177                         ret * sizeof(struct target_epoll_event));
11178         } else {
11179             unlock_user(target_ep, arg2, 0);
11180         }
11181         g_free(ep);
11182         return ret;
11183     }
11184 #endif
11185 #endif
11186 #ifdef TARGET_NR_prlimit64
11187     case TARGET_NR_prlimit64:
11188     {
11189         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11190         struct target_rlimit64 *target_rnew, *target_rold;
11191         struct host_rlimit64 rnew, rold, *rnewp = 0;
11192         int resource = target_to_host_resource(arg2);
11193         if (arg3) {
11194             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11195                 return -TARGET_EFAULT;
11196             }
11197             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11198             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11199             unlock_user_struct(target_rnew, arg3, 0);
11200             rnewp = &rnew;
11201         }
11202 
11203         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11204         if (!is_error(ret) && arg4) {
11205             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11206                 return -TARGET_EFAULT;
11207             }
11208             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11209             target_rold->rlim_max = tswap64(rold.rlim_max);
11210             unlock_user_struct(target_rold, arg4, 1);
11211         }
11212         return ret;
11213     }
11214 #endif
11215 #ifdef TARGET_NR_gethostname
11216     case TARGET_NR_gethostname:
11217     {
11218         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11219         if (name) {
11220             ret = get_errno(gethostname(name, arg2));
11221             unlock_user(name, arg1, arg2);
11222         } else {
11223             ret = -TARGET_EFAULT;
11224         }
11225         return ret;
11226     }
11227 #endif
11228 #ifdef TARGET_NR_atomic_cmpxchg_32
11229     case TARGET_NR_atomic_cmpxchg_32:
11230     {
11231         /* should use start_exclusive from main.c */
11232         abi_ulong mem_value;
11233         if (get_user_u32(mem_value, arg6)) {
11234             target_siginfo_t info;
11235             info.si_signo = SIGSEGV;
11236             info.si_errno = 0;
11237             info.si_code = TARGET_SEGV_MAPERR;
11238             info._sifields._sigfault._addr = arg6;
11239             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11240                          QEMU_SI_FAULT, &info);
11241             ret = 0xdeadbeef;
11242             return ret;
11243         }
11244         if (mem_value == arg2)
11245             put_user_u32(arg1, arg6);
11246         return mem_value;
11247     }
11248 #endif
11249 #ifdef TARGET_NR_atomic_barrier
11250     case TARGET_NR_atomic_barrier:
11251         /* Like the kernel implementation and the
11252            qemu arm barrier, no-op this? */
11253         return 0;
11254 #endif
11255 
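    /* POSIX timers: host timer_t handles are kept in the g_posix_timers
     * table; the guest sees a small index encoded with TIMER_MAGIC, which
     * get_timer_id() validates and decodes for the timer_* syscalls below.
     */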
11256 #ifdef TARGET_NR_timer_create
11257     case TARGET_NR_timer_create:
11258     {
11259         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11260 
11261         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11262 
11263         int clkid = arg1;
11264         int timer_index = next_free_host_timer();
11265 
11266         if (timer_index < 0) {
11267             ret = -TARGET_EAGAIN;
11268         } else {
11269             timer_t *phtimer = g_posix_timers + timer_index;
11270 
11271             if (arg2) {
11272                 phost_sevp = &host_sevp;
11273                 ret = target_to_host_sigevent(phost_sevp, arg2);
11274                 if (ret != 0) {
11275                     return ret;
11276                 }
11277             }
11278 
11279             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11280             if (ret) {
11281                 phtimer = NULL;
11282             } else {
11283                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11284                     return -TARGET_EFAULT;
11285                 }
11286             }
11287         }
11288         return ret;
11289     }
11290 #endif
11291 
11292 #ifdef TARGET_NR_timer_settime
11293     case TARGET_NR_timer_settime:
11294     {
11295         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11296          * struct itimerspec * old_value */
11297         target_timer_t timerid = get_timer_id(arg1);
11298 
11299         if (timerid < 0) {
11300             ret = timerid;
11301         } else if (arg3 == 0) {
11302             ret = -TARGET_EINVAL;
11303         } else {
11304             timer_t htimer = g_posix_timers[timerid];
11305             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11306 
11307             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11308                 return -TARGET_EFAULT;
11309             }
11310             ret = get_errno(
11311                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11312             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11313                 return -TARGET_EFAULT;
11314             }
11315         }
11316         return ret;
11317     }
11318 #endif
11319 
11320 #ifdef TARGET_NR_timer_gettime
11321     case TARGET_NR_timer_gettime:
11322     {
11323         /* args: timer_t timerid, struct itimerspec *curr_value */
11324         target_timer_t timerid = get_timer_id(arg1);
11325 
11326         if (timerid < 0) {
11327             ret = timerid;
11328         } else if (!arg2) {
11329             ret = -TARGET_EFAULT;
11330         } else {
11331             timer_t htimer = g_posix_timers[timerid];
11332             struct itimerspec hspec;
11333             ret = get_errno(timer_gettime(htimer, &hspec));
11334 
11335             if (host_to_target_itimerspec(arg2, &hspec)) {
11336                 ret = -TARGET_EFAULT;
11337             }
11338         }
11339         return ret;
11340     }
11341 #endif
11342 
11343 #ifdef TARGET_NR_timer_getoverrun
11344     case TARGET_NR_timer_getoverrun:
11345     {
11346         /* args: timer_t timerid */
11347         target_timer_t timerid = get_timer_id(arg1);
11348 
11349         if (timerid < 0) {
11350             ret = timerid;
11351         } else {
11352             timer_t htimer = g_posix_timers[timerid];
11353             ret = get_errno(timer_getoverrun(htimer));
11354         }
11356         return ret;
11357     }
11358 #endif
11359 
11360 #ifdef TARGET_NR_timer_delete
11361     case TARGET_NR_timer_delete:
11362     {
11363         /* args: timer_t timerid */
11364         target_timer_t timerid = get_timer_id(arg1);
11365 
11366         if (timerid < 0) {
11367             ret = timerid;
11368         } else {
11369             timer_t htimer = g_posix_timers[timerid];
11370             ret = get_errno(timer_delete(htimer));
11371             g_posix_timers[timerid] = 0;
11372         }
11373         return ret;
11374     }
11375 #endif
11376 
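    /* timerfd: the file descriptor is used as-is; only the itimerspec
     * arguments are converted between guest and host layouts.
     */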
11377 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11378     case TARGET_NR_timerfd_create:
11379         return get_errno(timerfd_create(arg1,
11380                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11381 #endif
11382 
11383 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11384     case TARGET_NR_timerfd_gettime:
11385         {
11386             struct itimerspec its_curr;
11387 
11388             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11389 
11390             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11391                 return -TARGET_EFAULT;
11392             }
11393         }
11394         return ret;
11395 #endif
11396 
11397 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11398     case TARGET_NR_timerfd_settime:
11399         {
11400             struct itimerspec its_new, its_old, *p_new;
11401 
11402             if (arg3) {
11403                 if (target_to_host_itimerspec(&its_new, arg3)) {
11404                     return -TARGET_EFAULT;
11405                 }
11406                 p_new = &its_new;
11407             } else {
11408                 p_new = NULL;
11409             }
11410 
11411             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11412 
11413             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11414                 return -TARGET_EFAULT;
11415             }
11416         }
11417         return ret;
11418 #endif
11419 
11420 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11421     case TARGET_NR_ioprio_get:
11422         return get_errno(ioprio_get(arg1, arg2));
11423 #endif
11424 
11425 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11426     case TARGET_NR_ioprio_set:
11427         return get_errno(ioprio_set(arg1, arg2, arg3));
11428 #endif
11429 
11430 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11431     case TARGET_NR_setns:
11432         return get_errno(setns(arg1, arg2));
11433 #endif
11434 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11435     case TARGET_NR_unshare:
11436         return get_errno(unshare(arg1));
11437 #endif
11438 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11439     case TARGET_NR_kcmp:
11440         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11441 #endif
11442 #ifdef TARGET_NR_swapcontext
11443     case TARGET_NR_swapcontext:
11444         /* PowerPC specific.  */
11445         return do_swapcontext(cpu_env, arg1, arg2, arg3);
11446 #endif
11447 
11448     default:
11449         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11450         return -TARGET_ENOSYS;
11451     }
11452     return ret;
11453 }
11454 
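/* Thin wrapper around do_syscall1(): it emits the trace events and, when
 * -strace is enabled, prints the syscall and its return value around the
 * actual dispatch.
 */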
11455 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11456                     abi_long arg2, abi_long arg3, abi_long arg4,
11457                     abi_long arg5, abi_long arg6, abi_long arg7,
11458                     abi_long arg8)
11459 {
11460     CPUState *cpu = ENV_GET_CPU(cpu_env);
11461     abi_long ret;
11462 
11463 #ifdef DEBUG_ERESTARTSYS
11464     /* Debug-only code for exercising the syscall-restart code paths
11465      * in the per-architecture cpu main loops: restart every syscall
11466      * the guest makes once before letting it through.
11467      */
11468     {
11469         static bool flag;
11470         flag = !flag;
11471         if (flag) {
11472             return -TARGET_ERESTARTSYS;
11473         }
11474     }
11475 #endif
11476 
11477     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11478                              arg5, arg6, arg7, arg8);
11479 
11480     if (unlikely(do_strace)) {
11481         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11482         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11483                           arg5, arg6, arg7, arg8);
11484         print_syscall_ret(num, ret);
11485     } else {
11486         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11487                           arg5, arg6, arg7, arg8);
11488     }
11489 
11490     trace_guest_user_syscall_ret(cpu, num, ret);
11491     return ret;
11492 }
11493