xref: /openbmc/qemu/linux-user/syscall.c (revision 83eb6e50)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
77 
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
84 
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #include "linux_loop.h"
106 #include "uname.h"
107 
108 #include "qemu.h"
109 #include "fd-trans.h"
110 
111 #ifndef CLONE_IO
112 #define CLONE_IO                0x80000000      /* Clone io context */
113 #endif
114 
115 /* We can't directly call the host clone syscall, because this will
116  * badly confuse libc (breaking mutexes, for example). So we must
117  * divide clone flags into:
118  *  * flag combinations that look like pthread_create()
119  *  * flag combinations that look like fork()
120  *  * flags we can implement within QEMU itself
121  *  * flags we can't support and will return an error for
122  */
123 /* For thread creation, all these flags must be present; for
124  * fork, none must be present.
125  */
126 #define CLONE_THREAD_FLAGS                              \
127     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
128      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
129 
130 /* These flags are ignored:
131  * CLONE_DETACHED is now ignored by the kernel;
132  * CLONE_IO is just an optimisation hint to the I/O scheduler
133  */
134 #define CLONE_IGNORED_FLAGS                     \
135     (CLONE_DETACHED | CLONE_IO)
136 
137 /* Flags for fork which we can implement within QEMU itself */
138 #define CLONE_OPTIONAL_FORK_FLAGS               \
139     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
140      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
141 
142 /* Flags for thread creation which we can implement within QEMU itself */
143 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
144     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
145      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
146 
147 #define CLONE_INVALID_FORK_FLAGS                                        \
148     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
149 
150 #define CLONE_INVALID_THREAD_FLAGS                                      \
151     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
152        CLONE_IGNORED_FLAGS))
153 
154 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
155  * have almost all been allocated. We cannot support any of
156  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
157  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
158  * The checks against the invalid thread masks above will catch these.
159  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
160  */
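
/* Added annotation (not in the upstream source), for illustration: a typical
 * glibc pthread_create() issues clone() with CLONE_VM | CLONE_FS |
 * CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS |
 * CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID, i.e. all of CLONE_THREAD_FLAGS
 * plus a subset of CLONE_OPTIONAL_THREAD_FLAGS, so it passes the
 * CLONE_INVALID_THREAD_FLAGS check above and is treated as thread creation.
 */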
161 
162 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
163  * once. This exercises the codepaths for restart.
164  */
165 //#define DEBUG_ERESTARTSYS
166 
167 //#include <linux/msdos_fs.h>
168 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
169 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
170 
171 #undef _syscall0
172 #undef _syscall1
173 #undef _syscall2
174 #undef _syscall3
175 #undef _syscall4
176 #undef _syscall5
177 #undef _syscall6
178 
179 #define _syscall0(type,name)		\
180 static type name (void)			\
181 {					\
182 	return syscall(__NR_##name);	\
183 }
184 
185 #define _syscall1(type,name,type1,arg1)		\
186 static type name (type1 arg1)			\
187 {						\
188 	return syscall(__NR_##name, arg1);	\
189 }
190 
191 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
192 static type name (type1 arg1,type2 arg2)		\
193 {							\
194 	return syscall(__NR_##name, arg1, arg2);	\
195 }
196 
197 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
198 static type name (type1 arg1,type2 arg2,type3 arg3)		\
199 {								\
200 	return syscall(__NR_##name, arg1, arg2, arg3);		\
201 }
202 
203 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
204 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
205 {										\
206 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
207 }
208 
209 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
210 		  type5,arg5)							\
211 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
212 {										\
213 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
214 }
215 
216 
217 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
218 		  type5,arg5,type6,arg6)					\
219 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
220                   type6 arg6)							\
221 {										\
222 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
223 }
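
/* Added annotation (not in the upstream source): as an example of how these
 * macros are used below (see the exit_group declaration further down),
 *     _syscall1(int, exit_group, int, error_code)
 * expands to
 *     static int exit_group(int error_code)
 *     {
 *         return syscall(__NR_exit_group, error_code);
 *     }
 */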
224 
225 
226 #define __NR_sys_uname __NR_uname
227 #define __NR_sys_getcwd1 __NR_getcwd
228 #define __NR_sys_getdents __NR_getdents
229 #define __NR_sys_getdents64 __NR_getdents64
230 #define __NR_sys_getpriority __NR_getpriority
231 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
232 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
233 #define __NR_sys_syslog __NR_syslog
234 #define __NR_sys_futex __NR_futex
235 #define __NR_sys_inotify_init __NR_inotify_init
236 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
237 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
238 
239 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
240 #define __NR__llseek __NR_lseek
241 #endif
242 
243 /* Newer kernel ports have llseek() instead of _llseek() */
244 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
245 #define TARGET_NR__llseek TARGET_NR_llseek
246 #endif
247 
248 #ifdef __NR_gettid
249 _syscall0(int, gettid)
250 #else
251 /* This is a replacement for the host gettid() and must return a host
252    errno. */
253 static int gettid(void) {
254     return -ENOSYS;
255 }
256 #endif
257 
258 /* For the 64-bit guest on 32-bit host case we must emulate
259  * getdents using getdents64, because otherwise the host
260  * might hand us back more dirent records than we can fit
261  * into the guest buffer after structure format conversion.
262  * Otherwise we emulate getdents with getdents if the host has it.
263  */
264 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
265 #define EMULATE_GETDENTS_WITH_GETDENTS
266 #endif
267 
268 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
269 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
270 #endif
271 #if (defined(TARGET_NR_getdents) && \
272       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
273     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
274 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
275 #endif
276 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
277 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
278           loff_t *, res, uint, wh);
279 #endif
280 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
281 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
282           siginfo_t *, uinfo)
283 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
284 #ifdef __NR_exit_group
285 _syscall1(int,exit_group,int,error_code)
286 #endif
287 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
288 _syscall1(int,set_tid_address,int *,tidptr)
289 #endif
290 #if defined(TARGET_NR_futex) && defined(__NR_futex)
291 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
292           const struct timespec *,timeout,int *,uaddr2,int,val3)
293 #endif
294 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
295 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
296           unsigned long *, user_mask_ptr);
297 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
298 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
299           unsigned long *, user_mask_ptr);
300 #define __NR_sys_getcpu __NR_getcpu
301 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
302 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
303           void *, arg);
304 _syscall2(int, capget, struct __user_cap_header_struct *, header,
305           struct __user_cap_data_struct *, data);
306 _syscall2(int, capset, struct __user_cap_header_struct *, header,
307           struct __user_cap_data_struct *, data);
308 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
309 _syscall2(int, ioprio_get, int, which, int, who)
310 #endif
311 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
312 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
313 #endif
314 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
315 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
316 #endif
317 
318 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
319 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
320           unsigned long, idx1, unsigned long, idx2)
321 #endif
322 
323 static bitmask_transtbl fcntl_flags_tbl[] = {
324   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
325   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
326   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
327   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
328   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
329   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
330   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
331   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
332   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
333   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
334   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
335   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
336   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
337 #if defined(O_DIRECT)
338   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
339 #endif
340 #if defined(O_NOATIME)
341   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
342 #endif
343 #if defined(O_CLOEXEC)
344   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
345 #endif
346 #if defined(O_PATH)
347   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
348 #endif
349 #if defined(O_TMPFILE)
350   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
351 #endif
352   /* Don't terminate the list prematurely on 64-bit host+guest.  */
353 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
354   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
355 #endif
356   { 0, 0, 0, 0 }
357 };
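
/* Added annotation (not in the upstream source): each entry above is
 * { target_mask, target_bits, host_mask, host_bits }.  Helpers such as
 * target_to_host_bitmask() walk the table and, for every entry whose target
 * mask/bits match the guest flag word, OR in the corresponding host bits
 * (host_to_target_bitmask() does the reverse); the all-zero entry terminates
 * the walk.
 */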
358 
359 static int sys_getcwd1(char *buf, size_t size)
360 {
361   if (getcwd(buf, size) == NULL) {
362       /* getcwd() sets errno */
363       return (-1);
364   }
365   return strlen(buf)+1;
366 }
367 
368 #ifdef TARGET_NR_utimensat
369 #if defined(__NR_utimensat)
370 #define __NR_sys_utimensat __NR_utimensat
371 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
372           const struct timespec *,tsp,int,flags)
373 #else
374 static int sys_utimensat(int dirfd, const char *pathname,
375                          const struct timespec times[2], int flags)
376 {
377     errno = ENOSYS;
378     return -1;
379 }
380 #endif
381 #endif /* TARGET_NR_utimensat */
382 
383 #ifdef TARGET_NR_renameat2
384 #if defined(__NR_renameat2)
385 #define __NR_sys_renameat2 __NR_renameat2
386 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
387           const char *, new, unsigned int, flags)
388 #else
389 static int sys_renameat2(int oldfd, const char *old,
390                          int newfd, const char *new, int flags)
391 {
392     if (flags == 0) {
393         return renameat(oldfd, old, newfd, new);
394     }
395     errno = ENOSYS;
396     return -1;
397 }
398 #endif
399 #endif /* TARGET_NR_renameat2 */
400 
401 #ifdef CONFIG_INOTIFY
402 #include <sys/inotify.h>
403 
404 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
405 static int sys_inotify_init(void)
406 {
407   return (inotify_init());
408 }
409 #endif
410 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
411 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
412 {
413   return (inotify_add_watch(fd, pathname, mask));
414 }
415 #endif
416 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
417 static int sys_inotify_rm_watch(int fd, int32_t wd)
418 {
419   return (inotify_rm_watch(fd, wd));
420 }
421 #endif
422 #ifdef CONFIG_INOTIFY1
423 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
424 static int sys_inotify_init1(int flags)
425 {
426   return (inotify_init1(flags));
427 }
428 #endif
429 #endif
430 #else
431 /* Userspace can usually survive runtime without inotify */
432 #undef TARGET_NR_inotify_init
433 #undef TARGET_NR_inotify_init1
434 #undef TARGET_NR_inotify_add_watch
435 #undef TARGET_NR_inotify_rm_watch
436 #endif /* CONFIG_INOTIFY  */
437 
438 #if defined(TARGET_NR_prlimit64)
439 #ifndef __NR_prlimit64
440 # define __NR_prlimit64 -1
441 #endif
442 #define __NR_sys_prlimit64 __NR_prlimit64
443 /* The glibc rlimit structure may not be that used by the underlying syscall */
444 struct host_rlimit64 {
445     uint64_t rlim_cur;
446     uint64_t rlim_max;
447 };
448 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
449           const struct host_rlimit64 *, new_limit,
450           struct host_rlimit64 *, old_limit)
451 #endif
452 
453 
454 #if defined(TARGET_NR_timer_create)
455 /* Maximum of 32 active POSIX timers allowed at any one time. */
456 static timer_t g_posix_timers[32] = { 0, } ;
457 
458 static inline int next_free_host_timer(void)
459 {
460     int k ;
461     /* FIXME: Does finding the next free slot require a lock? */
462     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
463         if (g_posix_timers[k] == 0) {
464             g_posix_timers[k] = (timer_t) 1;
465             return k;
466         }
467     }
468     return -1;
469 }
470 #endif
471 
472 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
473 #ifdef TARGET_ARM
474 static inline int regpairs_aligned(void *cpu_env, int num)
475 {
476     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
477 }
478 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
479 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
480 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
481 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
482  * of registers which translates to the same as ARM/MIPS, because we start with
483  * r3 as arg1 */
484 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
485 #elif defined(TARGET_SH4)
486 /* SH4 doesn't align register pairs, except for p{read,write}64 */
487 static inline int regpairs_aligned(void *cpu_env, int num)
488 {
489     switch (num) {
490     case TARGET_NR_pread64:
491     case TARGET_NR_pwrite64:
492         return 1;
493 
494     default:
495         return 0;
496     }
497 }
498 #elif defined(TARGET_XTENSA)
499 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
500 #else
501 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
502 #endif
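
/* Added annotation (not in the upstream source): regpairs_aligned() is
 * consulted by syscalls that take a 64-bit argument split across two guest
 * registers (e.g. pread64/pwrite64).  When it returns 1 the caller skips one
 * argument slot so that the low/high halves start on an even register pair,
 * matching the guest ABI's alignment rule.
 */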
503 
504 #define ERRNO_TABLE_SIZE 1200
505 
506 /* target_to_host_errno_table[] is initialized from
507  * host_to_target_errno_table[] in syscall_init(). */
508 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
509 };
510 
511 /*
512  * This list is the union of errno values overridden in asm-<arch>/errno.h
513  * minus the errnos that are not actually generic to all archs.
514  */
515 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
516     [EAGAIN]		= TARGET_EAGAIN,
517     [EIDRM]		= TARGET_EIDRM,
518     [ECHRNG]		= TARGET_ECHRNG,
519     [EL2NSYNC]		= TARGET_EL2NSYNC,
520     [EL3HLT]		= TARGET_EL3HLT,
521     [EL3RST]		= TARGET_EL3RST,
522     [ELNRNG]		= TARGET_ELNRNG,
523     [EUNATCH]		= TARGET_EUNATCH,
524     [ENOCSI]		= TARGET_ENOCSI,
525     [EL2HLT]		= TARGET_EL2HLT,
526     [EDEADLK]		= TARGET_EDEADLK,
527     [ENOLCK]		= TARGET_ENOLCK,
528     [EBADE]		= TARGET_EBADE,
529     [EBADR]		= TARGET_EBADR,
530     [EXFULL]		= TARGET_EXFULL,
531     [ENOANO]		= TARGET_ENOANO,
532     [EBADRQC]		= TARGET_EBADRQC,
533     [EBADSLT]		= TARGET_EBADSLT,
534     [EBFONT]		= TARGET_EBFONT,
535     [ENOSTR]		= TARGET_ENOSTR,
536     [ENODATA]		= TARGET_ENODATA,
537     [ETIME]		= TARGET_ETIME,
538     [ENOSR]		= TARGET_ENOSR,
539     [ENONET]		= TARGET_ENONET,
540     [ENOPKG]		= TARGET_ENOPKG,
541     [EREMOTE]		= TARGET_EREMOTE,
542     [ENOLINK]		= TARGET_ENOLINK,
543     [EADV]		= TARGET_EADV,
544     [ESRMNT]		= TARGET_ESRMNT,
545     [ECOMM]		= TARGET_ECOMM,
546     [EPROTO]		= TARGET_EPROTO,
547     [EDOTDOT]		= TARGET_EDOTDOT,
548     [EMULTIHOP]		= TARGET_EMULTIHOP,
549     [EBADMSG]		= TARGET_EBADMSG,
550     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
551     [EOVERFLOW]		= TARGET_EOVERFLOW,
552     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
553     [EBADFD]		= TARGET_EBADFD,
554     [EREMCHG]		= TARGET_EREMCHG,
555     [ELIBACC]		= TARGET_ELIBACC,
556     [ELIBBAD]		= TARGET_ELIBBAD,
557     [ELIBSCN]		= TARGET_ELIBSCN,
558     [ELIBMAX]		= TARGET_ELIBMAX,
559     [ELIBEXEC]		= TARGET_ELIBEXEC,
560     [EILSEQ]		= TARGET_EILSEQ,
561     [ENOSYS]		= TARGET_ENOSYS,
562     [ELOOP]		= TARGET_ELOOP,
563     [ERESTART]		= TARGET_ERESTART,
564     [ESTRPIPE]		= TARGET_ESTRPIPE,
565     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
566     [EUSERS]		= TARGET_EUSERS,
567     [ENOTSOCK]		= TARGET_ENOTSOCK,
568     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
569     [EMSGSIZE]		= TARGET_EMSGSIZE,
570     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
571     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
572     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
573     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
574     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
575     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
576     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
577     [EADDRINUSE]	= TARGET_EADDRINUSE,
578     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
579     [ENETDOWN]		= TARGET_ENETDOWN,
580     [ENETUNREACH]	= TARGET_ENETUNREACH,
581     [ENETRESET]		= TARGET_ENETRESET,
582     [ECONNABORTED]	= TARGET_ECONNABORTED,
583     [ECONNRESET]	= TARGET_ECONNRESET,
584     [ENOBUFS]		= TARGET_ENOBUFS,
585     [EISCONN]		= TARGET_EISCONN,
586     [ENOTCONN]		= TARGET_ENOTCONN,
587     [EUCLEAN]		= TARGET_EUCLEAN,
588     [ENOTNAM]		= TARGET_ENOTNAM,
589     [ENAVAIL]		= TARGET_ENAVAIL,
590     [EISNAM]		= TARGET_EISNAM,
591     [EREMOTEIO]		= TARGET_EREMOTEIO,
592     [EDQUOT]            = TARGET_EDQUOT,
593     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
594     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
595     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
596     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
597     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
598     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
599     [EALREADY]		= TARGET_EALREADY,
600     [EINPROGRESS]	= TARGET_EINPROGRESS,
601     [ESTALE]		= TARGET_ESTALE,
602     [ECANCELED]		= TARGET_ECANCELED,
603     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
604     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
605 #ifdef ENOKEY
606     [ENOKEY]		= TARGET_ENOKEY,
607 #endif
608 #ifdef EKEYEXPIRED
609     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
610 #endif
611 #ifdef EKEYREVOKED
612     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
613 #endif
614 #ifdef EKEYREJECTED
615     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
616 #endif
617 #ifdef EOWNERDEAD
618     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
619 #endif
620 #ifdef ENOTRECOVERABLE
621     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
622 #endif
623 #ifdef ENOMSG
624     [ENOMSG]            = TARGET_ENOMSG,
625 #endif
626 #ifdef ERFKILL
627     [ERFKILL]           = TARGET_ERFKILL,
628 #endif
629 #ifdef EHWPOISON
630     [EHWPOISON]         = TARGET_EHWPOISON,
631 #endif
632 };
633 
634 static inline int host_to_target_errno(int err)
635 {
636     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
637         host_to_target_errno_table[err]) {
638         return host_to_target_errno_table[err];
639     }
640     return err;
641 }
642 
643 static inline int target_to_host_errno(int err)
644 {
645     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
646         target_to_host_errno_table[err]) {
647         return target_to_host_errno_table[err];
648     }
649     return err;
650 }
651 
652 static inline abi_long get_errno(abi_long ret)
653 {
654     if (ret == -1)
655         return -host_to_target_errno(errno);
656     else
657         return ret;
658 }
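
/* Added annotation (not in the upstream source): the usual calling pattern in
 * this file is
 *     ret = get_errno(some_host_call(...));   // some_host_call is a placeholder
 *     if (is_error(ret)) { ... ret already holds -TARGET_Exxx ... }
 * i.e. a host failure of -1/errno is converted into a negative target errno
 * value that can be returned to the guest directly.
 */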
659 
660 const char *target_strerror(int err)
661 {
662     if (err == TARGET_ERESTARTSYS) {
663         return "To be restarted";
664     }
665     if (err == TARGET_QEMU_ESIGRETURN) {
666         return "Successful exit from sigreturn";
667     }
668 
669     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
670         return NULL;
671     }
672     return strerror(target_to_host_errno(err));
673 }
674 
675 #define safe_syscall0(type, name) \
676 static type safe_##name(void) \
677 { \
678     return safe_syscall(__NR_##name); \
679 }
680 
681 #define safe_syscall1(type, name, type1, arg1) \
682 static type safe_##name(type1 arg1) \
683 { \
684     return safe_syscall(__NR_##name, arg1); \
685 }
686 
687 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
688 static type safe_##name(type1 arg1, type2 arg2) \
689 { \
690     return safe_syscall(__NR_##name, arg1, arg2); \
691 }
692 
693 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
694 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
695 { \
696     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
697 }
698 
699 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
700     type4, arg4) \
701 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
702 { \
703     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
704 }
705 
706 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
707     type4, arg4, type5, arg5) \
708 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
709     type5 arg5) \
710 { \
711     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
712 }
713 
714 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
715     type4, arg4, type5, arg5, type6, arg6) \
716 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
717     type5 arg5, type6 arg6) \
718 { \
719     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
720 }
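
/* Added annotation (not in the upstream source): unlike a plain syscall(),
 * safe_syscall() (see its description in qemu.h) cooperates with the signal
 * handling code: if a guest signal arrives before the host syscall has really
 * started, the wrapper fails with errno set to TARGET_ERESTARTSYS, so
 * get_errno() hands back -TARGET_ERESTARTSYS and the guest syscall can be
 * restarted once the signal has been delivered.  That is why the blocking
 * syscalls below are issued through these safe_* wrappers.
 */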
721 
722 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
723 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
724 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
725               int, flags, mode_t, mode)
726 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
727               struct rusage *, rusage)
728 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
729               int, options, struct rusage *, rusage)
730 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
731 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
732               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
733 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
734               struct timespec *, tsp, const sigset_t *, sigmask,
735               size_t, sigsetsize)
736 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
737               int, maxevents, int, timeout, const sigset_t *, sigmask,
738               size_t, sigsetsize)
739 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
740               const struct timespec *,timeout,int *,uaddr2,int,val3)
741 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
742 safe_syscall2(int, kill, pid_t, pid, int, sig)
743 safe_syscall2(int, tkill, int, tid, int, sig)
744 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
745 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
746 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
747 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
748               unsigned long, pos_l, unsigned long, pos_h)
749 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
750               unsigned long, pos_l, unsigned long, pos_h)
751 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
752               socklen_t, addrlen)
753 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
754               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
755 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
756               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
757 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
758 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
759 safe_syscall2(int, flock, int, fd, int, operation)
760 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
761               const struct timespec *, uts, size_t, sigsetsize)
762 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
763               int, flags)
764 safe_syscall2(int, nanosleep, const struct timespec *, req,
765               struct timespec *, rem)
766 #ifdef TARGET_NR_clock_nanosleep
767 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
768               const struct timespec *, req, struct timespec *, rem)
769 #endif
770 #ifdef __NR_msgsnd
771 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
772               int, flags)
773 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
774               long, msgtype, int, flags)
775 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
776               unsigned, nsops, const struct timespec *, timeout)
777 #else
778 /* This host kernel architecture uses a single ipc syscall; fake up
779  * wrappers for the sub-operations to hide this implementation detail.
780  * Annoyingly we can't include linux/ipc.h to get the constant definitions
781  * for the call parameter because some structs in there conflict with the
782  * sys/ipc.h ones. So we just define them here, and rely on them being
783  * the same for all host architectures.
784  */
785 #define Q_SEMTIMEDOP 4
786 #define Q_MSGSND 11
787 #define Q_MSGRCV 12
788 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
789 
790 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
791               void *, ptr, long, fifth)
792 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
793 {
794     return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
795 }
796 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
797 {
798     return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
799 }
800 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
801                            const struct timespec *timeout)
802 {
803     return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
804                     (long)timeout);
805 }
806 #endif
807 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
808 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
809               size_t, len, unsigned, prio, const struct timespec *, timeout)
810 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
811               size_t, len, unsigned *, prio, const struct timespec *, timeout)
812 #endif
813 /* We do ioctl like this rather than via safe_syscall3 to preserve the
814  * "third argument might be integer or pointer or not present" behaviour of
815  * the libc function.
816  */
817 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
818 /* Similarly for fcntl. Note that callers must always:
819  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
820  *  use the flock64 struct rather than unsuffixed flock
821  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
822  */
823 #ifdef __NR_fcntl64
824 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
825 #else
826 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
827 #endif
828 
829 static inline int host_to_target_sock_type(int host_type)
830 {
831     int target_type;
832 
833     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
834     case SOCK_DGRAM:
835         target_type = TARGET_SOCK_DGRAM;
836         break;
837     case SOCK_STREAM:
838         target_type = TARGET_SOCK_STREAM;
839         break;
840     default:
841         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
842         break;
843     }
844 
845 #if defined(SOCK_CLOEXEC)
846     if (host_type & SOCK_CLOEXEC) {
847         target_type |= TARGET_SOCK_CLOEXEC;
848     }
849 #endif
850 
851 #if defined(SOCK_NONBLOCK)
852     if (host_type & SOCK_NONBLOCK) {
853         target_type |= TARGET_SOCK_NONBLOCK;
854     }
855 #endif
856 
857     return target_type;
858 }
859 
860 static abi_ulong target_brk;
861 static abi_ulong target_original_brk;
862 static abi_ulong brk_page;
863 
864 void target_set_brk(abi_ulong new_brk)
865 {
866     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
867     brk_page = HOST_PAGE_ALIGN(target_brk);
868 }
869 
870 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
871 #define DEBUGF_BRK(message, args...)
872 
873 /* do_brk() must return target values and target errnos. */
874 abi_long do_brk(abi_ulong new_brk)
875 {
876     abi_long mapped_addr;
877     abi_ulong new_alloc_size;
878 
879     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
880 
881     if (!new_brk) {
882         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
883         return target_brk;
884     }
885     if (new_brk < target_original_brk) {
886         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
887                    target_brk);
888         return target_brk;
889     }
890 
891     /* If the new brk is less than the highest page reserved to the
892      * target heap allocation, set it and we're almost done...  */
893     if (new_brk <= brk_page) {
894         /* Heap contents are initialized to zero, as for anonymous
895          * mapped pages.  */
896         if (new_brk > target_brk) {
897             memset(g2h(target_brk), 0, new_brk - target_brk);
898         }
899         target_brk = new_brk;
900         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
901         return target_brk;
902     }
903 
904     /* We need to allocate more memory after the brk... Note that
905      * we don't use MAP_FIXED because that will map over the top of
906      * any existing mapping (like the one with the host libc or qemu
907      * itself); instead we treat "mapped but at wrong address" as
908      * a failure and unmap again.
909      */
910     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
911     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
912                                         PROT_READ|PROT_WRITE,
913                                         MAP_ANON|MAP_PRIVATE, 0, 0));
914 
915     if (mapped_addr == brk_page) {
916         /* Heap contents are initialized to zero, as for anonymous
917          * mapped pages.  Technically the new pages are already
918          * initialized to zero since they *are* anonymous mapped
919          * pages, however we have to take care with the contents that
920          * come from the remaining part of the previous page: it may
921          * contain garbage data due to a previous heap usage (grown
922          * then shrunk).  */
923         memset(g2h(target_brk), 0, brk_page - target_brk);
924 
925         target_brk = new_brk;
926         brk_page = HOST_PAGE_ALIGN(target_brk);
927         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
928             target_brk);
929         return target_brk;
930     } else if (mapped_addr != -1) {
931         /* Mapped but at wrong address, meaning there wasn't actually
932          * enough space for this brk.
933          */
934         target_munmap(mapped_addr, new_alloc_size);
935         mapped_addr = -1;
936         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
937     }
938     else {
939         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
940     }
941 
942 #if defined(TARGET_ALPHA)
943     /* We (partially) emulate OSF/1 on Alpha, which requires we
944        return a proper errno, not an unchanged brk value.  */
945     return -TARGET_ENOMEM;
946 #endif
947     /* For everything else, return the previous break. */
948     return target_brk;
949 }
950 
951 static inline abi_long copy_from_user_fdset(fd_set *fds,
952                                             abi_ulong target_fds_addr,
953                                             int n)
954 {
955     int i, nw, j, k;
956     abi_ulong b, *target_fds;
957 
958     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
959     if (!(target_fds = lock_user(VERIFY_READ,
960                                  target_fds_addr,
961                                  sizeof(abi_ulong) * nw,
962                                  1)))
963         return -TARGET_EFAULT;
964 
965     FD_ZERO(fds);
966     k = 0;
967     for (i = 0; i < nw; i++) {
968         /* grab the abi_ulong */
969         __get_user(b, &target_fds[i]);
970         for (j = 0; j < TARGET_ABI_BITS; j++) {
971             /* check the bit inside the abi_ulong */
972             if ((b >> j) & 1)
973                 FD_SET(k, fds);
974             k++;
975         }
976     }
977 
978     unlock_user(target_fds, target_fds_addr, 0);
979 
980     return 0;
981 }
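
/* Added annotation (not in the upstream source): the bit-by-bit copy above is
 * needed because the guest packs descriptor bits into abi_ulong words of
 * TARGET_ABI_BITS in guest byte order, whereas the host fd_set layout depends
 * on the host's word size and endianness, so a straight memcpy() would
 * scramble the set whenever those differ.
 */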
982 
983 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
984                                                  abi_ulong target_fds_addr,
985                                                  int n)
986 {
987     if (target_fds_addr) {
988         if (copy_from_user_fdset(fds, target_fds_addr, n))
989             return -TARGET_EFAULT;
990         *fds_ptr = fds;
991     } else {
992         *fds_ptr = NULL;
993     }
994     return 0;
995 }
996 
997 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
998                                           const fd_set *fds,
999                                           int n)
1000 {
1001     int i, nw, j, k;
1002     abi_long v;
1003     abi_ulong *target_fds;
1004 
1005     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1006     if (!(target_fds = lock_user(VERIFY_WRITE,
1007                                  target_fds_addr,
1008                                  sizeof(abi_ulong) * nw,
1009                                  0)))
1010         return -TARGET_EFAULT;
1011 
1012     k = 0;
1013     for (i = 0; i < nw; i++) {
1014         v = 0;
1015         for (j = 0; j < TARGET_ABI_BITS; j++) {
1016             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1017             k++;
1018         }
1019         __put_user(v, &target_fds[i]);
1020     }
1021 
1022     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1023 
1024     return 0;
1025 }
1026 
1027 #if defined(__alpha__)
1028 #define HOST_HZ 1024
1029 #else
1030 #define HOST_HZ 100
1031 #endif
1032 
1033 static inline abi_long host_to_target_clock_t(long ticks)
1034 {
1035 #if HOST_HZ == TARGET_HZ
1036     return ticks;
1037 #else
1038     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1039 #endif
1040 }
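
/* Added annotation (not in the upstream source), worked example: on an alpha
 * host (HOST_HZ 1024) emulating a 100 Hz target, 2048 host ticks are reported
 * to the guest as 2048 * 100 / 1024 = 200 target ticks.
 */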
1041 
1042 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1043                                              const struct rusage *rusage)
1044 {
1045     struct target_rusage *target_rusage;
1046 
1047     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1048         return -TARGET_EFAULT;
1049     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1050     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1051     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1052     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1053     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1054     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1055     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1056     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1057     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1058     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1059     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1060     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1061     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1062     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1063     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1064     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1065     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1066     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1067     unlock_user_struct(target_rusage, target_addr, 1);
1068 
1069     return 0;
1070 }
1071 
1072 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1073 {
1074     abi_ulong target_rlim_swap;
1075     rlim_t result;
1076 
1077     target_rlim_swap = tswapal(target_rlim);
1078     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1079         return RLIM_INFINITY;
1080 
1081     result = target_rlim_swap;
1082     if (target_rlim_swap != (rlim_t)result)
1083         return RLIM_INFINITY;
1084 
1085     return result;
1086 }
1087 
1088 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1089 {
1090     abi_ulong target_rlim_swap;
1091     abi_ulong result;
1092 
1093     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1094         target_rlim_swap = TARGET_RLIM_INFINITY;
1095     else
1096         target_rlim_swap = rlim;
1097     result = tswapal(target_rlim_swap);
1098 
1099     return result;
1100 }
1101 
1102 static inline int target_to_host_resource(int code)
1103 {
1104     switch (code) {
1105     case TARGET_RLIMIT_AS:
1106         return RLIMIT_AS;
1107     case TARGET_RLIMIT_CORE:
1108         return RLIMIT_CORE;
1109     case TARGET_RLIMIT_CPU:
1110         return RLIMIT_CPU;
1111     case TARGET_RLIMIT_DATA:
1112         return RLIMIT_DATA;
1113     case TARGET_RLIMIT_FSIZE:
1114         return RLIMIT_FSIZE;
1115     case TARGET_RLIMIT_LOCKS:
1116         return RLIMIT_LOCKS;
1117     case TARGET_RLIMIT_MEMLOCK:
1118         return RLIMIT_MEMLOCK;
1119     case TARGET_RLIMIT_MSGQUEUE:
1120         return RLIMIT_MSGQUEUE;
1121     case TARGET_RLIMIT_NICE:
1122         return RLIMIT_NICE;
1123     case TARGET_RLIMIT_NOFILE:
1124         return RLIMIT_NOFILE;
1125     case TARGET_RLIMIT_NPROC:
1126         return RLIMIT_NPROC;
1127     case TARGET_RLIMIT_RSS:
1128         return RLIMIT_RSS;
1129     case TARGET_RLIMIT_RTPRIO:
1130         return RLIMIT_RTPRIO;
1131     case TARGET_RLIMIT_SIGPENDING:
1132         return RLIMIT_SIGPENDING;
1133     case TARGET_RLIMIT_STACK:
1134         return RLIMIT_STACK;
1135     default:
1136         return code;
1137     }
1138 }
1139 
1140 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1141                                               abi_ulong target_tv_addr)
1142 {
1143     struct target_timeval *target_tv;
1144 
1145     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1146         return -TARGET_EFAULT;
1147 
1148     __get_user(tv->tv_sec, &target_tv->tv_sec);
1149     __get_user(tv->tv_usec, &target_tv->tv_usec);
1150 
1151     unlock_user_struct(target_tv, target_tv_addr, 0);
1152 
1153     return 0;
1154 }
1155 
1156 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1157                                             const struct timeval *tv)
1158 {
1159     struct target_timeval *target_tv;
1160 
1161     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1162         return -TARGET_EFAULT;
1163 
1164     __put_user(tv->tv_sec, &target_tv->tv_sec);
1165     __put_user(tv->tv_usec, &target_tv->tv_usec);
1166 
1167     unlock_user_struct(target_tv, target_tv_addr, 1);
1168 
1169     return 0;
1170 }
1171 
1172 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1173                                                abi_ulong target_tz_addr)
1174 {
1175     struct target_timezone *target_tz;
1176 
1177     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1178         return -TARGET_EFAULT;
1179     }
1180 
1181     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1182     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1183 
1184     unlock_user_struct(target_tz, target_tz_addr, 0);
1185 
1186     return 0;
1187 }
1188 
1189 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1190 #include <mqueue.h>
1191 
1192 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1193                                               abi_ulong target_mq_attr_addr)
1194 {
1195     struct target_mq_attr *target_mq_attr;
1196 
1197     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1198                           target_mq_attr_addr, 1))
1199         return -TARGET_EFAULT;
1200 
1201     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1202     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1203     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1204     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1205 
1206     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1207 
1208     return 0;
1209 }
1210 
1211 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1212                                             const struct mq_attr *attr)
1213 {
1214     struct target_mq_attr *target_mq_attr;
1215 
1216     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1217                           target_mq_attr_addr, 0))
1218         return -TARGET_EFAULT;
1219 
1220     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1221     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1222     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1223     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1224 
1225     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1226 
1227     return 0;
1228 }
1229 #endif
1230 
1231 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1232 /* do_select() must return target values and target errnos. */
1233 static abi_long do_select(int n,
1234                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1235                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1236 {
1237     fd_set rfds, wfds, efds;
1238     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1239     struct timeval tv;
1240     struct timespec ts, *ts_ptr;
1241     abi_long ret;
1242 
1243     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1244     if (ret) {
1245         return ret;
1246     }
1247     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1248     if (ret) {
1249         return ret;
1250     }
1251     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1252     if (ret) {
1253         return ret;
1254     }
1255 
1256     if (target_tv_addr) {
1257         if (copy_from_user_timeval(&tv, target_tv_addr))
1258             return -TARGET_EFAULT;
1259         ts.tv_sec = tv.tv_sec;
1260         ts.tv_nsec = tv.tv_usec * 1000;
1261         ts_ptr = &ts;
1262     } else {
1263         ts_ptr = NULL;
1264     }
1265 
1266     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1267                                   ts_ptr, NULL));
1268 
1269     if (!is_error(ret)) {
1270         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1271             return -TARGET_EFAULT;
1272         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1273             return -TARGET_EFAULT;
1274         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1275             return -TARGET_EFAULT;
1276 
1277         if (target_tv_addr) {
1278             tv.tv_sec = ts.tv_sec;
1279             tv.tv_usec = ts.tv_nsec / 1000;
1280             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1281                 return -TARGET_EFAULT;
1282             }
1283         }
1284     }
1285 
1286     return ret;
1287 }
1288 
1289 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1290 static abi_long do_old_select(abi_ulong arg1)
1291 {
1292     struct target_sel_arg_struct *sel;
1293     abi_ulong inp, outp, exp, tvp;
1294     long nsel;
1295 
1296     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1297         return -TARGET_EFAULT;
1298     }
1299 
1300     nsel = tswapal(sel->n);
1301     inp = tswapal(sel->inp);
1302     outp = tswapal(sel->outp);
1303     exp = tswapal(sel->exp);
1304     tvp = tswapal(sel->tvp);
1305 
1306     unlock_user_struct(sel, arg1, 0);
1307 
1308     return do_select(nsel, inp, outp, exp, tvp);
1309 }
1310 #endif
1311 #endif
1312 
1313 static abi_long do_pipe2(int host_pipe[], int flags)
1314 {
1315 #ifdef CONFIG_PIPE2
1316     return pipe2(host_pipe, flags);
1317 #else
1318     return -ENOSYS;
1319 #endif
1320 }
1321 
1322 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1323                         int flags, int is_pipe2)
1324 {
1325     int host_pipe[2];
1326     abi_long ret;
1327     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1328 
1329     if (is_error(ret))
1330         return get_errno(ret);
1331 
1332     /* Several targets have special calling conventions for the original
1333        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1334     if (!is_pipe2) {
1335 #if defined(TARGET_ALPHA)
1336         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1337         return host_pipe[0];
1338 #elif defined(TARGET_MIPS)
1339         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1340         return host_pipe[0];
1341 #elif defined(TARGET_SH4)
1342         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1343         return host_pipe[0];
1344 #elif defined(TARGET_SPARC)
1345         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1346         return host_pipe[0];
1347 #endif
1348     }
1349 
1350     if (put_user_s32(host_pipe[0], pipedes)
1351         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1352         return -TARGET_EFAULT;
1353     return get_errno(ret);
1354 }
1355 
1356 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1357                                               abi_ulong target_addr,
1358                                               socklen_t len)
1359 {
1360     struct target_ip_mreqn *target_smreqn;
1361 
1362     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1363     if (!target_smreqn)
1364         return -TARGET_EFAULT;
1365     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1366     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1367     if (len == sizeof(struct target_ip_mreqn))
1368         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1369     unlock_user(target_smreqn, target_addr, 0);
1370 
1371     return 0;
1372 }
1373 
1374 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1375                                                abi_ulong target_addr,
1376                                                socklen_t len)
1377 {
1378     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1379     sa_family_t sa_family;
1380     struct target_sockaddr *target_saddr;
1381 
1382     if (fd_trans_target_to_host_addr(fd)) {
1383         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1384     }
1385 
1386     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1387     if (!target_saddr)
1388         return -TARGET_EFAULT;
1389 
1390     sa_family = tswap16(target_saddr->sa_family);
1391 
1392     /* Oops. The caller might send an incomplete sun_path; sun_path
1393      * must be terminated by \0 (see the manual page), but
1394      * unfortunately it is quite common to specify sockaddr_un
1395      * length as "strlen(x->sun_path)" while it should be
1396      * "strlen(...) + 1". We'll fix that here if needed.
1397      * The Linux kernel has a similar feature.
1398      */
1399 
1400     if (sa_family == AF_UNIX) {
1401         if (len < unix_maxlen && len > 0) {
1402             char *cp = (char*)target_saddr;
1403 
1404             if ( cp[len-1] && !cp[len] )
1405                 len++;
1406         }
1407         if (len > unix_maxlen)
1408             len = unix_maxlen;
1409     }
1410 
1411     memcpy(addr, target_saddr, len);
1412     addr->sa_family = sa_family;
1413     if (sa_family == AF_NETLINK) {
1414         struct sockaddr_nl *nladdr;
1415 
1416         nladdr = (struct sockaddr_nl *)addr;
1417         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1418         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1419     } else if (sa_family == AF_PACKET) {
1420         struct target_sockaddr_ll *lladdr;
1421 
1422         lladdr = (struct target_sockaddr_ll *)addr;
1423         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1424         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1425     }
1426     unlock_user(target_saddr, target_addr, 0);
1427 
1428     return 0;
1429 }
1430 
1431 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1432                                                struct sockaddr *addr,
1433                                                socklen_t len)
1434 {
1435     struct target_sockaddr *target_saddr;
1436 
1437     if (len == 0) {
1438         return 0;
1439     }
1440     assert(addr);
1441 
1442     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1443     if (!target_saddr)
1444         return -TARGET_EFAULT;
1445     memcpy(target_saddr, addr, len);
1446     if (len >= offsetof(struct target_sockaddr, sa_family) +
1447         sizeof(target_saddr->sa_family)) {
1448         target_saddr->sa_family = tswap16(addr->sa_family);
1449     }
1450     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1451         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1452         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1453         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1454     } else if (addr->sa_family == AF_PACKET) {
1455         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1456         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1457         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1458     } else if (addr->sa_family == AF_INET6 &&
1459                len >= sizeof(struct target_sockaddr_in6)) {
1460         struct target_sockaddr_in6 *target_in6 =
1461                (struct target_sockaddr_in6 *)target_saddr;
1462         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1463     }
1464     unlock_user(target_saddr, target_addr, len);
1465 
1466     return 0;
1467 }
1468 
1469 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1470                                            struct target_msghdr *target_msgh)
1471 {
1472     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1473     abi_long msg_controllen;
1474     abi_ulong target_cmsg_addr;
1475     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1476     socklen_t space = 0;
1477 
1478     msg_controllen = tswapal(target_msgh->msg_controllen);
1479     if (msg_controllen < sizeof (struct target_cmsghdr))
1480         goto the_end;
1481     target_cmsg_addr = tswapal(target_msgh->msg_control);
1482     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1483     target_cmsg_start = target_cmsg;
1484     if (!target_cmsg)
1485         return -TARGET_EFAULT;
1486 
1487     while (cmsg && target_cmsg) {
1488         void *data = CMSG_DATA(cmsg);
1489         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1490 
1491         int len = tswapal(target_cmsg->cmsg_len)
1492             - sizeof(struct target_cmsghdr);
1493 
1494         space += CMSG_SPACE(len);
1495         if (space > msgh->msg_controllen) {
1496             space -= CMSG_SPACE(len);
1497             /* This is a QEMU bug, since we allocated the payload
1498              * area ourselves (unlike overflow in host-to-target
1499              * conversion, which is just the guest giving us a buffer
1500              * that's too small). It can't happen for the payload types
1501              * we currently support; if it becomes an issue in future
1502              * we would need to improve our allocation strategy to
1503              * something more intelligent than "twice the size of the
1504              * target buffer we're reading from".
1505              */
1506             gemu_log("Host cmsg overflow\n");
1507             break;
1508         }
1509 
1510         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1511             cmsg->cmsg_level = SOL_SOCKET;
1512         } else {
1513             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1514         }
1515         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1516         cmsg->cmsg_len = CMSG_LEN(len);
1517 
1518         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1519             int *fd = (int *)data;
1520             int *target_fd = (int *)target_data;
1521             int i, numfds = len / sizeof(int);
1522 
1523             for (i = 0; i < numfds; i++) {
1524                 __get_user(fd[i], target_fd + i);
1525             }
1526         } else if (cmsg->cmsg_level == SOL_SOCKET
1527                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1528             struct ucred *cred = (struct ucred *)data;
1529             struct target_ucred *target_cred =
1530                 (struct target_ucred *)target_data;
1531 
1532             __get_user(cred->pid, &target_cred->pid);
1533             __get_user(cred->uid, &target_cred->uid);
1534             __get_user(cred->gid, &target_cred->gid);
1535         } else {
1536             gemu_log("Unsupported ancillary data: %d/%d\n",
1537                                         cmsg->cmsg_level, cmsg->cmsg_type);
1538             memcpy(data, target_data, len);
1539         }
1540 
1541         cmsg = CMSG_NXTHDR(msgh, cmsg);
1542         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1543                                          target_cmsg_start);
1544     }
1545     unlock_user(target_cmsg, target_cmsg_addr, 0);
1546  the_end:
1547     msgh->msg_controllen = space;
1548     return 0;
1549 }
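     /*
      * Illustration of target_to_host_cmsg() (a sketch of the data flow,
      * not additional emulation logic): a guest sendmsg() carrying an
      * SCM_RIGHTS control message with two descriptors has its header
      * rebuilt with host CMSG_LEN()/CMSG_SPACE() sizing and each 32-bit fd
      * read with __get_user(), so the host kernel sees
      *
      *     cmsg_level = SOL_SOCKET, cmsg_type = SCM_RIGHTS,
      *     data = { fd0, fd1 }        (now in host byte order)
      *
      * Unsupported ancillary types are copied through unconverted after a
      * gemu_log() warning.
      */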
1550 
1551 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1552                                            struct msghdr *msgh)
1553 {
1554     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1555     abi_long msg_controllen;
1556     abi_ulong target_cmsg_addr;
1557     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1558     socklen_t space = 0;
1559 
1560     msg_controllen = tswapal(target_msgh->msg_controllen);
1561     if (msg_controllen < sizeof (struct target_cmsghdr))
1562         goto the_end;
1563     target_cmsg_addr = tswapal(target_msgh->msg_control);
1564     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1565     target_cmsg_start = target_cmsg;
1566     if (!target_cmsg)
1567         return -TARGET_EFAULT;
1568 
1569     while (cmsg && target_cmsg) {
1570         void *data = CMSG_DATA(cmsg);
1571         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1572 
1573         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1574         int tgt_len, tgt_space;
1575 
1576         /* We never copy a half-header but may copy half-data;
1577          * this is Linux's behaviour in put_cmsg(). Note that
1578          * truncation here is a guest problem (which we report
1579          * to the guest via the CTRUNC bit), unlike truncation
1580          * in target_to_host_cmsg, which is a QEMU bug.
1581          */
1582         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1583             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1584             break;
1585         }
1586 
1587         if (cmsg->cmsg_level == SOL_SOCKET) {
1588             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1589         } else {
1590             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1591         }
1592         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1593 
1594         /* Payload types which need a different size of payload on
1595          * the target must adjust tgt_len here.
1596          */
1597         tgt_len = len;
1598         switch (cmsg->cmsg_level) {
1599         case SOL_SOCKET:
1600             switch (cmsg->cmsg_type) {
1601             case SO_TIMESTAMP:
1602                 tgt_len = sizeof(struct target_timeval);
1603                 break;
1604             default:
1605                 break;
1606             }
1607             break;
1608         default:
1609             break;
1610         }
1611 
1612         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1613             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1614             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1615         }
1616 
1617         /* We must now copy-and-convert len bytes of payload
1618          * into tgt_len bytes of destination space. Bear in mind
1619          * that in both source and destination we may be dealing
1620          * with a truncated value!
1621          */
1622         switch (cmsg->cmsg_level) {
1623         case SOL_SOCKET:
1624             switch (cmsg->cmsg_type) {
1625             case SCM_RIGHTS:
1626             {
1627                 int *fd = (int *)data;
1628                 int *target_fd = (int *)target_data;
1629                 int i, numfds = tgt_len / sizeof(int);
1630 
1631                 for (i = 0; i < numfds; i++) {
1632                     __put_user(fd[i], target_fd + i);
1633                 }
1634                 break;
1635             }
1636             case SO_TIMESTAMP:
1637             {
1638                 struct timeval *tv = (struct timeval *)data;
1639                 struct target_timeval *target_tv =
1640                     (struct target_timeval *)target_data;
1641 
1642                 if (len != sizeof(struct timeval) ||
1643                     tgt_len != sizeof(struct target_timeval)) {
1644                     goto unimplemented;
1645                 }
1646 
1647                 /* copy struct timeval to target */
1648                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1649                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1650                 break;
1651             }
1652             case SCM_CREDENTIALS:
1653             {
1654                 struct ucred *cred = (struct ucred *)data;
1655                 struct target_ucred *target_cred =
1656                     (struct target_ucred *)target_data;
1657 
1658                 __put_user(cred->pid, &target_cred->pid);
1659                 __put_user(cred->uid, &target_cred->uid);
1660                 __put_user(cred->gid, &target_cred->gid);
1661                 break;
1662             }
1663             default:
1664                 goto unimplemented;
1665             }
1666             break;
1667 
1668         case SOL_IP:
1669             switch (cmsg->cmsg_type) {
1670             case IP_TTL:
1671             {
1672                 uint32_t *v = (uint32_t *)data;
1673                 uint32_t *t_int = (uint32_t *)target_data;
1674 
1675                 if (len != sizeof(uint32_t) ||
1676                     tgt_len != sizeof(uint32_t)) {
1677                     goto unimplemented;
1678                 }
1679                 __put_user(*v, t_int);
1680                 break;
1681             }
1682             case IP_RECVERR:
1683             {
1684                 struct errhdr_t {
1685                    struct sock_extended_err ee;
1686                    struct sockaddr_in offender;
1687                 };
1688                 struct errhdr_t *errh = (struct errhdr_t *)data;
1689                 struct errhdr_t *target_errh =
1690                     (struct errhdr_t *)target_data;
1691 
1692                 if (len != sizeof(struct errhdr_t) ||
1693                     tgt_len != sizeof(struct errhdr_t)) {
1694                     goto unimplemented;
1695                 }
1696                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1697                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1698                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1699                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1700                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1701                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1702                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1703                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1704                     (void *) &errh->offender, sizeof(errh->offender));
1705                 break;
1706             }
1707             default:
1708                 goto unimplemented;
1709             }
1710             break;
1711 
1712         case SOL_IPV6:
1713             switch (cmsg->cmsg_type) {
1714             case IPV6_HOPLIMIT:
1715             {
1716                 uint32_t *v = (uint32_t *)data;
1717                 uint32_t *t_int = (uint32_t *)target_data;
1718 
1719                 if (len != sizeof(uint32_t) ||
1720                     tgt_len != sizeof(uint32_t)) {
1721                     goto unimplemented;
1722                 }
1723                 __put_user(*v, t_int);
1724                 break;
1725             }
1726             case IPV6_RECVERR:
1727             {
1728                 struct errhdr6_t {
1729                    struct sock_extended_err ee;
1730                    struct sockaddr_in6 offender;
1731                 };
1732                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1733                 struct errhdr6_t *target_errh =
1734                     (struct errhdr6_t *)target_data;
1735 
1736                 if (len != sizeof(struct errhdr6_t) ||
1737                     tgt_len != sizeof(struct errhdr6_t)) {
1738                     goto unimplemented;
1739                 }
1740                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1741                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1742                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1743                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1744                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1745                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1746                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1747                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1748                     (void *) &errh->offender, sizeof(errh->offender));
1749                 break;
1750             }
1751             default:
1752                 goto unimplemented;
1753             }
1754             break;
1755 
1756         default:
1757         unimplemented:
1758             gemu_log("Unsupported ancillary data: %d/%d\n",
1759                                         cmsg->cmsg_level, cmsg->cmsg_type);
1760             memcpy(target_data, data, MIN(len, tgt_len));
1761             if (tgt_len > len) {
1762                 memset(target_data + len, 0, tgt_len - len);
1763             }
1764         }
1765 
1766         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1767         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1768         if (msg_controllen < tgt_space) {
1769             tgt_space = msg_controllen;
1770         }
1771         msg_controllen -= tgt_space;
1772         space += tgt_space;
1773         cmsg = CMSG_NXTHDR(msgh, cmsg);
1774         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1775                                          target_cmsg_start);
1776     }
1777     unlock_user(target_cmsg, target_cmsg_addr, space);
1778  the_end:
1779     target_msgh->msg_controllen = tswapal(space);
1780     return 0;
1781 }
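     /*
      * Illustration of host_to_target_cmsg() (assuming a 32-bit guest on a
      * 64-bit host): an SO_TIMESTAMP payload arrives as a 16-byte host
      * struct timeval but the guest expects an 8-byte target_timeval, so
      * tgt_len is shrunk before the TARGET_CMSG_LEN()/TARGET_CMSG_SPACE()
      * sizing and tv_sec/tv_usec are converted individually with
      * __put_user().  If the guest control buffer is too small the payload
      * is truncated and MSG_CTRUNC is set, matching the put_cmsg()
      * behaviour noted in the comments above.
      */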
1782 
1783 /* do_setsockopt() Must return target values and target errnos. */
1784 static abi_long do_setsockopt(int sockfd, int level, int optname,
1785                               abi_ulong optval_addr, socklen_t optlen)
1786 {
1787     abi_long ret;
1788     int val;
1789     struct ip_mreqn *ip_mreq;
1790     struct ip_mreq_source *ip_mreq_source;
1791 
1792     switch(level) {
1793     case SOL_TCP:
1794         /* TCP options all take an 'int' value.  */
1795         if (optlen < sizeof(uint32_t))
1796             return -TARGET_EINVAL;
1797 
1798         if (get_user_u32(val, optval_addr))
1799             return -TARGET_EFAULT;
1800         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1801         break;
1802     case SOL_IP:
1803         switch(optname) {
1804         case IP_TOS:
1805         case IP_TTL:
1806         case IP_HDRINCL:
1807         case IP_ROUTER_ALERT:
1808         case IP_RECVOPTS:
1809         case IP_RETOPTS:
1810         case IP_PKTINFO:
1811         case IP_MTU_DISCOVER:
1812         case IP_RECVERR:
1813         case IP_RECVTTL:
1814         case IP_RECVTOS:
1815 #ifdef IP_FREEBIND
1816         case IP_FREEBIND:
1817 #endif
1818         case IP_MULTICAST_TTL:
1819         case IP_MULTICAST_LOOP:
1820             val = 0;
1821             if (optlen >= sizeof(uint32_t)) {
1822                 if (get_user_u32(val, optval_addr))
1823                     return -TARGET_EFAULT;
1824             } else if (optlen >= 1) {
1825                 if (get_user_u8(val, optval_addr))
1826                     return -TARGET_EFAULT;
1827             }
1828             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1829             break;
1830         case IP_ADD_MEMBERSHIP:
1831         case IP_DROP_MEMBERSHIP:
1832             if (optlen < sizeof (struct target_ip_mreq) ||
1833                 optlen > sizeof (struct target_ip_mreqn))
1834                 return -TARGET_EINVAL;
1835 
1836             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1837             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1838             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1839             break;
1840 
1841         case IP_BLOCK_SOURCE:
1842         case IP_UNBLOCK_SOURCE:
1843         case IP_ADD_SOURCE_MEMBERSHIP:
1844         case IP_DROP_SOURCE_MEMBERSHIP:
1845             if (optlen != sizeof (struct target_ip_mreq_source))
1846                 return -TARGET_EINVAL;
1847 
1848             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1849             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1850             unlock_user (ip_mreq_source, optval_addr, 0);
1851             break;
1852 
1853         default:
1854             goto unimplemented;
1855         }
1856         break;
1857     case SOL_IPV6:
1858         switch (optname) {
1859         case IPV6_MTU_DISCOVER:
1860         case IPV6_MTU:
1861         case IPV6_V6ONLY:
1862         case IPV6_RECVPKTINFO:
1863         case IPV6_UNICAST_HOPS:
1864         case IPV6_MULTICAST_HOPS:
1865         case IPV6_MULTICAST_LOOP:
1866         case IPV6_RECVERR:
1867         case IPV6_RECVHOPLIMIT:
1868         case IPV6_2292HOPLIMIT:
1869         case IPV6_CHECKSUM:
1870             val = 0;
1871             if (optlen < sizeof(uint32_t)) {
1872                 return -TARGET_EINVAL;
1873             }
1874             if (get_user_u32(val, optval_addr)) {
1875                 return -TARGET_EFAULT;
1876             }
1877             ret = get_errno(setsockopt(sockfd, level, optname,
1878                                        &val, sizeof(val)));
1879             break;
1880         case IPV6_PKTINFO:
1881         {
1882             struct in6_pktinfo pki;
1883 
1884             if (optlen < sizeof(pki)) {
1885                 return -TARGET_EINVAL;
1886             }
1887 
1888             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1889                 return -TARGET_EFAULT;
1890             }
1891 
1892             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1893 
1894             ret = get_errno(setsockopt(sockfd, level, optname,
1895                                        &pki, sizeof(pki)));
1896             break;
1897         }
1898         default:
1899             goto unimplemented;
1900         }
1901         break;
1902     case SOL_ICMPV6:
1903         switch (optname) {
1904         case ICMPV6_FILTER:
1905         {
1906             struct icmp6_filter icmp6f;
1907 
1908             if (optlen > sizeof(icmp6f)) {
1909                 optlen = sizeof(icmp6f);
1910             }
1911 
1912             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
1913                 return -TARGET_EFAULT;
1914             }
1915 
1916             for (val = 0; val < 8; val++) {
1917                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
1918             }
1919 
1920             ret = get_errno(setsockopt(sockfd, level, optname,
1921                                        &icmp6f, optlen));
1922             break;
1923         }
1924         default:
1925             goto unimplemented;
1926         }
1927         break;
1928     case SOL_RAW:
1929         switch (optname) {
1930         case ICMP_FILTER:
1931         case IPV6_CHECKSUM:
1932             /* these take a u32 value */
1933             if (optlen < sizeof(uint32_t)) {
1934                 return -TARGET_EINVAL;
1935             }
1936 
1937             if (get_user_u32(val, optval_addr)) {
1938                 return -TARGET_EFAULT;
1939             }
1940             ret = get_errno(setsockopt(sockfd, level, optname,
1941                                        &val, sizeof(val)));
1942             break;
1943 
1944         default:
1945             goto unimplemented;
1946         }
1947         break;
1948     case TARGET_SOL_SOCKET:
1949         switch (optname) {
1950         case TARGET_SO_RCVTIMEO:
1951         {
1952                 struct timeval tv;
1953 
1954                 optname = SO_RCVTIMEO;
1955 
1956 set_timeout:
1957                 if (optlen != sizeof(struct target_timeval)) {
1958                     return -TARGET_EINVAL;
1959                 }
1960 
1961                 if (copy_from_user_timeval(&tv, optval_addr)) {
1962                     return -TARGET_EFAULT;
1963                 }
1964 
1965                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1966                                 &tv, sizeof(tv)));
1967                 return ret;
1968         }
1969         case TARGET_SO_SNDTIMEO:
1970                 optname = SO_SNDTIMEO;
1971                 goto set_timeout;
1972         case TARGET_SO_ATTACH_FILTER:
1973         {
1974                 struct target_sock_fprog *tfprog;
1975                 struct target_sock_filter *tfilter;
1976                 struct sock_fprog fprog;
1977                 struct sock_filter *filter;
1978                 int i;
1979 
1980                 if (optlen != sizeof(*tfprog)) {
1981                     return -TARGET_EINVAL;
1982                 }
1983                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1984                     return -TARGET_EFAULT;
1985                 }
1986                 if (!lock_user_struct(VERIFY_READ, tfilter,
1987                                       tswapal(tfprog->filter), 0)) {
1988                     unlock_user_struct(tfprog, optval_addr, 1);
1989                     return -TARGET_EFAULT;
1990                 }
1991 
1992                 fprog.len = tswap16(tfprog->len);
1993                 filter = g_try_new(struct sock_filter, fprog.len);
1994                 if (filter == NULL) {
1995                     unlock_user_struct(tfilter, tfprog->filter, 1);
1996                     unlock_user_struct(tfprog, optval_addr, 1);
1997                     return -TARGET_ENOMEM;
1998                 }
1999                 for (i = 0; i < fprog.len; i++) {
2000                     filter[i].code = tswap16(tfilter[i].code);
2001                     filter[i].jt = tfilter[i].jt;
2002                     filter[i].jf = tfilter[i].jf;
2003                     filter[i].k = tswap32(tfilter[i].k);
2004                 }
2005                 fprog.filter = filter;
2006 
2007                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2008                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2009                 g_free(filter);
2010 
2011                 unlock_user_struct(tfilter, tfprog->filter, 1);
2012                 unlock_user_struct(tfprog, optval_addr, 1);
2013                 return ret;
2014         }
2015 	case TARGET_SO_BINDTODEVICE:
2016 	{
2017 		char *dev_ifname, *addr_ifname;
2018 
2019 		if (optlen > IFNAMSIZ - 1) {
2020 		    optlen = IFNAMSIZ - 1;
2021 		}
2022 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2023 		if (!dev_ifname) {
2024 		    return -TARGET_EFAULT;
2025 		}
2026 		optname = SO_BINDTODEVICE;
2027 		addr_ifname = alloca(IFNAMSIZ);
2028 		memcpy(addr_ifname, dev_ifname, optlen);
2029 		addr_ifname[optlen] = 0;
2030 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2031                                            addr_ifname, optlen));
2032 		unlock_user (dev_ifname, optval_addr, 0);
2033 		return ret;
2034 	}
2035         case TARGET_SO_LINGER:
2036         {
2037                 struct linger lg;
2038                 struct target_linger *tlg;
2039 
2040                 if (optlen != sizeof(struct target_linger)) {
2041                     return -TARGET_EINVAL;
2042                 }
2043                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2044                     return -TARGET_EFAULT;
2045                 }
2046                 __get_user(lg.l_onoff, &tlg->l_onoff);
2047                 __get_user(lg.l_linger, &tlg->l_linger);
2048                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2049                                 &lg, sizeof(lg)));
2050                 unlock_user_struct(tlg, optval_addr, 0);
2051                 return ret;
2052         }
2053             /* Options with 'int' argument.  */
2054         case TARGET_SO_DEBUG:
2055 		optname = SO_DEBUG;
2056 		break;
2057         case TARGET_SO_REUSEADDR:
2058 		optname = SO_REUSEADDR;
2059 		break;
2060         case TARGET_SO_TYPE:
2061 		optname = SO_TYPE;
2062 		break;
2063         case TARGET_SO_ERROR:
2064 		optname = SO_ERROR;
2065 		break;
2066         case TARGET_SO_DONTROUTE:
2067 		optname = SO_DONTROUTE;
2068 		break;
2069         case TARGET_SO_BROADCAST:
2070 		optname = SO_BROADCAST;
2071 		break;
2072         case TARGET_SO_SNDBUF:
2073 		optname = SO_SNDBUF;
2074 		break;
2075         case TARGET_SO_SNDBUFFORCE:
2076                 optname = SO_SNDBUFFORCE;
2077                 break;
2078         case TARGET_SO_RCVBUF:
2079 		optname = SO_RCVBUF;
2080 		break;
2081         case TARGET_SO_RCVBUFFORCE:
2082                 optname = SO_RCVBUFFORCE;
2083                 break;
2084         case TARGET_SO_KEEPALIVE:
2085 		optname = SO_KEEPALIVE;
2086 		break;
2087         case TARGET_SO_OOBINLINE:
2088 		optname = SO_OOBINLINE;
2089 		break;
2090         case TARGET_SO_NO_CHECK:
2091 		optname = SO_NO_CHECK;
2092 		break;
2093         case TARGET_SO_PRIORITY:
2094 		optname = SO_PRIORITY;
2095 		break;
2096 #ifdef SO_BSDCOMPAT
2097         case TARGET_SO_BSDCOMPAT:
2098 		optname = SO_BSDCOMPAT;
2099 		break;
2100 #endif
2101         case TARGET_SO_PASSCRED:
2102 		optname = SO_PASSCRED;
2103 		break;
2104         case TARGET_SO_PASSSEC:
2105                 optname = SO_PASSSEC;
2106                 break;
2107         case TARGET_SO_TIMESTAMP:
2108 		optname = SO_TIMESTAMP;
2109 		break;
2110         case TARGET_SO_RCVLOWAT:
2111 		optname = SO_RCVLOWAT;
2112 		break;
2113         default:
2114             goto unimplemented;
2115         }
2116 	if (optlen < sizeof(uint32_t))
2117             return -TARGET_EINVAL;
2118 
2119 	if (get_user_u32(val, optval_addr))
2120             return -TARGET_EFAULT;
2121 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2122         break;
2123     default:
2124     unimplemented:
2125         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2126         ret = -TARGET_ENOPROTOOPT;
2127     }
2128     return ret;
2129 }
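     /*
      * Example of the do_setsockopt() flow (illustrative only): a guest
      * call such as
      *
      *     struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
      *     setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
      *
      * arrives with level == TARGET_SOL_SOCKET and
      * optname == TARGET_SO_RCVTIMEO; the target_timeval is converted by
      * copy_from_user_timeval() and the host setsockopt() runs with the
      * host SO_RCVTIMEO constant and a host struct timeval.  Plain integer
      * options share the tail of the TARGET_SOL_SOCKET case: only the
      * optname constant is translated and the value is fetched with
      * get_user_u32().
      */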
2130 
2131 /* do_getsockopt() Must return target values and target errnos. */
2132 static abi_long do_getsockopt(int sockfd, int level, int optname,
2133                               abi_ulong optval_addr, abi_ulong optlen)
2134 {
2135     abi_long ret;
2136     int len, val;
2137     socklen_t lv;
2138 
2139     switch(level) {
2140     case TARGET_SOL_SOCKET:
2141         level = SOL_SOCKET;
2142         switch (optname) {
2143         /* These don't just return a single integer */
2144         case TARGET_SO_RCVTIMEO:
2145         case TARGET_SO_SNDTIMEO:
2146         case TARGET_SO_PEERNAME:
2147             goto unimplemented;
2148         case TARGET_SO_PEERCRED: {
2149             struct ucred cr;
2150             socklen_t crlen;
2151             struct target_ucred *tcr;
2152 
2153             if (get_user_u32(len, optlen)) {
2154                 return -TARGET_EFAULT;
2155             }
2156             if (len < 0) {
2157                 return -TARGET_EINVAL;
2158             }
2159 
2160             crlen = sizeof(cr);
2161             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2162                                        &cr, &crlen));
2163             if (ret < 0) {
2164                 return ret;
2165             }
2166             if (len > crlen) {
2167                 len = crlen;
2168             }
2169             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2170                 return -TARGET_EFAULT;
2171             }
2172             __put_user(cr.pid, &tcr->pid);
2173             __put_user(cr.uid, &tcr->uid);
2174             __put_user(cr.gid, &tcr->gid);
2175             unlock_user_struct(tcr, optval_addr, 1);
2176             if (put_user_u32(len, optlen)) {
2177                 return -TARGET_EFAULT;
2178             }
2179             break;
2180         }
2181         case TARGET_SO_LINGER:
2182         {
2183             struct linger lg;
2184             socklen_t lglen;
2185             struct target_linger *tlg;
2186 
2187             if (get_user_u32(len, optlen)) {
2188                 return -TARGET_EFAULT;
2189             }
2190             if (len < 0) {
2191                 return -TARGET_EINVAL;
2192             }
2193 
2194             lglen = sizeof(lg);
2195             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2196                                        &lg, &lglen));
2197             if (ret < 0) {
2198                 return ret;
2199             }
2200             if (len > lglen) {
2201                 len = lglen;
2202             }
2203             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2204                 return -TARGET_EFAULT;
2205             }
2206             __put_user(lg.l_onoff, &tlg->l_onoff);
2207             __put_user(lg.l_linger, &tlg->l_linger);
2208             unlock_user_struct(tlg, optval_addr, 1);
2209             if (put_user_u32(len, optlen)) {
2210                 return -TARGET_EFAULT;
2211             }
2212             break;
2213         }
2214         /* Options with 'int' argument.  */
2215         case TARGET_SO_DEBUG:
2216             optname = SO_DEBUG;
2217             goto int_case;
2218         case TARGET_SO_REUSEADDR:
2219             optname = SO_REUSEADDR;
2220             goto int_case;
2221         case TARGET_SO_TYPE:
2222             optname = SO_TYPE;
2223             goto int_case;
2224         case TARGET_SO_ERROR:
2225             optname = SO_ERROR;
2226             goto int_case;
2227         case TARGET_SO_DONTROUTE:
2228             optname = SO_DONTROUTE;
2229             goto int_case;
2230         case TARGET_SO_BROADCAST:
2231             optname = SO_BROADCAST;
2232             goto int_case;
2233         case TARGET_SO_SNDBUF:
2234             optname = SO_SNDBUF;
2235             goto int_case;
2236         case TARGET_SO_RCVBUF:
2237             optname = SO_RCVBUF;
2238             goto int_case;
2239         case TARGET_SO_KEEPALIVE:
2240             optname = SO_KEEPALIVE;
2241             goto int_case;
2242         case TARGET_SO_OOBINLINE:
2243             optname = SO_OOBINLINE;
2244             goto int_case;
2245         case TARGET_SO_NO_CHECK:
2246             optname = SO_NO_CHECK;
2247             goto int_case;
2248         case TARGET_SO_PRIORITY:
2249             optname = SO_PRIORITY;
2250             goto int_case;
2251 #ifdef SO_BSDCOMPAT
2252         case TARGET_SO_BSDCOMPAT:
2253             optname = SO_BSDCOMPAT;
2254             goto int_case;
2255 #endif
2256         case TARGET_SO_PASSCRED:
2257             optname = SO_PASSCRED;
2258             goto int_case;
2259         case TARGET_SO_TIMESTAMP:
2260             optname = SO_TIMESTAMP;
2261             goto int_case;
2262         case TARGET_SO_RCVLOWAT:
2263             optname = SO_RCVLOWAT;
2264             goto int_case;
2265         case TARGET_SO_ACCEPTCONN:
2266             optname = SO_ACCEPTCONN;
2267             goto int_case;
2268         default:
2269             goto int_case;
2270         }
2271         break;
2272     case SOL_TCP:
2273         /* TCP options all take an 'int' value.  */
2274     int_case:
2275         if (get_user_u32(len, optlen))
2276             return -TARGET_EFAULT;
2277         if (len < 0)
2278             return -TARGET_EINVAL;
2279         lv = sizeof(lv);
2280         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2281         if (ret < 0)
2282             return ret;
2283         if (optname == SO_TYPE) {
2284             val = host_to_target_sock_type(val);
2285         }
2286         if (len > lv)
2287             len = lv;
2288         if (len == 4) {
2289             if (put_user_u32(val, optval_addr))
2290                 return -TARGET_EFAULT;
2291         } else {
2292             if (put_user_u8(val, optval_addr))
2293                 return -TARGET_EFAULT;
2294         }
2295         if (put_user_u32(len, optlen))
2296             return -TARGET_EFAULT;
2297         break;
2298     case SOL_IP:
2299         switch(optname) {
2300         case IP_TOS:
2301         case IP_TTL:
2302         case IP_HDRINCL:
2303         case IP_ROUTER_ALERT:
2304         case IP_RECVOPTS:
2305         case IP_RETOPTS:
2306         case IP_PKTINFO:
2307         case IP_MTU_DISCOVER:
2308         case IP_RECVERR:
2309         case IP_RECVTOS:
2310 #ifdef IP_FREEBIND
2311         case IP_FREEBIND:
2312 #endif
2313         case IP_MULTICAST_TTL:
2314         case IP_MULTICAST_LOOP:
2315             if (get_user_u32(len, optlen))
2316                 return -TARGET_EFAULT;
2317             if (len < 0)
2318                 return -TARGET_EINVAL;
2319             lv = sizeof(lv);
2320             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2321             if (ret < 0)
2322                 return ret;
2323             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2324                 len = 1;
2325                 if (put_user_u32(len, optlen)
2326                     || put_user_u8(val, optval_addr))
2327                     return -TARGET_EFAULT;
2328             } else {
2329                 if (len > sizeof(int))
2330                     len = sizeof(int);
2331                 if (put_user_u32(len, optlen)
2332                     || put_user_u32(val, optval_addr))
2333                     return -TARGET_EFAULT;
2334             }
2335             break;
2336         default:
2337             ret = -TARGET_ENOPROTOOPT;
2338             break;
2339         }
2340         break;
2341     default:
2342     unimplemented:
2343         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2344                  level, optname);
2345         ret = -TARGET_EOPNOTSUPP;
2346         break;
2347     }
2348     return ret;
2349 }
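     /*
      * Note on the int_case path in do_getsockopt(): the host value is
      * always fetched into a full int, then written back as a 32-bit or an
      * 8-bit quantity depending on the length the guest supplied, and the
      * (possibly clamped) length is stored back through optlen.  For
      * example, getsockopt(fd, SOL_SOCKET, SO_TYPE, ...) with len == 4
      * returns the host socket type translated through
      * host_to_target_sock_type().
      */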
2350 
2351 /* Convert a target low/high pair representing a file offset into the host
2352  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2353  * as the kernel doesn't handle them either.
2354  */
2355 static void target_to_host_low_high(abi_ulong tlow,
2356                                     abi_ulong thigh,
2357                                     unsigned long *hlow,
2358                                     unsigned long *hhigh)
2359 {
2360     uint64_t off = tlow |
2361         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2362         TARGET_LONG_BITS / 2;
2363 
2364     *hlow = off;
2365     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2366 }
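     /*
      * Worked example (assuming a 32-bit target): with tlow = 0x00001000
      * and thigh = 0x00000002,
      *
      *     off = 0x00001000 | (0x2 << 16 << 16) = 0x0000000200001000
      *
      * On a 64-bit host the whole offset lands in *hlow and *hhigh becomes
      * 0; on a 32-bit host it is split back into hlow = 0x00001000 and
      * hhigh = 0x00000002.  Shifting twice by half the word size avoids an
      * undefined full-width shift when the pair fits in one host long.
      */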
2367 
2368 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2369                                 abi_ulong count, int copy)
2370 {
2371     struct target_iovec *target_vec;
2372     struct iovec *vec;
2373     abi_ulong total_len, max_len;
2374     int i;
2375     int err = 0;
2376     bool bad_address = false;
2377 
2378     if (count == 0) {
2379         errno = 0;
2380         return NULL;
2381     }
2382     if (count > IOV_MAX) {
2383         errno = EINVAL;
2384         return NULL;
2385     }
2386 
2387     vec = g_try_new0(struct iovec, count);
2388     if (vec == NULL) {
2389         errno = ENOMEM;
2390         return NULL;
2391     }
2392 
2393     target_vec = lock_user(VERIFY_READ, target_addr,
2394                            count * sizeof(struct target_iovec), 1);
2395     if (target_vec == NULL) {
2396         err = EFAULT;
2397         goto fail2;
2398     }
2399 
2400     /* ??? If host page size > target page size, this will result in a
2401        value larger than what we can actually support.  */
2402     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2403     total_len = 0;
2404 
2405     for (i = 0; i < count; i++) {
2406         abi_ulong base = tswapal(target_vec[i].iov_base);
2407         abi_long len = tswapal(target_vec[i].iov_len);
2408 
2409         if (len < 0) {
2410             err = EINVAL;
2411             goto fail;
2412         } else if (len == 0) {
2413             /* Zero length pointer is ignored.  */
2414             vec[i].iov_base = 0;
2415         } else {
2416             vec[i].iov_base = lock_user(type, base, len, copy);
2417             /* If the first buffer pointer is bad, this is a fault.  But
2418              * subsequent bad buffers will result in a partial write; this
2419              * is realized by filling the vector with null pointers and
2420              * zero lengths. */
2421             if (!vec[i].iov_base) {
2422                 if (i == 0) {
2423                     err = EFAULT;
2424                     goto fail;
2425                 } else {
2426                     bad_address = true;
2427                 }
2428             }
2429             if (bad_address) {
2430                 len = 0;
2431             }
2432             if (len > max_len - total_len) {
2433                 len = max_len - total_len;
2434             }
2435         }
2436         vec[i].iov_len = len;
2437         total_len += len;
2438     }
2439 
2440     unlock_user(target_vec, target_addr, 0);
2441     return vec;
2442 
2443  fail:
2444     while (--i >= 0) {
2445         if (tswapal(target_vec[i].iov_len) > 0) {
2446             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2447         }
2448     }
2449     unlock_user(target_vec, target_addr, 0);
2450  fail2:
2451     g_free(vec);
2452     errno = err;
2453     return NULL;
2454 }
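     /*
      * Illustration of lock_iovec()'s partial-write semantics: if a guest
      * writev() passes three iovecs and the second buffer is unmapped, the
      * first entry is locked normally while the second and every later
      * entry become zero-length NULL iovecs, so the host syscall transfers
      * only the first buffer, mirroring the partial result Linux itself
      * would produce.  Only a bad *first* buffer (or a negative length)
      * fails the whole call with EFAULT/EINVAL.
      */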
2455 
2456 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2457                          abi_ulong count, int copy)
2458 {
2459     struct target_iovec *target_vec;
2460     int i;
2461 
2462     target_vec = lock_user(VERIFY_READ, target_addr,
2463                            count * sizeof(struct target_iovec), 1);
2464     if (target_vec) {
2465         for (i = 0; i < count; i++) {
2466             abi_ulong base = tswapal(target_vec[i].iov_base);
2467             abi_long len = tswapal(target_vec[i].iov_len);
2468             if (len < 0) {
2469                 break;
2470             }
2471             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2472         }
2473         unlock_user(target_vec, target_addr, 0);
2474     }
2475 
2476     g_free(vec);
2477 }
2478 
2479 static inline int target_to_host_sock_type(int *type)
2480 {
2481     int host_type = 0;
2482     int target_type = *type;
2483 
2484     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2485     case TARGET_SOCK_DGRAM:
2486         host_type = SOCK_DGRAM;
2487         break;
2488     case TARGET_SOCK_STREAM:
2489         host_type = SOCK_STREAM;
2490         break;
2491     default:
2492         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2493         break;
2494     }
2495     if (target_type & TARGET_SOCK_CLOEXEC) {
2496 #if defined(SOCK_CLOEXEC)
2497         host_type |= SOCK_CLOEXEC;
2498 #else
2499         return -TARGET_EINVAL;
2500 #endif
2501     }
2502     if (target_type & TARGET_SOCK_NONBLOCK) {
2503 #if defined(SOCK_NONBLOCK)
2504         host_type |= SOCK_NONBLOCK;
2505 #elif !defined(O_NONBLOCK)
2506         return -TARGET_EINVAL;
2507 #endif
2508     }
2509     *type = host_type;
2510     return 0;
2511 }
2512 
2513 /* Try to emulate socket type flags after socket creation.  */
2514 static int sock_flags_fixup(int fd, int target_type)
2515 {
2516 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2517     if (target_type & TARGET_SOCK_NONBLOCK) {
2518         int flags = fcntl(fd, F_GETFL);
2519         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2520             close(fd);
2521             return -TARGET_EINVAL;
2522         }
2523     }
2524 #endif
2525     return fd;
2526 }
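     /*
      * Together, target_to_host_sock_type() and sock_flags_fixup() map the
      * guest's socket type word onto the host.  For example,
      * TARGET_SOCK_DGRAM | TARGET_SOCK_NONBLOCK becomes
      * SOCK_DGRAM | SOCK_NONBLOCK where the host has the flag; on a host
      * without SOCK_NONBLOCK but with O_NONBLOCK the socket is created
      * first and the flag is applied afterwards via fcntl(F_SETFL).  Flags
      * that cannot be represented at all yield -TARGET_EINVAL before any
      * host socket exists.
      */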
2527 
2528 /* do_socket() Must return target values and target errnos. */
2529 static abi_long do_socket(int domain, int type, int protocol)
2530 {
2531     int target_type = type;
2532     int ret;
2533 
2534     ret = target_to_host_sock_type(&type);
2535     if (ret) {
2536         return ret;
2537     }
2538 
2539     if (domain == PF_NETLINK && !(
2540 #ifdef CONFIG_RTNETLINK
2541          protocol == NETLINK_ROUTE ||
2542 #endif
2543          protocol == NETLINK_KOBJECT_UEVENT ||
2544          protocol == NETLINK_AUDIT)) {
2545         return -EPFNOSUPPORT;
2546     }
2547 
2548     if (domain == AF_PACKET ||
2549         (domain == AF_INET && type == SOCK_PACKET)) {
2550         protocol = tswap16(protocol);
2551     }
2552 
2553     ret = get_errno(socket(domain, type, protocol));
2554     if (ret >= 0) {
2555         ret = sock_flags_fixup(ret, target_type);
2556         if (type == SOCK_PACKET) {
2557             /* Handle an obsolete case:
2558              * if the socket type is SOCK_PACKET, bind by name.
2559              */
2560             fd_trans_register(ret, &target_packet_trans);
2561         } else if (domain == PF_NETLINK) {
2562             switch (protocol) {
2563 #ifdef CONFIG_RTNETLINK
2564             case NETLINK_ROUTE:
2565                 fd_trans_register(ret, &target_netlink_route_trans);
2566                 break;
2567 #endif
2568             case NETLINK_KOBJECT_UEVENT:
2569                 /* nothing to do: messages are strings */
2570                 break;
2571             case NETLINK_AUDIT:
2572                 fd_trans_register(ret, &target_netlink_audit_trans);
2573                 break;
2574             default:
2575                 g_assert_not_reached();
2576             }
2577         }
2578     }
2579     return ret;
2580 }
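     /*
      * Note on the netlink handling in do_socket(): only NETLINK_ROUTE
      * (with CONFIG_RTNETLINK), NETLINK_KOBJECT_UEVENT and NETLINK_AUDIT
      * are let through; anything else is rejected early with EPFNOSUPPORT.
      * For the supported protocols an fd translator is registered
      * (e.g. target_netlink_route_trans), so later send/receive payloads
      * on that descriptor are converted by the fd_trans hooks used in
      * do_sendto() and do_sendrecvmsg_locked().
      */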
2581 
2582 /* do_bind() Must return target values and target errnos. */
2583 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2584                         socklen_t addrlen)
2585 {
2586     void *addr;
2587     abi_long ret;
2588 
2589     if ((int)addrlen < 0) {
2590         return -TARGET_EINVAL;
2591     }
2592 
2593     addr = alloca(addrlen+1);
2594 
2595     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2596     if (ret)
2597         return ret;
2598 
2599     return get_errno(bind(sockfd, addr, addrlen));
2600 }
2601 
2602 /* do_connect() Must return target values and target errnos. */
2603 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2604                            socklen_t addrlen)
2605 {
2606     void *addr;
2607     abi_long ret;
2608 
2609     if ((int)addrlen < 0) {
2610         return -TARGET_EINVAL;
2611     }
2612 
2613     addr = alloca(addrlen+1);
2614 
2615     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2616     if (ret)
2617         return ret;
2618 
2619     return get_errno(safe_connect(sockfd, addr, addrlen));
2620 }
2621 
2622 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2623 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2624                                       int flags, int send)
2625 {
2626     abi_long ret, len;
2627     struct msghdr msg;
2628     abi_ulong count;
2629     struct iovec *vec;
2630     abi_ulong target_vec;
2631 
2632     if (msgp->msg_name) {
2633         msg.msg_namelen = tswap32(msgp->msg_namelen);
2634         msg.msg_name = alloca(msg.msg_namelen+1);
2635         ret = target_to_host_sockaddr(fd, msg.msg_name,
2636                                       tswapal(msgp->msg_name),
2637                                       msg.msg_namelen);
2638         if (ret == -TARGET_EFAULT) {
2639             /* For connected sockets msg_name and msg_namelen must
2640              * be ignored, so returning EFAULT immediately is wrong.
2641              * Instead, pass a bad msg_name to the host kernel, and
2642              * let it decide whether to return EFAULT or not.
2643              */
2644             msg.msg_name = (void *)-1;
2645         } else if (ret) {
2646             goto out2;
2647         }
2648     } else {
2649         msg.msg_name = NULL;
2650         msg.msg_namelen = 0;
2651     }
2652     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2653     msg.msg_control = alloca(msg.msg_controllen);
2654     memset(msg.msg_control, 0, msg.msg_controllen);
2655 
2656     msg.msg_flags = tswap32(msgp->msg_flags);
2657 
2658     count = tswapal(msgp->msg_iovlen);
2659     target_vec = tswapal(msgp->msg_iov);
2660 
2661     if (count > IOV_MAX) {
2662         /* sendmsg/recvmsg return a different errno for this condition than
2663          * readv/writev, so we must catch it here before lock_iovec() does.
2664          */
2665         ret = -TARGET_EMSGSIZE;
2666         goto out2;
2667     }
2668 
2669     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2670                      target_vec, count, send);
2671     if (vec == NULL) {
2672         ret = -host_to_target_errno(errno);
2673         goto out2;
2674     }
2675     msg.msg_iovlen = count;
2676     msg.msg_iov = vec;
2677 
2678     if (send) {
2679         if (fd_trans_target_to_host_data(fd)) {
2680             void *host_msg;
2681 
2682             host_msg = g_malloc(msg.msg_iov->iov_len);
2683             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2684             ret = fd_trans_target_to_host_data(fd)(host_msg,
2685                                                    msg.msg_iov->iov_len);
2686             if (ret >= 0) {
2687                 msg.msg_iov->iov_base = host_msg;
2688                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2689             }
2690             g_free(host_msg);
2691         } else {
2692             ret = target_to_host_cmsg(&msg, msgp);
2693             if (ret == 0) {
2694                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2695             }
2696         }
2697     } else {
2698         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2699         if (!is_error(ret)) {
2700             len = ret;
2701             if (fd_trans_host_to_target_data(fd)) {
2702                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2703                                                MIN(msg.msg_iov->iov_len, len));
2704             } else {
2705                 ret = host_to_target_cmsg(msgp, &msg);
2706             }
2707             if (!is_error(ret)) {
2708                 msgp->msg_namelen = tswap32(msg.msg_namelen);
2709                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2710                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2711                                     msg.msg_name, msg.msg_namelen);
2712                     if (ret) {
2713                         goto out;
2714                     }
2715                 }
2716 
2717                 ret = len;
2718             }
2719         }
2720     }
2721 
2722 out:
2723     unlock_iovec(vec, target_vec, count, !send);
2724 out2:
2725     return ret;
2726 }
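     /*
      * Points worth noting in do_sendrecvmsg_locked():
      *  - msg_control is sized at twice the guest-declared length, leaving
      *    room for host cmsg headers that may be larger than the target
      *    ones (see the comment in target_to_host_cmsg()).
      *  - a faulting msg_name is passed on to the host kernel as
      *    (void *)-1 so that connected sockets, which must ignore the
      *    address, still behave correctly.
      *  - descriptors with fd translators have their payload converted in
      *    a bounce buffer rather than through the cmsg path.
      */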
2727 
2728 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2729                                int flags, int send)
2730 {
2731     abi_long ret;
2732     struct target_msghdr *msgp;
2733 
2734     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2735                           msgp,
2736                           target_msg,
2737                           send ? 1 : 0)) {
2738         return -TARGET_EFAULT;
2739     }
2740     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2741     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2742     return ret;
2743 }
2744 
2745 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2746  * so it might not have this *mmsg-specific flag either.
2747  */
2748 #ifndef MSG_WAITFORONE
2749 #define MSG_WAITFORONE 0x10000
2750 #endif
2751 
2752 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2753                                 unsigned int vlen, unsigned int flags,
2754                                 int send)
2755 {
2756     struct target_mmsghdr *mmsgp;
2757     abi_long ret = 0;
2758     int i;
2759 
2760     if (vlen > UIO_MAXIOV) {
2761         vlen = UIO_MAXIOV;
2762     }
2763 
2764     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2765     if (!mmsgp) {
2766         return -TARGET_EFAULT;
2767     }
2768 
2769     for (i = 0; i < vlen; i++) {
2770         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2771         if (is_error(ret)) {
2772             break;
2773         }
2774         mmsgp[i].msg_len = tswap32(ret);
2775         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2776         if (flags & MSG_WAITFORONE) {
2777             flags |= MSG_DONTWAIT;
2778         }
2779     }
2780 
2781     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2782 
2783     /* Return number of datagrams sent if we sent any at all;
2784      * otherwise return the error.
2785      */
2786     if (i) {
2787         return i;
2788     }
2789     return ret;
2790 }
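     /*
      * Example of the do_sendrecvmmsg() loop: a guest recvmmsg() with
      * vlen == 3 and MSG_WAITFORONE blocks only for the first datagram;
      * once it arrives MSG_DONTWAIT is OR-ed in, so later iterations
      * return immediately when nothing is queued.  The call reports how
      * many msg_len fields were filled and only surfaces an error when no
      * message was transferred at all.
      */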
2791 
2792 /* do_accept4() Must return target values and target errnos. */
2793 static abi_long do_accept4(int fd, abi_ulong target_addr,
2794                            abi_ulong target_addrlen_addr, int flags)
2795 {
2796     socklen_t addrlen;
2797     void *addr;
2798     abi_long ret;
2799     int host_flags;
2800 
2801     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2802 
2803     if (target_addr == 0) {
2804         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2805     }
2806 
2807     /* Linux returns EINVAL if the addrlen pointer is invalid */
2808     if (get_user_u32(addrlen, target_addrlen_addr))
2809         return -TARGET_EINVAL;
2810 
2811     if ((int)addrlen < 0) {
2812         return -TARGET_EINVAL;
2813     }
2814 
2815     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2816         return -TARGET_EINVAL;
2817 
2818     addr = alloca(addrlen);
2819 
2820     ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
2821     if (!is_error(ret)) {
2822         host_to_target_sockaddr(target_addr, addr, addrlen);
2823         if (put_user_u32(addrlen, target_addrlen_addr))
2824             ret = -TARGET_EFAULT;
2825     }
2826     return ret;
2827 }
2828 
2829 /* do_getpeername() Must return target values and target errnos. */
2830 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2831                                abi_ulong target_addrlen_addr)
2832 {
2833     socklen_t addrlen;
2834     void *addr;
2835     abi_long ret;
2836 
2837     if (get_user_u32(addrlen, target_addrlen_addr))
2838         return -TARGET_EFAULT;
2839 
2840     if ((int)addrlen < 0) {
2841         return -TARGET_EINVAL;
2842     }
2843 
2844     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2845         return -TARGET_EFAULT;
2846 
2847     addr = alloca(addrlen);
2848 
2849     ret = get_errno(getpeername(fd, addr, &addrlen));
2850     if (!is_error(ret)) {
2851         host_to_target_sockaddr(target_addr, addr, addrlen);
2852         if (put_user_u32(addrlen, target_addrlen_addr))
2853             ret = -TARGET_EFAULT;
2854     }
2855     return ret;
2856 }
2857 
2858 /* do_getsockname() Must return target values and target errnos. */
2859 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2860                                abi_ulong target_addrlen_addr)
2861 {
2862     socklen_t addrlen;
2863     void *addr;
2864     abi_long ret;
2865 
2866     if (get_user_u32(addrlen, target_addrlen_addr))
2867         return -TARGET_EFAULT;
2868 
2869     if ((int)addrlen < 0) {
2870         return -TARGET_EINVAL;
2871     }
2872 
2873     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2874         return -TARGET_EFAULT;
2875 
2876     addr = alloca(addrlen);
2877 
2878     ret = get_errno(getsockname(fd, addr, &addrlen));
2879     if (!is_error(ret)) {
2880         host_to_target_sockaddr(target_addr, addr, addrlen);
2881         if (put_user_u32(addrlen, target_addrlen_addr))
2882             ret = -TARGET_EFAULT;
2883     }
2884     return ret;
2885 }
2886 
2887 /* do_socketpair() Must return target values and target errnos. */
2888 static abi_long do_socketpair(int domain, int type, int protocol,
2889                               abi_ulong target_tab_addr)
2890 {
2891     int tab[2];
2892     abi_long ret;
2893 
2894     target_to_host_sock_type(&type);
2895 
2896     ret = get_errno(socketpair(domain, type, protocol, tab));
2897     if (!is_error(ret)) {
2898         if (put_user_s32(tab[0], target_tab_addr)
2899             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2900             ret = -TARGET_EFAULT;
2901     }
2902     return ret;
2903 }
2904 
2905 /* do_sendto() Must return target values and target errnos. */
2906 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2907                           abi_ulong target_addr, socklen_t addrlen)
2908 {
2909     void *addr;
2910     void *host_msg;
2911     void *copy_msg = NULL;
2912     abi_long ret;
2913 
2914     if ((int)addrlen < 0) {
2915         return -TARGET_EINVAL;
2916     }
2917 
2918     host_msg = lock_user(VERIFY_READ, msg, len, 1);
2919     if (!host_msg)
2920         return -TARGET_EFAULT;
2921     if (fd_trans_target_to_host_data(fd)) {
2922         copy_msg = host_msg;
2923         host_msg = g_malloc(len);
2924         memcpy(host_msg, copy_msg, len);
2925         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
2926         if (ret < 0) {
2927             goto fail;
2928         }
2929     }
2930     if (target_addr) {
2931         addr = alloca(addrlen+1);
2932         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
2933         if (ret) {
2934             goto fail;
2935         }
2936         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
2937     } else {
2938         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
2939     }
2940 fail:
2941     if (copy_msg) {
2942         g_free(host_msg);
2943         host_msg = copy_msg;
2944     }
2945     unlock_user(host_msg, msg, 0);
2946     return ret;
2947 }
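     /*
      * The copy_msg dance in do_sendto() exists because an fd translator
      * may rewrite the payload in place (e.g. byte-swapping netlink
      * headers): the locked guest buffer is duplicated into a scratch
      * allocation, the translator runs on the copy, and the original
      * mapping is unlocked untouched so the guest never sees a modified
      * send buffer.
      */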
2948 
2949 /* do_recvfrom() Must return target values and target errnos. */
2950 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2951                             abi_ulong target_addr,
2952                             abi_ulong target_addrlen)
2953 {
2954     socklen_t addrlen;
2955     void *addr;
2956     void *host_msg;
2957     abi_long ret;
2958 
2959     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2960     if (!host_msg)
2961         return -TARGET_EFAULT;
2962     if (target_addr) {
2963         if (get_user_u32(addrlen, target_addrlen)) {
2964             ret = -TARGET_EFAULT;
2965             goto fail;
2966         }
2967         if ((int)addrlen < 0) {
2968             ret = -TARGET_EINVAL;
2969             goto fail;
2970         }
2971         addr = alloca(addrlen);
2972         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
2973                                       addr, &addrlen));
2974     } else {
2975         addr = NULL; /* To keep compiler quiet.  */
2976         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
2977     }
2978     if (!is_error(ret)) {
2979         if (fd_trans_host_to_target_data(fd)) {
2980             abi_long trans;
2981             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
2982             if (is_error(trans)) {
2983                 ret = trans;
2984                 goto fail;
2985             }
2986         }
2987         if (target_addr) {
2988             host_to_target_sockaddr(target_addr, addr, addrlen);
2989             if (put_user_u32(addrlen, target_addrlen)) {
2990                 ret = -TARGET_EFAULT;
2991                 goto fail;
2992             }
2993         }
2994         unlock_user(host_msg, msg, len);
2995     } else {
2996 fail:
2997         unlock_user(host_msg, msg, 0);
2998     }
2999     return ret;
3000 }
3001 
3002 #ifdef TARGET_NR_socketcall
3003 /* do_socketcall() must return target values and target errnos. */
3004 static abi_long do_socketcall(int num, abi_ulong vptr)
3005 {
3006     static const unsigned nargs[] = { /* number of arguments per operation */
3007         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3008         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3009         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3010         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3011         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3012         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3013         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3014         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3015         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3016         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3017         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3018         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3019         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3020         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3021         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3022         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3023         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3024         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3025         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3026         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3027     };
3028     abi_long a[6]; /* max 6 args */
3029     unsigned i;
3030 
3031     /* check the range of the first argument num */
3032     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3033     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3034         return -TARGET_EINVAL;
3035     }
3036     /* ensure we have space for args */
3037     if (nargs[num] > ARRAY_SIZE(a)) {
3038         return -TARGET_EINVAL;
3039     }
3040     /* collect the arguments in a[] according to nargs[] */
3041     for (i = 0; i < nargs[num]; ++i) {
3042         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3043             return -TARGET_EFAULT;
3044         }
3045     }
3046     /* now that we have the args, invoke the appropriate underlying function */
3047     switch (num) {
3048     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3049         return do_socket(a[0], a[1], a[2]);
3050     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3051         return do_bind(a[0], a[1], a[2]);
3052     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3053         return do_connect(a[0], a[1], a[2]);
3054     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3055         return get_errno(listen(a[0], a[1]));
3056     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3057         return do_accept4(a[0], a[1], a[2], 0);
3058     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3059         return do_getsockname(a[0], a[1], a[2]);
3060     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3061         return do_getpeername(a[0], a[1], a[2]);
3062     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3063         return do_socketpair(a[0], a[1], a[2], a[3]);
3064     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3065         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3066     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3067         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3068     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3069         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3070     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3071         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3072     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3073         return get_errno(shutdown(a[0], a[1]));
3074     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3075         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3076     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3077         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3078     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3079         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3080     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3081         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3082     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3083         return do_accept4(a[0], a[1], a[2], a[3]);
3084     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3085         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3086     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3087         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3088     default:
3089         gemu_log("Unsupported socketcall: %d\n", num);
3090         return -TARGET_EINVAL;
3091     }
3092 }
3093 #endif
3094 
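/*
 * Illustrative sketch (not part of the original source): how the generic
 * argument unpacking in do_socketcall() above plays out for one operation.
 * On targets that funnel socket syscalls through socketcall(2), a guest
 * connect() arrives as num == TARGET_SYS_CONNECT with vptr pointing at an
 * array of three abi_ulong slots in guest memory; each slot is fetched with
 * get_user_ual() and handed to do_connect().  The helper below is
 * hypothetical and kept out of the build.
 */
#if 0
static abi_long example_socketcall_connect(abi_ulong vptr)
{
    abi_long sockfd, addr, addrlen;

    if (get_user_ual(sockfd, vptr) ||
        get_user_ual(addr, vptr + sizeof(abi_long)) ||
        get_user_ual(addrlen, vptr + 2 * sizeof(abi_long))) {
        return -TARGET_EFAULT;
    }
    return do_connect(sockfd, addr, addrlen);
}
#endif
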
3095 #define N_SHM_REGIONS	32
3096 
3097 static struct shm_region {
3098     abi_ulong start;
3099     abi_ulong size;
3100     bool in_use;
3101 } shm_regions[N_SHM_REGIONS];
3102 
3103 #ifndef TARGET_SEMID64_DS
3104 /* asm-generic version of this struct */
3105 struct target_semid64_ds
3106 {
3107   struct target_ipc_perm sem_perm;
3108   abi_ulong sem_otime;
3109 #if TARGET_ABI_BITS == 32
3110   abi_ulong __unused1;
3111 #endif
3112   abi_ulong sem_ctime;
3113 #if TARGET_ABI_BITS == 32
3114   abi_ulong __unused2;
3115 #endif
3116   abi_ulong sem_nsems;
3117   abi_ulong __unused3;
3118   abi_ulong __unused4;
3119 };
3120 #endif
3121 
3122 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3123                                                abi_ulong target_addr)
3124 {
3125     struct target_ipc_perm *target_ip;
3126     struct target_semid64_ds *target_sd;
3127 
3128     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3129         return -TARGET_EFAULT;
3130     target_ip = &(target_sd->sem_perm);
3131     host_ip->__key = tswap32(target_ip->__key);
3132     host_ip->uid = tswap32(target_ip->uid);
3133     host_ip->gid = tswap32(target_ip->gid);
3134     host_ip->cuid = tswap32(target_ip->cuid);
3135     host_ip->cgid = tswap32(target_ip->cgid);
3136 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3137     host_ip->mode = tswap32(target_ip->mode);
3138 #else
3139     host_ip->mode = tswap16(target_ip->mode);
3140 #endif
3141 #if defined(TARGET_PPC)
3142     host_ip->__seq = tswap32(target_ip->__seq);
3143 #else
3144     host_ip->__seq = tswap16(target_ip->__seq);
3145 #endif
3146     unlock_user_struct(target_sd, target_addr, 0);
3147     return 0;
3148 }
3149 
3150 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3151                                                struct ipc_perm *host_ip)
3152 {
3153     struct target_ipc_perm *target_ip;
3154     struct target_semid64_ds *target_sd;
3155 
3156     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3157         return -TARGET_EFAULT;
3158     target_ip = &(target_sd->sem_perm);
3159     target_ip->__key = tswap32(host_ip->__key);
3160     target_ip->uid = tswap32(host_ip->uid);
3161     target_ip->gid = tswap32(host_ip->gid);
3162     target_ip->cuid = tswap32(host_ip->cuid);
3163     target_ip->cgid = tswap32(host_ip->cgid);
3164 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3165     target_ip->mode = tswap32(host_ip->mode);
3166 #else
3167     target_ip->mode = tswap16(host_ip->mode);
3168 #endif
3169 #if defined(TARGET_PPC)
3170     target_ip->__seq = tswap32(host_ip->__seq);
3171 #else
3172     target_ip->__seq = tswap16(host_ip->__seq);
3173 #endif
3174     unlock_user_struct(target_sd, target_addr, 1);
3175     return 0;
3176 }
3177 
3178 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3179                                                abi_ulong target_addr)
3180 {
3181     struct target_semid64_ds *target_sd;
3182 
3183     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3184         return -TARGET_EFAULT;
3185     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3186         return -TARGET_EFAULT;
3187     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3188     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3189     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3190     unlock_user_struct(target_sd, target_addr, 0);
3191     return 0;
3192 }
3193 
3194 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3195                                                struct semid_ds *host_sd)
3196 {
3197     struct target_semid64_ds *target_sd;
3198 
3199     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3200         return -TARGET_EFAULT;
3201     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3202         return -TARGET_EFAULT;
3203     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3204     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3205     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3206     unlock_user_struct(target_sd, target_addr, 1);
3207     return 0;
3208 }
3209 
3210 struct target_seminfo {
3211     int semmap;
3212     int semmni;
3213     int semmns;
3214     int semmnu;
3215     int semmsl;
3216     int semopm;
3217     int semume;
3218     int semusz;
3219     int semvmx;
3220     int semaem;
3221 };
3222 
3223 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3224                                               struct seminfo *host_seminfo)
3225 {
3226     struct target_seminfo *target_seminfo;
3227     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3228         return -TARGET_EFAULT;
3229     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3230     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3231     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3232     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3233     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3234     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3235     __put_user(host_seminfo->semume, &target_seminfo->semume);
3236     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3237     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3238     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3239     unlock_user_struct(target_seminfo, target_addr, 1);
3240     return 0;
3241 }
3242 
3243 union semun {
3244     int val;
3245     struct semid_ds *buf;
3246     unsigned short *array;
3247     struct seminfo *__buf;
3248 };
3249 
3250 union target_semun {
3251     int val;
3252     abi_ulong buf;
3253     abi_ulong array;
3254     abi_ulong __buf;
3255 };
3256 
3257 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3258                                                abi_ulong target_addr)
3259 {
3260     int nsems;
3261     unsigned short *array;
3262     union semun semun;
3263     struct semid_ds semid_ds;
3264     int i, ret;
3265 
3266     semun.buf = &semid_ds;
3267 
3268     ret = semctl(semid, 0, IPC_STAT, semun);
3269     if (ret == -1)
3270         return get_errno(ret);
3271 
3272     nsems = semid_ds.sem_nsems;
3273 
3274     *host_array = g_try_new(unsigned short, nsems);
3275     if (!*host_array) {
3276         return -TARGET_ENOMEM;
3277     }
3278     array = lock_user(VERIFY_READ, target_addr,
3279                       nsems*sizeof(unsigned short), 1);
3280     if (!array) {
3281         g_free(*host_array);
3282         return -TARGET_EFAULT;
3283     }
3284 
3285     for (i = 0; i < nsems; i++) {
3286         __get_user((*host_array)[i], &array[i]);
3287     }
3288     unlock_user(array, target_addr, 0);
3289 
3290     return 0;
3291 }
3292 
3293 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3294                                                unsigned short **host_array)
3295 {
3296     int nsems;
3297     unsigned short *array;
3298     union semun semun;
3299     struct semid_ds semid_ds;
3300     int i, ret;
3301 
3302     semun.buf = &semid_ds;
3303 
3304     ret = semctl(semid, 0, IPC_STAT, semun);
3305     if (ret == -1)
3306         return get_errno(ret);
3307 
3308     nsems = semid_ds.sem_nsems;
3309 
3310     array = lock_user(VERIFY_WRITE, target_addr,
3311                       nsems * sizeof(unsigned short), 0);
3312     if (!array) {
3313         g_free(*host_array);
3314         return -TARGET_EFAULT;
3315     }
3316     for (i = 0; i < nsems; i++) {
3317         __put_user((*host_array)[i], &array[i]);
3318     }
3319     g_free(*host_array);
3320     unlock_user(array, target_addr, 1);
3321     return 0;
3322 }
3323 
3324 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3325                                  abi_ulong target_arg)
3326 {
3327     union target_semun target_su = { .buf = target_arg };
3328     union semun arg;
3329     struct semid_ds dsarg;
3330     unsigned short *array = NULL;
3331     struct seminfo seminfo;
3332     abi_long ret = -TARGET_EINVAL;
3333     abi_long err;
3334     cmd &= 0xff;
3335 
3336     switch (cmd) {
3337     case GETVAL:
3338     case SETVAL:
3339         /* In 64 bit cross-endian situations, we will erroneously pick up
3340          * the wrong half of the union for the "val" element.  To rectify
3341          * this, the entire 8-byte structure is byteswapped, followed by
3342          * a swap of the 4 byte val field. In other cases, the data is
3343          * already in proper host byte order. */
3344         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3345             target_su.buf = tswapal(target_su.buf);
3346             arg.val = tswap32(target_su.val);
3347         } else {
3348             arg.val = target_su.val;
3349         }
3350         ret = get_errno(semctl(semid, semnum, cmd, arg));
3351         break;
3352     case GETALL:
3353     case SETALL:
3354         err = target_to_host_semarray(semid, &array, target_su.array);
3355         if (err)
3356             return err;
3357         arg.array = array;
3358         ret = get_errno(semctl(semid, semnum, cmd, arg));
3359         err = host_to_target_semarray(semid, target_su.array, &array);
3360         if (err)
3361             return err;
3362         break;
3363     case IPC_STAT:
3364     case IPC_SET:
3365     case SEM_STAT:
3366         err = target_to_host_semid_ds(&dsarg, target_su.buf);
3367         if (err)
3368             return err;
3369         arg.buf = &dsarg;
3370         ret = get_errno(semctl(semid, semnum, cmd, arg));
3371         err = host_to_target_semid_ds(target_su.buf, &dsarg);
3372         if (err)
3373             return err;
3374         break;
3375     case IPC_INFO:
3376     case SEM_INFO:
3377         arg.__buf = &seminfo;
3378         ret = get_errno(semctl(semid, semnum, cmd, arg));
3379         err = host_to_target_seminfo(target_su.__buf, &seminfo);
3380         if (err)
3381             return err;
3382         break;
3383     case IPC_RMID:
3384     case GETPID:
3385     case GETNCNT:
3386     case GETZCNT:
3387         ret = get_errno(semctl(semid, semnum, cmd, NULL));
3388         break;
3389     }
3390 
3391     return ret;
3392 }
3393 
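/*
 * Illustrative sketch (not part of the original source): the GETVAL/SETVAL
 * byte-order fixup in do_semctl() above, in isolation.  On a 64-bit guest
 * the 4-byte "val" member shares storage with the 8-byte "buf" member of
 * union target_semun, so in a cross-endian setup reading val directly would
 * pick up the wrong half of the union.  Swapping the whole 8-byte image
 * first and then the 4-byte value recovers the integer the guest stored.
 * The hypothetical helper below is not built.
 */
#if 0
static int example_semun_val(union target_semun target_su)
{
    if (sizeof(target_su.val) != sizeof(target_su.buf)) {
        target_su.buf = tswapal(target_su.buf);  /* select the right half */
        return tswap32(target_su.val);           /* then fix its byte order */
    }
    return target_su.val;                        /* 32-bit target: already OK */
}
#endif
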
3394 struct target_sembuf {
3395     unsigned short sem_num;
3396     short sem_op;
3397     short sem_flg;
3398 };
3399 
3400 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3401                                              abi_ulong target_addr,
3402                                              unsigned nsops)
3403 {
3404     struct target_sembuf *target_sembuf;
3405     int i;
3406 
3407     target_sembuf = lock_user(VERIFY_READ, target_addr,
3408                               nsops*sizeof(struct target_sembuf), 1);
3409     if (!target_sembuf)
3410         return -TARGET_EFAULT;
3411 
3412     for (i = 0; i < nsops; i++) {
3413         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3414         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3415         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3416     }
3417 
3418     unlock_user(target_sembuf, target_addr, 0);
3419 
3420     return 0;
3421 }
3422 
3423 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3424 {
3425     struct sembuf sops[nsops];
3426 
3427     if (target_to_host_sembuf(sops, ptr, nsops))
3428         return -TARGET_EFAULT;
3429 
3430     return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3431 }
3432 
3433 struct target_msqid_ds
3434 {
3435     struct target_ipc_perm msg_perm;
3436     abi_ulong msg_stime;
3437 #if TARGET_ABI_BITS == 32
3438     abi_ulong __unused1;
3439 #endif
3440     abi_ulong msg_rtime;
3441 #if TARGET_ABI_BITS == 32
3442     abi_ulong __unused2;
3443 #endif
3444     abi_ulong msg_ctime;
3445 #if TARGET_ABI_BITS == 32
3446     abi_ulong __unused3;
3447 #endif
3448     abi_ulong __msg_cbytes;
3449     abi_ulong msg_qnum;
3450     abi_ulong msg_qbytes;
3451     abi_ulong msg_lspid;
3452     abi_ulong msg_lrpid;
3453     abi_ulong __unused4;
3454     abi_ulong __unused5;
3455 };
3456 
3457 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3458                                                abi_ulong target_addr)
3459 {
3460     struct target_msqid_ds *target_md;
3461 
3462     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3463         return -TARGET_EFAULT;
3464     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3465         return -TARGET_EFAULT;
3466     host_md->msg_stime = tswapal(target_md->msg_stime);
3467     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3468     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3469     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3470     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3471     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3472     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3473     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3474     unlock_user_struct(target_md, target_addr, 0);
3475     return 0;
3476 }
3477 
3478 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3479                                                struct msqid_ds *host_md)
3480 {
3481     struct target_msqid_ds *target_md;
3482 
3483     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3484         return -TARGET_EFAULT;
3485     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3486         return -TARGET_EFAULT;
3487     target_md->msg_stime = tswapal(host_md->msg_stime);
3488     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3489     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3490     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3491     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3492     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3493     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3494     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3495     unlock_user_struct(target_md, target_addr, 1);
3496     return 0;
3497 }
3498 
3499 struct target_msginfo {
3500     int msgpool;
3501     int msgmap;
3502     int msgmax;
3503     int msgmnb;
3504     int msgmni;
3505     int msgssz;
3506     int msgtql;
3507     unsigned short int msgseg;
3508 };
3509 
3510 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3511                                               struct msginfo *host_msginfo)
3512 {
3513     struct target_msginfo *target_msginfo;
3514     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3515         return -TARGET_EFAULT;
3516     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3517     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3518     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3519     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3520     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3521     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3522     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3523     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3524     unlock_user_struct(target_msginfo, target_addr, 1);
3525     return 0;
3526 }
3527 
3528 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3529 {
3530     struct msqid_ds dsarg;
3531     struct msginfo msginfo;
3532     abi_long ret = -TARGET_EINVAL;
3533 
3534     cmd &= 0xff;
3535 
3536     switch (cmd) {
3537     case IPC_STAT:
3538     case IPC_SET:
3539     case MSG_STAT:
3540         if (target_to_host_msqid_ds(&dsarg,ptr))
3541             return -TARGET_EFAULT;
3542         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3543         if (host_to_target_msqid_ds(ptr,&dsarg))
3544             return -TARGET_EFAULT;
3545         break;
3546     case IPC_RMID:
3547         ret = get_errno(msgctl(msgid, cmd, NULL));
3548         break;
3549     case IPC_INFO:
3550     case MSG_INFO:
3551         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3552         if (host_to_target_msginfo(ptr, &msginfo))
3553             return -TARGET_EFAULT;
3554         break;
3555     }
3556 
3557     return ret;
3558 }
3559 
3560 struct target_msgbuf {
3561     abi_long mtype;
3562     char mtext[1];
3563 };
3564 
3565 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3566                                  ssize_t msgsz, int msgflg)
3567 {
3568     struct target_msgbuf *target_mb;
3569     struct msgbuf *host_mb;
3570     abi_long ret = 0;
3571 
3572     if (msgsz < 0) {
3573         return -TARGET_EINVAL;
3574     }
3575 
3576     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3577         return -TARGET_EFAULT;
3578     host_mb = g_try_malloc(msgsz + sizeof(long));
3579     if (!host_mb) {
3580         unlock_user_struct(target_mb, msgp, 0);
3581         return -TARGET_ENOMEM;
3582     }
3583     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3584     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3585     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3586     g_free(host_mb);
3587     unlock_user_struct(target_mb, msgp, 0);
3588 
3589     return ret;
3590 }
3591 
3592 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3593                                  ssize_t msgsz, abi_long msgtyp,
3594                                  int msgflg)
3595 {
3596     struct target_msgbuf *target_mb;
3597     char *target_mtext;
3598     struct msgbuf *host_mb;
3599     abi_long ret = 0;
3600 
3601     if (msgsz < 0) {
3602         return -TARGET_EINVAL;
3603     }
3604 
3605     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3606         return -TARGET_EFAULT;
3607 
3608     host_mb = g_try_malloc(msgsz + sizeof(long));
3609     if (!host_mb) {
3610         ret = -TARGET_ENOMEM;
3611         goto end;
3612     }
3613     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3614 
3615     if (ret > 0) {
3616         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3617         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3618         if (!target_mtext) {
3619             ret = -TARGET_EFAULT;
3620             goto end;
3621         }
3622         memcpy(target_mb->mtext, host_mb->mtext, ret);
3623         unlock_user(target_mtext, target_mtext_addr, ret);
3624     }
3625 
3626     target_mb->mtype = tswapal(host_mb->mtype);
3627 
3628 end:
3629     if (target_mb)
3630         unlock_user_struct(target_mb, msgp, 1);
3631     g_free(host_mb);
3632     return ret;
3633 }
3634 
3635 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3636                                                abi_ulong target_addr)
3637 {
3638     struct target_shmid_ds *target_sd;
3639 
3640     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3641         return -TARGET_EFAULT;
3642     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3643         return -TARGET_EFAULT;
3644     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3645     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3646     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3647     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3648     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3649     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3650     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3651     unlock_user_struct(target_sd, target_addr, 0);
3652     return 0;
3653 }
3654 
3655 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3656                                                struct shmid_ds *host_sd)
3657 {
3658     struct target_shmid_ds *target_sd;
3659 
3660     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3661         return -TARGET_EFAULT;
3662     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3663         return -TARGET_EFAULT;
3664     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3665     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3666     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3667     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3668     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3669     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3670     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3671     unlock_user_struct(target_sd, target_addr, 1);
3672     return 0;
3673 }
3674 
3675 struct  target_shminfo {
3676     abi_ulong shmmax;
3677     abi_ulong shmmin;
3678     abi_ulong shmmni;
3679     abi_ulong shmseg;
3680     abi_ulong shmall;
3681 };
3682 
3683 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3684                                               struct shminfo *host_shminfo)
3685 {
3686     struct target_shminfo *target_shminfo;
3687     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3688         return -TARGET_EFAULT;
3689     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3690     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3691     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3692     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3693     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3694     unlock_user_struct(target_shminfo, target_addr, 1);
3695     return 0;
3696 }
3697 
3698 struct target_shm_info {
3699     int used_ids;
3700     abi_ulong shm_tot;
3701     abi_ulong shm_rss;
3702     abi_ulong shm_swp;
3703     abi_ulong swap_attempts;
3704     abi_ulong swap_successes;
3705 };
3706 
3707 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3708                                                struct shm_info *host_shm_info)
3709 {
3710     struct target_shm_info *target_shm_info;
3711     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3712         return -TARGET_EFAULT;
3713     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3714     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3715     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3716     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3717     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3718     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3719     unlock_user_struct(target_shm_info, target_addr, 1);
3720     return 0;
3721 }
3722 
3723 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3724 {
3725     struct shmid_ds dsarg;
3726     struct shminfo shminfo;
3727     struct shm_info shm_info;
3728     abi_long ret = -TARGET_EINVAL;
3729 
3730     cmd &= 0xff;
3731 
3732     switch(cmd) {
3733     case IPC_STAT:
3734     case IPC_SET:
3735     case SHM_STAT:
3736         if (target_to_host_shmid_ds(&dsarg, buf))
3737             return -TARGET_EFAULT;
3738         ret = get_errno(shmctl(shmid, cmd, &dsarg));
3739         if (host_to_target_shmid_ds(buf, &dsarg))
3740             return -TARGET_EFAULT;
3741         break;
3742     case IPC_INFO:
3743         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3744         if (host_to_target_shminfo(buf, &shminfo))
3745             return -TARGET_EFAULT;
3746         break;
3747     case SHM_INFO:
3748         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3749         if (host_to_target_shm_info(buf, &shm_info))
3750             return -TARGET_EFAULT;
3751         break;
3752     case IPC_RMID:
3753     case SHM_LOCK:
3754     case SHM_UNLOCK:
3755         ret = get_errno(shmctl(shmid, cmd, NULL));
3756         break;
3757     }
3758 
3759     return ret;
3760 }
3761 
3762 #ifndef TARGET_FORCE_SHMLBA
3763 /* For most architectures, SHMLBA is the same as the page size;
3764  * some architectures have larger values, in which case they should
3765  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3766  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3767  * and defining its own value for SHMLBA.
3768  *
3769  * The kernel also permits SHMLBA to be set by the architecture to a
3770  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3771  * this means that addresses are rounded to the large size if
3772  * SHM_RND is set but addresses not aligned to that size are not rejected
3773  * as long as they are at least page-aligned. Since the only architecture
3774  * which uses this is ia64 this code doesn't provide for that oddity.
3775  */
3776 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
3777 {
3778     return TARGET_PAGE_SIZE;
3779 }
3780 #endif
3781 
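/*
 * Illustrative sketch (not part of the original source): how a target with
 * a larger SHMLBA would override the default above.  A hypothetical
 * architecture whose kernel aligns shared memory to four pages would define
 * TARGET_FORCE_SHMLBA in its target_syscall.h and supply its own helper
 * along these lines (not built here):
 */
#if 0
#define TARGET_FORCE_SHMLBA 1

static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return 4 * TARGET_PAGE_SIZE;
}
#endif
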
3782 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
3783                                  int shmid, abi_ulong shmaddr, int shmflg)
3784 {
3785     abi_long raddr;
3786     void *host_raddr;
3787     struct shmid_ds shm_info;
3788     int i,ret;
3789     abi_ulong shmlba;
3790 
3791     /* find out the length of the shared memory segment */
3792     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3793     if (is_error(ret)) {
3794         /* can't get length, bail out */
3795         return ret;
3796     }
3797 
3798     shmlba = target_shmlba(cpu_env);
3799 
3800     if (shmaddr & (shmlba - 1)) {
3801         if (shmflg & SHM_RND) {
3802             shmaddr &= ~(shmlba - 1);
3803         } else {
3804             return -TARGET_EINVAL;
3805         }
3806     }
3807     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
3808         return -TARGET_EINVAL;
3809     }
3810 
3811     mmap_lock();
3812 
3813     if (shmaddr)
3814         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3815     else {
3816         abi_ulong mmap_start;
3817 
3818         mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3819 
3820         if (mmap_start == -1) {
3821             errno = ENOMEM;
3822             host_raddr = (void *)-1;
3823         } else
3824             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3825     }
3826 
3827     if (host_raddr == (void *)-1) {
3828         mmap_unlock();
3829         return get_errno((long)host_raddr);
3830     }
3831     raddr = h2g((unsigned long)host_raddr);
3832 
3833     page_set_flags(raddr, raddr + shm_info.shm_segsz,
3834                    PAGE_VALID | PAGE_READ |
3835                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3836 
3837     for (i = 0; i < N_SHM_REGIONS; i++) {
3838         if (!shm_regions[i].in_use) {
3839             shm_regions[i].in_use = true;
3840             shm_regions[i].start = raddr;
3841             shm_regions[i].size = shm_info.shm_segsz;
3842             break;
3843         }
3844     }
3845 
3846     mmap_unlock();
3847     return raddr;
3848 
3849 }
3850 
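/*
 * Illustrative sketch (not part of the original source): the alignment rule
 * do_shmat() above applies to a caller-supplied address.  With an shmlba of
 * 0x1000, an shmaddr of 0x40001234 is rounded down to 0x40001000 when
 * SHM_RND is set and rejected with -TARGET_EINVAL otherwise.  The helper is
 * hypothetical and not built.
 */
#if 0
static abi_long example_round_shmaddr(abi_ulong *shmaddr, abi_ulong shmlba,
                                      int shmflg)
{
    if (*shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            *shmaddr &= ~(shmlba - 1);   /* round down to an SHMLBA boundary */
        } else {
            return -TARGET_EINVAL;       /* unaligned and SHM_RND not given */
        }
    }
    return 0;
}
#endif
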
3851 static inline abi_long do_shmdt(abi_ulong shmaddr)
3852 {
3853     int i;
3854     abi_long rv;
3855 
3856     mmap_lock();
3857 
3858     for (i = 0; i < N_SHM_REGIONS; ++i) {
3859         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3860             shm_regions[i].in_use = false;
3861             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3862             break;
3863         }
3864     }
3865     rv = get_errno(shmdt(g2h(shmaddr)));
3866 
3867     mmap_unlock();
3868 
3869     return rv;
3870 }
3871 
3872 #ifdef TARGET_NR_ipc
3873 /* ??? This only works with linear mappings.  */
3874 /* do_ipc() must return target values and target errnos. */
3875 static abi_long do_ipc(CPUArchState *cpu_env,
3876                        unsigned int call, abi_long first,
3877                        abi_long second, abi_long third,
3878                        abi_long ptr, abi_long fifth)
3879 {
3880     int version;
3881     abi_long ret = 0;
3882 
3883     version = call >> 16;
3884     call &= 0xffff;
3885 
3886     switch (call) {
3887     case IPCOP_semop:
3888         ret = do_semop(first, ptr, second);
3889         break;
3890 
3891     case IPCOP_semget:
3892         ret = get_errno(semget(first, second, third));
3893         break;
3894 
3895     case IPCOP_semctl: {
3896         /* The semun argument to semctl is passed by value, so dereference the
3897          * ptr argument. */
3898         abi_ulong atptr;
3899         get_user_ual(atptr, ptr);
3900         ret = do_semctl(first, second, third, atptr);
3901         break;
3902     }
3903 
3904     case IPCOP_msgget:
3905         ret = get_errno(msgget(first, second));
3906         break;
3907 
3908     case IPCOP_msgsnd:
3909         ret = do_msgsnd(first, ptr, second, third);
3910         break;
3911 
3912     case IPCOP_msgctl:
3913         ret = do_msgctl(first, second, ptr);
3914         break;
3915 
3916     case IPCOP_msgrcv:
3917         switch (version) {
3918         case 0:
3919             {
3920                 struct target_ipc_kludge {
3921                     abi_long msgp;
3922                     abi_long msgtyp;
3923                 } *tmp;
3924 
3925                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3926                     ret = -TARGET_EFAULT;
3927                     break;
3928                 }
3929 
3930                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3931 
3932                 unlock_user_struct(tmp, ptr, 0);
3933                 break;
3934             }
3935         default:
3936             ret = do_msgrcv(first, ptr, second, fifth, third);
3937         }
3938         break;
3939 
3940     case IPCOP_shmat:
3941         switch (version) {
3942         default:
3943         {
3944             abi_ulong raddr;
3945             raddr = do_shmat(cpu_env, first, ptr, second);
3946             if (is_error(raddr))
3947                 return get_errno(raddr);
3948             if (put_user_ual(raddr, third))
3949                 return -TARGET_EFAULT;
3950             break;
3951         }
3952         case 1:
3953             ret = -TARGET_EINVAL;
3954             break;
3955         }
3956         break;
3957     case IPCOP_shmdt:
3958         ret = do_shmdt(ptr);
3959         break;
3960 
3961     case IPCOP_shmget:
3962         /* IPC_* flag values are the same on all linux platforms */
3963         ret = get_errno(shmget(first, second, third));
3964         break;
3965 
3966     /* IPC_* and SHM_* command values are the same on all linux platforms */
3967     case IPCOP_shmctl:
3968         ret = do_shmctl(first, second, ptr);
3969         break;
3970     default:
3971         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3972         ret = -TARGET_ENOSYS;
3973         break;
3974     }
3975     return ret;
3976 }
3977 #endif
3978 
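/*
 * Illustrative sketch (not part of the original source): the call-number
 * decoding used by do_ipc() above.  The multiplexed ipc(2) entry point packs
 * an ABI "version" into the high 16 bits and the operation into the low 16
 * bits, so e.g. an old-style msgrcv arrives as (0 << 16) | IPCOP_msgrcv and
 * takes the kludge-struct path.  The helper is hypothetical and not built.
 */
#if 0
static void example_decode_ipc(unsigned int call,
                               unsigned int *version, unsigned int *op)
{
    *version = call >> 16;   /* matches: version = call >> 16 */
    *op = call & 0xffff;     /* matches: call &= 0xffff       */
}
#endif
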
3979 /* kernel structure types definitions */
3980 
3981 #define STRUCT(name, ...) STRUCT_ ## name,
3982 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3983 enum {
3984 #include "syscall_types.h"
3985 STRUCT_MAX
3986 };
3987 #undef STRUCT
3988 #undef STRUCT_SPECIAL
3989 
3990 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
3991 #define STRUCT_SPECIAL(name)
3992 #include "syscall_types.h"
3993 #undef STRUCT
3994 #undef STRUCT_SPECIAL
3995 
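/*
 * Illustrative sketch (not part of the original source): what the two passes
 * over syscall_types.h above produce for a hypothetical entry
 * STRUCT(example, TYPE_INT, TYPE_INT).  The first include turns it into an
 * enum constant, the second into a TYPE_NULL-terminated field-type table
 * that the thunk code uses to convert the struct between target and host
 * layouts.  Neither definition below is built.
 */
#if 0
enum { STRUCT_example };                       /* pass 1: STRUCT_ ## name,  */

static const argtype struct_example_def[] = {  /* pass 2: field list + NULL */
    TYPE_INT, TYPE_INT, TYPE_NULL
};
#endif
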
3996 typedef struct IOCTLEntry IOCTLEntry;
3997 
3998 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3999                              int fd, int cmd, abi_long arg);
4000 
4001 struct IOCTLEntry {
4002     int target_cmd;
4003     unsigned int host_cmd;
4004     const char *name;
4005     int access;
4006     do_ioctl_fn *do_ioctl;
4007     const argtype arg_type[5];
4008 };
4009 
4010 #define IOC_R 0x0001
4011 #define IOC_W 0x0002
4012 #define IOC_RW (IOC_R | IOC_W)
4013 
4014 #define MAX_STRUCT_SIZE 4096
4015 
4016 #ifdef CONFIG_FIEMAP
4017 /* So fiemap access checks don't overflow on 32 bit systems.
4018  * This is very slightly smaller than the limit imposed by
4019  * the underlying kernel.
4020  */
4021 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4022                             / sizeof(struct fiemap_extent))
4023 
4024 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4025                                        int fd, int cmd, abi_long arg)
4026 {
4027     /* The parameter for this ioctl is a struct fiemap followed
4028      * by an array of struct fiemap_extent whose size is set
4029      * in fiemap->fm_extent_count. The array is filled in by the
4030      * ioctl.
4031      */
4032     int target_size_in, target_size_out;
4033     struct fiemap *fm;
4034     const argtype *arg_type = ie->arg_type;
4035     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4036     void *argptr, *p;
4037     abi_long ret;
4038     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4039     uint32_t outbufsz;
4040     int free_fm = 0;
4041 
4042     assert(arg_type[0] == TYPE_PTR);
4043     assert(ie->access == IOC_RW);
4044     arg_type++;
4045     target_size_in = thunk_type_size(arg_type, 0);
4046     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4047     if (!argptr) {
4048         return -TARGET_EFAULT;
4049     }
4050     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4051     unlock_user(argptr, arg, 0);
4052     fm = (struct fiemap *)buf_temp;
4053     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4054         return -TARGET_EINVAL;
4055     }
4056 
4057     outbufsz = sizeof (*fm) +
4058         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4059 
4060     if (outbufsz > MAX_STRUCT_SIZE) {
4061         /* We can't fit all the extents into the fixed size buffer.
4062          * Allocate one that is large enough and use it instead.
4063          */
4064         fm = g_try_malloc(outbufsz);
4065         if (!fm) {
4066             return -TARGET_ENOMEM;
4067         }
4068         memcpy(fm, buf_temp, sizeof(struct fiemap));
4069         free_fm = 1;
4070     }
4071     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4072     if (!is_error(ret)) {
4073         target_size_out = target_size_in;
4074         /* An extent_count of 0 means we were only counting the extents
4075          * so there are no structs to copy
4076          */
4077         if (fm->fm_extent_count != 0) {
4078             target_size_out += fm->fm_mapped_extents * extent_size;
4079         }
4080         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4081         if (!argptr) {
4082             ret = -TARGET_EFAULT;
4083         } else {
4084             /* Convert the struct fiemap */
4085             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4086             if (fm->fm_extent_count != 0) {
4087                 p = argptr + target_size_in;
4088                 /* ...and then all the struct fiemap_extents */
4089                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4090                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4091                                   THUNK_TARGET);
4092                     p += extent_size;
4093                 }
4094             }
4095             unlock_user(argptr, arg, target_size_out);
4096         }
4097     }
4098     if (free_fm) {
4099         g_free(fm);
4100     }
4101     return ret;
4102 }
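
/*
 * Illustrative sketch (not part of the original source): the buffer-size
 * arithmetic behind do_ioctl_fs_ioc_fiemap() above.  The ioctl argument is a
 * struct fiemap immediately followed by fm_extent_count fiemap_extent slots,
 * so the host copy needs sizeof(struct fiemap) plus one extent record per
 * slot; FIEMAP_MAX_EXTENTS bounds the count so the sum cannot overflow a
 * 32-bit size.  The helper is hypothetical and not built.
 */
#if 0
static uint32_t example_fiemap_bufsize(uint32_t fm_extent_count)
{
    assert(fm_extent_count <= FIEMAP_MAX_EXTENTS);
    return sizeof(struct fiemap) +
           fm_extent_count * sizeof(struct fiemap_extent);
}
#endif
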
4103 #endif
4104 
4105 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4106                                 int fd, int cmd, abi_long arg)
4107 {
4108     const argtype *arg_type = ie->arg_type;
4109     int target_size;
4110     void *argptr;
4111     int ret;
4112     struct ifconf *host_ifconf;
4113     uint32_t outbufsz;
4114     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4115     int target_ifreq_size;
4116     int nb_ifreq;
4117     int free_buf = 0;
4118     int i;
4119     int target_ifc_len;
4120     abi_long target_ifc_buf;
4121     int host_ifc_len;
4122     char *host_ifc_buf;
4123 
4124     assert(arg_type[0] == TYPE_PTR);
4125     assert(ie->access == IOC_RW);
4126 
4127     arg_type++;
4128     target_size = thunk_type_size(arg_type, 0);
4129 
4130     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4131     if (!argptr)
4132         return -TARGET_EFAULT;
4133     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4134     unlock_user(argptr, arg, 0);
4135 
4136     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4137     target_ifc_len = host_ifconf->ifc_len;
4138     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4139 
4140     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4141     nb_ifreq = target_ifc_len / target_ifreq_size;
4142     host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4143 
4144     outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4145     if (outbufsz > MAX_STRUCT_SIZE) {
4146         /* We can't fit all the ifreq structures into the fixed size buffer.
4147          * Allocate one that is large enough and use it instead.
4148          */
4149         host_ifconf = malloc(outbufsz);
4150         if (!host_ifconf) {
4151             return -TARGET_ENOMEM;
4152         }
4153         memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4154         free_buf = 1;
4155     }
4156     host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
4157 
4158     host_ifconf->ifc_len = host_ifc_len;
4159     host_ifconf->ifc_buf = host_ifc_buf;
4160 
4161     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4162     if (!is_error(ret)) {
4163         /* convert host ifc_len to target ifc_len */
4164 
4165         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4166         target_ifc_len = nb_ifreq * target_ifreq_size;
4167         host_ifconf->ifc_len = target_ifc_len;
4168 
4169         /* restore target ifc_buf */
4170 
4171         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4172 
4173         /* copy struct ifconf to target user */
4174 
4175         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4176         if (!argptr)
4177             return -TARGET_EFAULT;
4178         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4179         unlock_user(argptr, arg, target_size);
4180 
4181         /* copy ifreq[] to target user */
4182 
4183         argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4184         for (i = 0; i < nb_ifreq ; i++) {
4185             thunk_convert(argptr + i * target_ifreq_size,
4186                           host_ifc_buf + i * sizeof(struct ifreq),
4187                           ifreq_arg_type, THUNK_TARGET);
4188         }
4189         unlock_user(argptr, target_ifc_buf, target_ifc_len);
4190     }
4191 
4192     if (free_buf) {
4193         free(host_ifconf);
4194     }
4195 
4196     return ret;
4197 }
4198 
4199 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4200                             int cmd, abi_long arg)
4201 {
4202     void *argptr;
4203     struct dm_ioctl *host_dm;
4204     abi_long guest_data;
4205     uint32_t guest_data_size;
4206     int target_size;
4207     const argtype *arg_type = ie->arg_type;
4208     abi_long ret;
4209     void *big_buf = NULL;
4210     char *host_data;
4211 
4212     arg_type++;
4213     target_size = thunk_type_size(arg_type, 0);
4214     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4215     if (!argptr) {
4216         ret = -TARGET_EFAULT;
4217         goto out;
4218     }
4219     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4220     unlock_user(argptr, arg, 0);
4221 
4222     /* buf_temp is too small, so fetch things into a bigger buffer */
4223     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4224     memcpy(big_buf, buf_temp, target_size);
4225     buf_temp = big_buf;
4226     host_dm = big_buf;
4227 
4228     guest_data = arg + host_dm->data_start;
4229     if ((guest_data - arg) < 0) {
4230         ret = -TARGET_EINVAL;
4231         goto out;
4232     }
4233     guest_data_size = host_dm->data_size - host_dm->data_start;
4234     host_data = (char*)host_dm + host_dm->data_start;
4235 
4236     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4237     if (!argptr) {
4238         ret = -TARGET_EFAULT;
4239         goto out;
4240     }
4241 
4242     switch (ie->host_cmd) {
4243     case DM_REMOVE_ALL:
4244     case DM_LIST_DEVICES:
4245     case DM_DEV_CREATE:
4246     case DM_DEV_REMOVE:
4247     case DM_DEV_SUSPEND:
4248     case DM_DEV_STATUS:
4249     case DM_DEV_WAIT:
4250     case DM_TABLE_STATUS:
4251     case DM_TABLE_CLEAR:
4252     case DM_TABLE_DEPS:
4253     case DM_LIST_VERSIONS:
4254         /* no input data */
4255         break;
4256     case DM_DEV_RENAME:
4257     case DM_DEV_SET_GEOMETRY:
4258         /* data contains only strings */
4259         memcpy(host_data, argptr, guest_data_size);
4260         break;
4261     case DM_TARGET_MSG:
4262         memcpy(host_data, argptr, guest_data_size);
4263         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4264         break;
4265     case DM_TABLE_LOAD:
4266     {
4267         void *gspec = argptr;
4268         void *cur_data = host_data;
4269         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4270         int spec_size = thunk_type_size(arg_type, 0);
4271         int i;
4272 
4273         for (i = 0; i < host_dm->target_count; i++) {
4274             struct dm_target_spec *spec = cur_data;
4275             uint32_t next;
4276             int slen;
4277 
4278             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4279             slen = strlen((char*)gspec + spec_size) + 1;
4280             next = spec->next;
4281             spec->next = sizeof(*spec) + slen;
4282             strcpy((char*)&spec[1], gspec + spec_size);
4283             gspec += next;
4284             cur_data += spec->next;
4285         }
4286         break;
4287     }
4288     default:
4289         ret = -TARGET_EINVAL;
4290         unlock_user(argptr, guest_data, 0);
4291         goto out;
4292     }
4293     unlock_user(argptr, guest_data, 0);
4294 
4295     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4296     if (!is_error(ret)) {
4297         guest_data = arg + host_dm->data_start;
4298         guest_data_size = host_dm->data_size - host_dm->data_start;
4299         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4300         switch (ie->host_cmd) {
4301         case DM_REMOVE_ALL:
4302         case DM_DEV_CREATE:
4303         case DM_DEV_REMOVE:
4304         case DM_DEV_RENAME:
4305         case DM_DEV_SUSPEND:
4306         case DM_DEV_STATUS:
4307         case DM_TABLE_LOAD:
4308         case DM_TABLE_CLEAR:
4309         case DM_TARGET_MSG:
4310         case DM_DEV_SET_GEOMETRY:
4311             /* no return data */
4312             break;
4313         case DM_LIST_DEVICES:
4314         {
4315             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4316             uint32_t remaining_data = guest_data_size;
4317             void *cur_data = argptr;
4318             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4319             int nl_size = 12; /* can't use thunk_size due to alignment */
4320 
4321             while (1) {
4322                 uint32_t next = nl->next;
4323                 if (next) {
4324                     nl->next = nl_size + (strlen(nl->name) + 1);
4325                 }
4326                 if (remaining_data < nl->next) {
4327                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4328                     break;
4329                 }
4330                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4331                 strcpy(cur_data + nl_size, nl->name);
4332                 cur_data += nl->next;
4333                 remaining_data -= nl->next;
4334                 if (!next) {
4335                     break;
4336                 }
4337                 nl = (void*)nl + next;
4338             }
4339             break;
4340         }
4341         case DM_DEV_WAIT:
4342         case DM_TABLE_STATUS:
4343         {
4344             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4345             void *cur_data = argptr;
4346             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4347             int spec_size = thunk_type_size(arg_type, 0);
4348             int i;
4349 
4350             for (i = 0; i < host_dm->target_count; i++) {
4351                 uint32_t next = spec->next;
4352                 int slen = strlen((char*)&spec[1]) + 1;
4353                 spec->next = (cur_data - argptr) + spec_size + slen;
4354                 if (guest_data_size < spec->next) {
4355                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4356                     break;
4357                 }
4358                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4359                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4360                 cur_data = argptr + spec->next;
4361                 spec = (void*)host_dm + host_dm->data_start + next;
4362             }
4363             break;
4364         }
4365         case DM_TABLE_DEPS:
4366         {
4367             void *hdata = (void*)host_dm + host_dm->data_start;
4368             int count = *(uint32_t*)hdata;
4369             uint64_t *hdev = hdata + 8;
4370             uint64_t *gdev = argptr + 8;
4371             int i;
4372 
4373             *(uint32_t*)argptr = tswap32(count);
4374             for (i = 0; i < count; i++) {
4375                 *gdev = tswap64(*hdev);
4376                 gdev++;
4377                 hdev++;
4378             }
4379             break;
4380         }
4381         case DM_LIST_VERSIONS:
4382         {
4383             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4384             uint32_t remaining_data = guest_data_size;
4385             void *cur_data = argptr;
4386             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4387             int vers_size = thunk_type_size(arg_type, 0);
4388 
4389             while (1) {
4390                 uint32_t next = vers->next;
4391                 if (next) {
4392                     vers->next = vers_size + (strlen(vers->name) + 1);
4393                 }
4394                 if (remaining_data < vers->next) {
4395                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4396                     break;
4397                 }
4398                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4399                 strcpy(cur_data + vers_size, vers->name);
4400                 cur_data += vers->next;
4401                 remaining_data -= vers->next;
4402                 if (!next) {
4403                     break;
4404                 }
4405                 vers = (void*)vers + next;
4406             }
4407             break;
4408         }
4409         default:
4410             unlock_user(argptr, guest_data, 0);
4411             ret = -TARGET_EINVAL;
4412             goto out;
4413         }
4414         unlock_user(argptr, guest_data, guest_data_size);
4415 
4416         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4417         if (!argptr) {
4418             ret = -TARGET_EFAULT;
4419             goto out;
4420         }
4421         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4422         unlock_user(argptr, arg, target_size);
4423     }
4424 out:
4425     g_free(big_buf);
4426     return ret;
4427 }
4428 
4429 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4430                                int cmd, abi_long arg)
4431 {
4432     void *argptr;
4433     int target_size;
4434     const argtype *arg_type = ie->arg_type;
4435     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4436     abi_long ret;
4437 
4438     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4439     struct blkpg_partition host_part;
4440 
4441     /* Read and convert blkpg */
4442     arg_type++;
4443     target_size = thunk_type_size(arg_type, 0);
4444     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4445     if (!argptr) {
4446         ret = -TARGET_EFAULT;
4447         goto out;
4448     }
4449     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4450     unlock_user(argptr, arg, 0);
4451 
4452     switch (host_blkpg->op) {
4453     case BLKPG_ADD_PARTITION:
4454     case BLKPG_DEL_PARTITION:
4455         /* payload is struct blkpg_partition */
4456         break;
4457     default:
4458         /* Unknown opcode */
4459         ret = -TARGET_EINVAL;
4460         goto out;
4461     }
4462 
4463     /* Read and convert blkpg->data */
4464     arg = (abi_long)(uintptr_t)host_blkpg->data;
4465     target_size = thunk_type_size(part_arg_type, 0);
4466     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4467     if (!argptr) {
4468         ret = -TARGET_EFAULT;
4469         goto out;
4470     }
4471     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4472     unlock_user(argptr, arg, 0);
4473 
4474     /* Swizzle the data pointer to our local copy and call! */
4475     host_blkpg->data = &host_part;
4476     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4477 
4478 out:
4479     return ret;
4480 }
4481 
4482 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4483                                 int fd, int cmd, abi_long arg)
4484 {
4485     const argtype *arg_type = ie->arg_type;
4486     const StructEntry *se;
4487     const argtype *field_types;
4488     const int *dst_offsets, *src_offsets;
4489     int target_size;
4490     void *argptr;
4491     abi_ulong *target_rt_dev_ptr;
4492     unsigned long *host_rt_dev_ptr;
4493     abi_long ret;
4494     int i;
4495 
4496     assert(ie->access == IOC_W);
4497     assert(*arg_type == TYPE_PTR);
4498     arg_type++;
4499     assert(*arg_type == TYPE_STRUCT);
4500     target_size = thunk_type_size(arg_type, 0);
4501     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4502     if (!argptr) {
4503         return -TARGET_EFAULT;
4504     }
4505     arg_type++;
4506     assert(*arg_type == (int)STRUCT_rtentry);
4507     se = struct_entries + *arg_type++;
4508     assert(se->convert[0] == NULL);
4509     /* convert struct here to be able to catch rt_dev string */
4510     field_types = se->field_types;
4511     dst_offsets = se->field_offsets[THUNK_HOST];
4512     src_offsets = se->field_offsets[THUNK_TARGET];
4513     for (i = 0; i < se->nb_fields; i++) {
4514         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4515             assert(*field_types == TYPE_PTRVOID);
4516             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4517             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4518             if (*target_rt_dev_ptr != 0) {
4519                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4520                                                   tswapal(*target_rt_dev_ptr));
4521                 if (!*host_rt_dev_ptr) {
4522                     unlock_user(argptr, arg, 0);
4523                     return -TARGET_EFAULT;
4524                 }
4525             } else {
4526                 *host_rt_dev_ptr = 0;
4527             }
4528             field_types++;
4529             continue;
4530         }
4531         field_types = thunk_convert(buf_temp + dst_offsets[i],
4532                                     argptr + src_offsets[i],
4533                                     field_types, THUNK_HOST);
4534     }
4535     unlock_user(argptr, arg, 0);
4536 
4537     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4538     if (*host_rt_dev_ptr != 0) {
4539         unlock_user((void *)*host_rt_dev_ptr,
4540                     *target_rt_dev_ptr, 0);
4541     }
4542     return ret;
4543 }
4544 
4545 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4546                                      int fd, int cmd, abi_long arg)
4547 {
4548     int sig = target_to_host_signal(arg);
4549     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4550 }
4551 
4552 #ifdef TIOCGPTPEER
4553 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
4554                                      int fd, int cmd, abi_long arg)
4555 {
4556     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
4557     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
4558 }
4559 #endif
4560 
4561 static IOCTLEntry ioctl_entries[] = {
4562 #define IOCTL(cmd, access, ...) \
4563     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
4564 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
4565     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
4566 #define IOCTL_IGNORE(cmd) \
4567     { TARGET_ ## cmd, 0, #cmd },
4568 #include "ioctls.h"
4569     { 0, 0, },
4570 };
4571 
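/*
 * Illustrative sketch (not part of the original source): what a single line
 * of ioctls.h becomes once the IOCTL()/IOCTL_SPECIAL()/IOCTL_IGNORE() macros
 * above have expanded it.  For a hypothetical read-only ioctl EXAMPLE_CMD
 * whose argument is a pointer to a thunked struct, the generated table entry
 * would look like the initializer below (EXAMPLE_CMD, TARGET_EXAMPLE_CMD and
 * STRUCT_example are made-up names; nothing here is built).
 */
#if 0
static const IOCTLEntry example_entry = {
    TARGET_EXAMPLE_CMD,              /* target_cmd: number the guest passes  */
    EXAMPLE_CMD,                     /* host_cmd: number given to safe_ioctl */
    "EXAMPLE_CMD",                   /* name used by the "Unsupported" log   */
    IOC_R,                           /* host fills the struct, guest reads it */
    0,                               /* do_ioctl: no special-case handler    */
    { MK_PTR(MK_STRUCT(STRUCT_example)) },
};
#endif
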
4572 /* ??? Implement proper locking for ioctls.  */
4573 /* do_ioctl() must return target values and target errnos. */
4574 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4575 {
4576     const IOCTLEntry *ie;
4577     const argtype *arg_type;
4578     abi_long ret;
4579     uint8_t buf_temp[MAX_STRUCT_SIZE];
4580     int target_size;
4581     void *argptr;
4582 
4583     ie = ioctl_entries;
4584     for(;;) {
4585         if (ie->target_cmd == 0) {
4586             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4587             return -TARGET_ENOSYS;
4588         }
4589         if (ie->target_cmd == cmd)
4590             break;
4591         ie++;
4592     }
4593     arg_type = ie->arg_type;
4594     if (ie->do_ioctl) {
4595         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4596     } else if (!ie->host_cmd) {
4597         /* Some architectures define BSD ioctls in their headers
4598            that are not implemented in Linux.  */
4599         return -TARGET_ENOSYS;
4600     }
4601 
4602     switch(arg_type[0]) {
4603     case TYPE_NULL:
4604         /* no argument */
4605         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
4606         break;
4607     case TYPE_PTRVOID:
4608     case TYPE_INT:
4609         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
4610         break;
4611     case TYPE_PTR:
4612         arg_type++;
4613         target_size = thunk_type_size(arg_type, 0);
4614         switch(ie->access) {
4615         case IOC_R:
4616             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4617             if (!is_error(ret)) {
4618                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4619                 if (!argptr)
4620                     return -TARGET_EFAULT;
4621                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4622                 unlock_user(argptr, arg, target_size);
4623             }
4624             break;
4625         case IOC_W:
4626             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4627             if (!argptr)
4628                 return -TARGET_EFAULT;
4629             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4630             unlock_user(argptr, arg, 0);
4631             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4632             break;
4633         default:
4634         case IOC_RW:
4635             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4636             if (!argptr)
4637                 return -TARGET_EFAULT;
4638             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4639             unlock_user(argptr, arg, 0);
4640             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4641             if (!is_error(ret)) {
4642                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4643                 if (!argptr)
4644                     return -TARGET_EFAULT;
4645                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4646                 unlock_user(argptr, arg, target_size);
4647             }
4648             break;
4649         }
4650         break;
4651     default:
4652         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4653                  (long)cmd, arg_type[0]);
4654         ret = -TARGET_ENOSYS;
4655         break;
4656     }
4657     return ret;
4658 }
4659 
4660 static const bitmask_transtbl iflag_tbl[] = {
4661         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4662         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4663         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4664         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4665         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4666         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4667         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4668         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4669         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4670         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4671         { TARGET_IXON, TARGET_IXON, IXON, IXON },
4672         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4673         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4674         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4675         { 0, 0, 0, 0 }
4676 };
4677 
4678 static const bitmask_transtbl oflag_tbl[] = {
4679 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4680 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4681 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4682 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4683 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4684 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4685 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4686 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4687 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4688 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4689 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4690 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4691 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4692 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4693 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4694 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4695 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4696 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4697 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4698 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4699 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4700 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4701 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4702 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4703 	{ 0, 0, 0, 0 }
4704 };
4705 
4706 static const bitmask_transtbl cflag_tbl[] = {
4707 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4708 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4709 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4710 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4711 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4712 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4713 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4714 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4715 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4716 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4717 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
4718 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
4719 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
4720 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4721 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4722 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4723 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4724 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4725 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4726 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4727 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4728 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4729 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4730 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4731 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4732 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4733 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4734 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4735 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4736 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4737 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
4738 	{ 0, 0, 0, 0 }
4739 };
4740 
4741 static const bitmask_transtbl lflag_tbl[] = {
4742 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4743 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4744 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4745 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4746 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4747 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4748 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4749 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4750 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4751 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4752 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4753 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4754 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
4755 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
4756 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
4757 	{ 0, 0, 0, 0 }
4758 };
4759 
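/* Convert a termios structure between target and host layouts: each of
 * the iflag/oflag/cflag/lflag words is translated through the bitmask
 * tables above, and the control characters are copied slot by slot
 * because the VINTR/VQUIT/... indices differ between the two.
 */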
4760 static void target_to_host_termios (void *dst, const void *src)
4761 {
4762     struct host_termios *host = dst;
4763     const struct target_termios *target = src;
4764 
4765     host->c_iflag =
4766         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
4767     host->c_oflag =
4768         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
4769     host->c_cflag =
4770         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
4771     host->c_lflag =
4772         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
4773     host->c_line = target->c_line;
4774 
4775     memset(host->c_cc, 0, sizeof(host->c_cc));
4776     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
4777     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
4778     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
4779     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
4780     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
4781     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
4782     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
4783     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
4784     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
4785     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
4786     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
4787     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
4788     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
4789     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
4790     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
4791     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
4792     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
4793 }
4794 
4795 static void host_to_target_termios (void *dst, const void *src)
4796 {
4797     struct target_termios *target = dst;
4798     const struct host_termios *host = src;
4799 
4800     target->c_iflag =
4801         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
4802     target->c_oflag =
4803         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
4804     target->c_cflag =
4805         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
4806     target->c_lflag =
4807         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
4808     target->c_line = host->c_line;
4809 
4810     memset(target->c_cc, 0, sizeof(target->c_cc));
4811     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
4812     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
4813     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
4814     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
4815     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
4816     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
4817     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
4818     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
4819     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
4820     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
4821     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
4822     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
4823     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
4824     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
4825     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
4826     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
4827     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
4828 }
4829 
4830 static const StructEntry struct_termios_def = {
4831     .convert = { host_to_target_termios, target_to_host_termios },
4832     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
4833     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
4834 };
4835 
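/* Translation table for mmap() flag bits, used when emulating the
 * mmap family of syscalls elsewhere in this file.
 */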
4836 static bitmask_transtbl mmap_flags_tbl[] = {
4837     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
4838     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
4839     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
4840     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
4841       MAP_ANONYMOUS, MAP_ANONYMOUS },
4842     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
4843       MAP_GROWSDOWN, MAP_GROWSDOWN },
4844     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
4845       MAP_DENYWRITE, MAP_DENYWRITE },
4846     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
4847       MAP_EXECUTABLE, MAP_EXECUTABLE },
4848     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
4849     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
4850       MAP_NORESERVE, MAP_NORESERVE },
4851     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
4852     /* MAP_STACK has been ignored by the kernel for quite some time.
4853        Recognize it for the target, but do not pass it through to
4854        the host.  */
4855     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
4856     { 0, 0, 0, 0 }
4857 };
4858 
4859 #if defined(TARGET_I386)
4860 
4861 /* NOTE: there is really only one LDT, shared by all threads */
4862 static uint8_t *ldt_table;
4863 
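/* modify_ldt() emulation: the guest LDT is kept as a flat array of
 * 8-byte descriptors in ldt_table; read_ldt() copies it back to the
 * guest, write_ldt() encodes and installs a single entry.
 */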
4864 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
4865 {
4866     int size;
4867     void *p;
4868 
4869     if (!ldt_table)
4870         return 0;
4871     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
4872     if (size > bytecount)
4873         size = bytecount;
4874     p = lock_user(VERIFY_WRITE, ptr, size, 0);
4875     if (!p)
4876         return -TARGET_EFAULT;
4877     /* ??? Should this be byteswapped?  */
4878     memcpy(p, ldt_table, size);
4879     unlock_user(p, ptr, size);
4880     return size;
4881 }
4882 
4883 /* XXX: add locking support */
4884 static abi_long write_ldt(CPUX86State *env,
4885                           abi_ulong ptr, unsigned long bytecount, int oldmode)
4886 {
4887     struct target_modify_ldt_ldt_s ldt_info;
4888     struct target_modify_ldt_ldt_s *target_ldt_info;
4889     int seg_32bit, contents, read_exec_only, limit_in_pages;
4890     int seg_not_present, useable, lm;
4891     uint32_t *lp, entry_1, entry_2;
4892 
4893     if (bytecount != sizeof(ldt_info))
4894         return -TARGET_EINVAL;
4895     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
4896         return -TARGET_EFAULT;
4897     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4898     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4899     ldt_info.limit = tswap32(target_ldt_info->limit);
4900     ldt_info.flags = tswap32(target_ldt_info->flags);
4901     unlock_user_struct(target_ldt_info, ptr, 0);
4902 
4903     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4904         return -TARGET_EINVAL;
4905     seg_32bit = ldt_info.flags & 1;
4906     contents = (ldt_info.flags >> 1) & 3;
4907     read_exec_only = (ldt_info.flags >> 3) & 1;
4908     limit_in_pages = (ldt_info.flags >> 4) & 1;
4909     seg_not_present = (ldt_info.flags >> 5) & 1;
4910     useable = (ldt_info.flags >> 6) & 1;
4911 #ifdef TARGET_ABI32
4912     lm = 0;
4913 #else
4914     lm = (ldt_info.flags >> 7) & 1;
4915 #endif
4916     if (contents == 3) {
4917         if (oldmode)
4918             return -TARGET_EINVAL;
4919         if (seg_not_present == 0)
4920             return -TARGET_EINVAL;
4921     }
4922     /* allocate the LDT */
4923     if (!ldt_table) {
4924         env->ldt.base = target_mmap(0,
4925                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4926                                     PROT_READ|PROT_WRITE,
4927                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4928         if (env->ldt.base == -1)
4929             return -TARGET_ENOMEM;
4930         memset(g2h(env->ldt.base), 0,
4931                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4932         env->ldt.limit = 0xffff;
4933         ldt_table = g2h(env->ldt.base);
4934     }
4935 
4936     /* NOTE: same code as Linux kernel */
4937     /* Allow LDTs to be cleared by the user. */
4938     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4939         if (oldmode ||
4940             (contents == 0		&&
4941              read_exec_only == 1	&&
4942              seg_32bit == 0		&&
4943              limit_in_pages == 0	&&
4944              seg_not_present == 1	&&
4945              useable == 0 )) {
4946             entry_1 = 0;
4947             entry_2 = 0;
4948             goto install;
4949         }
4950     }
4951 
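    /* Encode the descriptor the way the CPU expects it: entry_1 holds
     * base[15:0] and limit[15:0]; entry_2 holds base[31:24], base[23:16],
     * limit[19:16] and the type/flag bits.
     */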
4952     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4953         (ldt_info.limit & 0x0ffff);
4954     entry_2 = (ldt_info.base_addr & 0xff000000) |
4955         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4956         (ldt_info.limit & 0xf0000) |
4957         ((read_exec_only ^ 1) << 9) |
4958         (contents << 10) |
4959         ((seg_not_present ^ 1) << 15) |
4960         (seg_32bit << 22) |
4961         (limit_in_pages << 23) |
4962         (lm << 21) |
4963         0x7000;
4964     if (!oldmode)
4965         entry_2 |= (useable << 20);
4966 
4967     /* Install the new entry ...  */
4968 install:
4969     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4970     lp[0] = tswap32(entry_1);
4971     lp[1] = tswap32(entry_2);
4972     return 0;
4973 }
4974 
4975 /* specific and weird i386 syscalls */
4976 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4977                               unsigned long bytecount)
4978 {
4979     abi_long ret;
4980 
4981     switch (func) {
4982     case 0:
4983         ret = read_ldt(ptr, bytecount);
4984         break;
4985     case 1:
4986         ret = write_ldt(env, ptr, bytecount, 1);
4987         break;
4988     case 0x11:
4989         ret = write_ldt(env, ptr, bytecount, 0);
4990         break;
4991     default:
4992         ret = -TARGET_ENOSYS;
4993         break;
4994     }
4995     return ret;
4996 }
4997 
4998 #if defined(TARGET_I386) && defined(TARGET_ABI32)
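/* set_thread_area: pick (or honour) a TLS slot in the GDT between
 * TARGET_GDT_ENTRY_TLS_MIN and TARGET_GDT_ENTRY_TLS_MAX and install a
 * descriptor built from the guest's request, mirroring write_ldt().
 */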
4999 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5000 {
5001     uint64_t *gdt_table = g2h(env->gdt.base);
5002     struct target_modify_ldt_ldt_s ldt_info;
5003     struct target_modify_ldt_ldt_s *target_ldt_info;
5004     int seg_32bit, contents, read_exec_only, limit_in_pages;
5005     int seg_not_present, useable, lm;
5006     uint32_t *lp, entry_1, entry_2;
5007     int i;
5008 
5009     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5010     if (!target_ldt_info)
5011         return -TARGET_EFAULT;
5012     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5013     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5014     ldt_info.limit = tswap32(target_ldt_info->limit);
5015     ldt_info.flags = tswap32(target_ldt_info->flags);
5016     if (ldt_info.entry_number == -1) {
5017         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5018             if (gdt_table[i] == 0) {
5019                 ldt_info.entry_number = i;
5020                 target_ldt_info->entry_number = tswap32(i);
5021                 break;
5022             }
5023         }
5024     }
5025     unlock_user_struct(target_ldt_info, ptr, 1);
5026 
5027     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5028         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5029            return -TARGET_EINVAL;
5030     seg_32bit = ldt_info.flags & 1;
5031     contents = (ldt_info.flags >> 1) & 3;
5032     read_exec_only = (ldt_info.flags >> 3) & 1;
5033     limit_in_pages = (ldt_info.flags >> 4) & 1;
5034     seg_not_present = (ldt_info.flags >> 5) & 1;
5035     useable = (ldt_info.flags >> 6) & 1;
5036 #ifdef TARGET_ABI32
5037     lm = 0;
5038 #else
5039     lm = (ldt_info.flags >> 7) & 1;
5040 #endif
5041 
5042     if (contents == 3) {
5043         if (seg_not_present == 0)
5044             return -TARGET_EINVAL;
5045     }
5046 
5047     /* NOTE: same code as Linux kernel */
5048     /* Allow LDTs to be cleared by the user. */
5049     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5050         if ((contents == 0             &&
5051              read_exec_only == 1       &&
5052              seg_32bit == 0            &&
5053              limit_in_pages == 0       &&
5054              seg_not_present == 1      &&
5055              useable == 0 )) {
5056             entry_1 = 0;
5057             entry_2 = 0;
5058             goto install;
5059         }
5060     }
5061 
5062     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5063         (ldt_info.limit & 0x0ffff);
5064     entry_2 = (ldt_info.base_addr & 0xff000000) |
5065         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5066         (ldt_info.limit & 0xf0000) |
5067         ((read_exec_only ^ 1) << 9) |
5068         (contents << 10) |
5069         ((seg_not_present ^ 1) << 15) |
5070         (seg_32bit << 22) |
5071         (limit_in_pages << 23) |
5072         (useable << 20) |
5073         (lm << 21) |
5074         0x7000;
5075 
5076     /* Install the new entry ...  */
5077 install:
5078     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5079     lp[0] = tswap32(entry_1);
5080     lp[1] = tswap32(entry_2);
5081     return 0;
5082 }
5083 
5084 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5085 {
5086     struct target_modify_ldt_ldt_s *target_ldt_info;
5087     uint64_t *gdt_table = g2h(env->gdt.base);
5088     uint32_t base_addr, limit, flags;
5089     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5090     int seg_not_present, useable, lm;
5091     uint32_t *lp, entry_1, entry_2;
5092 
5093     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5094     if (!target_ldt_info)
5095         return -TARGET_EFAULT;
5096     idx = tswap32(target_ldt_info->entry_number);
5097     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5098         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5099         unlock_user_struct(target_ldt_info, ptr, 1);
5100         return -TARGET_EINVAL;
5101     }
5102     lp = (uint32_t *)(gdt_table + idx);
5103     entry_1 = tswap32(lp[0]);
5104     entry_2 = tswap32(lp[1]);
5105 
5106     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5107     contents = (entry_2 >> 10) & 3;
5108     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5109     seg_32bit = (entry_2 >> 22) & 1;
5110     limit_in_pages = (entry_2 >> 23) & 1;
5111     useable = (entry_2 >> 20) & 1;
5112 #ifdef TARGET_ABI32
5113     lm = 0;
5114 #else
5115     lm = (entry_2 >> 21) & 1;
5116 #endif
5117     flags = (seg_32bit << 0) | (contents << 1) |
5118         (read_exec_only << 3) | (limit_in_pages << 4) |
5119         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5120     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5121     base_addr = (entry_1 >> 16) |
5122         (entry_2 & 0xff000000) |
5123         ((entry_2 & 0xff) << 16);
5124     target_ldt_info->base_addr = tswapal(base_addr);
5125     target_ldt_info->limit = tswap32(limit);
5126     target_ldt_info->flags = tswap32(flags);
5127     unlock_user_struct(target_ldt_info, ptr, 1);
5128     return 0;
5129 }
5130 #endif /* TARGET_I386 && TARGET_ABI32 */
5131 
5132 #ifndef TARGET_ABI32
5133 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5134 {
5135     abi_long ret = 0;
5136     abi_ulong val;
5137     int idx;
5138 
5139     switch(code) {
5140     case TARGET_ARCH_SET_GS:
5141     case TARGET_ARCH_SET_FS:
5142         if (code == TARGET_ARCH_SET_GS)
5143             idx = R_GS;
5144         else
5145             idx = R_FS;
5146         cpu_x86_load_seg(env, idx, 0);
5147         env->segs[idx].base = addr;
5148         break;
5149     case TARGET_ARCH_GET_GS:
5150     case TARGET_ARCH_GET_FS:
5151         if (code == TARGET_ARCH_GET_GS)
5152             idx = R_GS;
5153         else
5154             idx = R_FS;
5155         val = env->segs[idx].base;
5156         if (put_user(val, addr, abi_ulong))
5157             ret = -TARGET_EFAULT;
5158         break;
5159     default:
5160         ret = -TARGET_EINVAL;
5161         break;
5162     }
5163     return ret;
5164 }
5165 #endif
5166 
5167 #endif /* defined(TARGET_I386) */
5168 
5169 #define NEW_STACK_SIZE 0x40000
5170 
5171 
5172 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5173 typedef struct {
5174     CPUArchState *env;
5175     pthread_mutex_t mutex;
5176     pthread_cond_t cond;
5177     pthread_t thread;
5178     uint32_t tid;
5179     abi_ulong child_tidptr;
5180     abi_ulong parent_tidptr;
5181     sigset_t sigmask;
5182 } new_thread_info;
5183 
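/* Entry point for threads created by the CLONE_VM path of do_fork():
 * register the new thread with RCU and TCG, publish its TID, restore
 * the signal mask, signal the parent and then enter the CPU loop.
 */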
5184 static void *clone_func(void *arg)
5185 {
5186     new_thread_info *info = arg;
5187     CPUArchState *env;
5188     CPUState *cpu;
5189     TaskState *ts;
5190 
5191     rcu_register_thread();
5192     tcg_register_thread();
5193     env = info->env;
5194     cpu = ENV_GET_CPU(env);
5195     thread_cpu = cpu;
5196     ts = (TaskState *)cpu->opaque;
5197     info->tid = gettid();
5198     task_settid(ts);
5199     if (info->child_tidptr)
5200         put_user_u32(info->tid, info->child_tidptr);
5201     if (info->parent_tidptr)
5202         put_user_u32(info->tid, info->parent_tidptr);
5203     /* Enable signals.  */
5204     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5205     /* Signal to the parent that we're ready.  */
5206     pthread_mutex_lock(&info->mutex);
5207     pthread_cond_broadcast(&info->cond);
5208     pthread_mutex_unlock(&info->mutex);
5209     /* Wait until the parent has finished initializing the tls state.  */
5210     pthread_mutex_lock(&clone_lock);
5211     pthread_mutex_unlock(&clone_lock);
5212     cpu_loop(env);
5213     /* never exits */
5214     return NULL;
5215 }
5216 
5217 /* do_fork() must return host values and target errnos (unlike most
5218    do_*() functions). */
5219 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5220                    abi_ulong parent_tidptr, target_ulong newtls,
5221                    abi_ulong child_tidptr)
5222 {
5223     CPUState *cpu = ENV_GET_CPU(env);
5224     int ret;
5225     TaskState *ts;
5226     CPUState *new_cpu;
5227     CPUArchState *new_env;
5228     sigset_t sigmask;
5229 
5230     flags &= ~CLONE_IGNORED_FLAGS;
5231 
5232     /* Emulate vfork() with fork() */
5233     if (flags & CLONE_VFORK)
5234         flags &= ~(CLONE_VFORK | CLONE_VM);
5235 
5236     if (flags & CLONE_VM) {
5237         TaskState *parent_ts = (TaskState *)cpu->opaque;
5238         new_thread_info info;
5239         pthread_attr_t attr;
5240 
5241         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5242             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5243             return -TARGET_EINVAL;
5244         }
5245 
5246         ts = g_new0(TaskState, 1);
5247         init_task_state(ts);
5248 
5249         /* Grab a mutex so that thread setup appears atomic.  */
5250         pthread_mutex_lock(&clone_lock);
5251 
5252         /* we create a new CPU instance. */
5253         new_env = cpu_copy(env);
5254         /* Init regs that differ from the parent.  */
5255         cpu_clone_regs(new_env, newsp);
5256         new_cpu = ENV_GET_CPU(new_env);
5257         new_cpu->opaque = ts;
5258         ts->bprm = parent_ts->bprm;
5259         ts->info = parent_ts->info;
5260         ts->signal_mask = parent_ts->signal_mask;
5261 
5262         if (flags & CLONE_CHILD_CLEARTID) {
5263             ts->child_tidptr = child_tidptr;
5264         }
5265 
5266         if (flags & CLONE_SETTLS) {
5267             cpu_set_tls (new_env, newtls);
5268         }
5269 
5270         memset(&info, 0, sizeof(info));
5271         pthread_mutex_init(&info.mutex, NULL);
5272         pthread_mutex_lock(&info.mutex);
5273         pthread_cond_init(&info.cond, NULL);
5274         info.env = new_env;
5275         if (flags & CLONE_CHILD_SETTID) {
5276             info.child_tidptr = child_tidptr;
5277         }
5278         if (flags & CLONE_PARENT_SETTID) {
5279             info.parent_tidptr = parent_tidptr;
5280         }
5281 
5282         ret = pthread_attr_init(&attr);
5283         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5284         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5285         /* It is not safe to deliver signals until the child has finished
5286            initializing, so temporarily block all signals.  */
5287         sigfillset(&sigmask);
5288         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5289 
5290         /* If this is our first additional thread, we need to ensure we
5291          * generate code for parallel execution and flush old translations.
5292          */
5293         if (!parallel_cpus) {
5294             parallel_cpus = true;
5295             tb_flush(cpu);
5296         }
5297 
5298         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5299         /* TODO: Free new CPU state if thread creation failed.  */
5300 
5301         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5302         pthread_attr_destroy(&attr);
5303         if (ret == 0) {
5304             /* Wait for the child to initialize.  */
5305             pthread_cond_wait(&info.cond, &info.mutex);
5306             ret = info.tid;
5307         } else {
5308             ret = -1;
5309         }
5310         pthread_mutex_unlock(&info.mutex);
5311         pthread_cond_destroy(&info.cond);
5312         pthread_mutex_destroy(&info.mutex);
5313         pthread_mutex_unlock(&clone_lock);
5314     } else {
5315         /* if there is no CLONE_VM, we consider it a fork */
5316         if (flags & CLONE_INVALID_FORK_FLAGS) {
5317             return -TARGET_EINVAL;
5318         }
5319 
5320         /* We can't support custom termination signals */
5321         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5322             return -TARGET_EINVAL;
5323         }
5324 
5325         if (block_signals()) {
5326             return -TARGET_ERESTARTSYS;
5327         }
5328 
5329         fork_start();
5330         ret = fork();
5331         if (ret == 0) {
5332             /* Child Process.  */
5333             cpu_clone_regs(env, newsp);
5334             fork_end(1);
5335             /* There is a race condition here.  The parent process could
5336                theoretically read the TID in the child process before the child
5337                tid is set.  This would require using either ptrace
5338                (not implemented) or having *_tidptr to point at a shared memory
5339                (not implemented) or having *_tidptr point at a shared memory
5340                the child process gets its own copy of the lock.  */
5341             if (flags & CLONE_CHILD_SETTID)
5342                 put_user_u32(gettid(), child_tidptr);
5343             if (flags & CLONE_PARENT_SETTID)
5344                 put_user_u32(gettid(), parent_tidptr);
5345             ts = (TaskState *)cpu->opaque;
5346             if (flags & CLONE_SETTLS)
5347                 cpu_set_tls (env, newtls);
5348             if (flags & CLONE_CHILD_CLEARTID)
5349                 ts->child_tidptr = child_tidptr;
5350         } else {
5351             fork_end(0);
5352         }
5353     }
5354     return ret;
5355 }
5356 
5357 /* Warning: doesn't handle Linux-specific flags... */
5358 static int target_to_host_fcntl_cmd(int cmd)
5359 {
5360     int ret;
5361 
5362     switch(cmd) {
5363     case TARGET_F_DUPFD:
5364     case TARGET_F_GETFD:
5365     case TARGET_F_SETFD:
5366     case TARGET_F_GETFL:
5367     case TARGET_F_SETFL:
5368         ret = cmd;
5369         break;
5370     case TARGET_F_GETLK:
5371         ret = F_GETLK64;
5372         break;
5373     case TARGET_F_SETLK:
5374         ret = F_SETLK64;
5375         break;
5376     case TARGET_F_SETLKW:
5377         ret = F_SETLKW64;
5378         break;
5379     case TARGET_F_GETOWN:
5380         ret = F_GETOWN;
5381         break;
5382     case TARGET_F_SETOWN:
5383         ret = F_SETOWN;
5384         break;
5385     case TARGET_F_GETSIG:
5386         ret = F_GETSIG;
5387         break;
5388     case TARGET_F_SETSIG:
5389         ret = F_SETSIG;
5390         break;
5391 #if TARGET_ABI_BITS == 32
5392     case TARGET_F_GETLK64:
5393         ret = F_GETLK64;
5394         break;
5395     case TARGET_F_SETLK64:
5396         ret = F_SETLK64;
5397         break;
5398     case TARGET_F_SETLKW64:
5399         ret = F_SETLKW64;
5400         break;
5401 #endif
5402     case TARGET_F_SETLEASE:
5403         ret = F_SETLEASE;
5404         break;
5405     case TARGET_F_GETLEASE:
5406         ret = F_GETLEASE;
5407         break;
5408 #ifdef F_DUPFD_CLOEXEC
5409     case TARGET_F_DUPFD_CLOEXEC:
5410         ret = F_DUPFD_CLOEXEC;
5411         break;
5412 #endif
5413     case TARGET_F_NOTIFY:
5414         ret = F_NOTIFY;
5415         break;
5416 #ifdef F_GETOWN_EX
5417     case TARGET_F_GETOWN_EX:
5418         ret = F_GETOWN_EX;
5419         break;
5420 #endif
5421 #ifdef F_SETOWN_EX
5422     case TARGET_F_SETOWN_EX:
5423         ret = F_SETOWN_EX;
5424         break;
5425 #endif
5426 #ifdef F_SETPIPE_SZ
5427     case TARGET_F_SETPIPE_SZ:
5428         ret = F_SETPIPE_SZ;
5429         break;
5430     case TARGET_F_GETPIPE_SZ:
5431         ret = F_GETPIPE_SZ;
5432         break;
5433 #endif
5434     default:
5435         ret = -TARGET_EINVAL;
5436         break;
5437     }
5438 
5439 #if defined(__powerpc64__)
5440     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and 14,
5441      * which the kernel does not support. The glibc fcntl() wrapper adjusts
5442      * them to 5, 6 and 7 before making the syscall. Since we make the
5443      * syscall directly, adjust to what the kernel supports.
5444      */
5445     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5446         ret -= F_GETLK64 - 5;
5447     }
5448 #endif
5449 
5450     return ret;
5451 }
5452 
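/* Generate both directions of the flock type conversion from a single
 * list: TRANSTBL_CONVERT is defined as target->host in
 * target_to_host_flock() and as host->target in host_to_target_flock().
 */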
5453 #define FLOCK_TRANSTBL \
5454     switch (type) { \
5455     TRANSTBL_CONVERT(F_RDLCK); \
5456     TRANSTBL_CONVERT(F_WRLCK); \
5457     TRANSTBL_CONVERT(F_UNLCK); \
5458     TRANSTBL_CONVERT(F_EXLCK); \
5459     TRANSTBL_CONVERT(F_SHLCK); \
5460     }
5461 
5462 static int target_to_host_flock(int type)
5463 {
5464 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5465     FLOCK_TRANSTBL
5466 #undef  TRANSTBL_CONVERT
5467     return -TARGET_EINVAL;
5468 }
5469 
5470 static int host_to_target_flock(int type)
5471 {
5472 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5473     FLOCK_TRANSTBL
5474 #undef  TRANSTBL_CONVERT
5475     /* If we don't know how to convert the value coming from
5476      * the host, copy it to the target as-is.
5477      */
5478     return type;
5479 }
5480 
5481 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5482                                             abi_ulong target_flock_addr)
5483 {
5484     struct target_flock *target_fl;
5485     int l_type;
5486 
5487     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5488         return -TARGET_EFAULT;
5489     }
5490 
5491     __get_user(l_type, &target_fl->l_type);
5492     l_type = target_to_host_flock(l_type);
5493     if (l_type < 0) {
5494         return l_type;
5495     }
5496     fl->l_type = l_type;
5497     __get_user(fl->l_whence, &target_fl->l_whence);
5498     __get_user(fl->l_start, &target_fl->l_start);
5499     __get_user(fl->l_len, &target_fl->l_len);
5500     __get_user(fl->l_pid, &target_fl->l_pid);
5501     unlock_user_struct(target_fl, target_flock_addr, 0);
5502     return 0;
5503 }
5504 
5505 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5506                                           const struct flock64 *fl)
5507 {
5508     struct target_flock *target_fl;
5509     short l_type;
5510 
5511     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5512         return -TARGET_EFAULT;
5513     }
5514 
5515     l_type = host_to_target_flock(fl->l_type);
5516     __put_user(l_type, &target_fl->l_type);
5517     __put_user(fl->l_whence, &target_fl->l_whence);
5518     __put_user(fl->l_start, &target_fl->l_start);
5519     __put_user(fl->l_len, &target_fl->l_len);
5520     __put_user(fl->l_pid, &target_fl->l_pid);
5521     unlock_user_struct(target_fl, target_flock_addr, 1);
5522     return 0;
5523 }
5524 
5525 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5526 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5527 
5528 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5529 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5530                                                    abi_ulong target_flock_addr)
5531 {
5532     struct target_oabi_flock64 *target_fl;
5533     int l_type;
5534 
5535     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5536         return -TARGET_EFAULT;
5537     }
5538 
5539     __get_user(l_type, &target_fl->l_type);
5540     l_type = target_to_host_flock(l_type);
5541     if (l_type < 0) {
5542         return l_type;
5543     }
5544     fl->l_type = l_type;
5545     __get_user(fl->l_whence, &target_fl->l_whence);
5546     __get_user(fl->l_start, &target_fl->l_start);
5547     __get_user(fl->l_len, &target_fl->l_len);
5548     __get_user(fl->l_pid, &target_fl->l_pid);
5549     unlock_user_struct(target_fl, target_flock_addr, 0);
5550     return 0;
5551 }
5552 
5553 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
5554                                                  const struct flock64 *fl)
5555 {
5556     struct target_oabi_flock64 *target_fl;
5557     short l_type;
5558 
5559     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5560         return -TARGET_EFAULT;
5561     }
5562 
5563     l_type = host_to_target_flock(fl->l_type);
5564     __put_user(l_type, &target_fl->l_type);
5565     __put_user(fl->l_whence, &target_fl->l_whence);
5566     __put_user(fl->l_start, &target_fl->l_start);
5567     __put_user(fl->l_len, &target_fl->l_len);
5568     __put_user(fl->l_pid, &target_fl->l_pid);
5569     unlock_user_struct(target_fl, target_flock_addr, 1);
5570     return 0;
5571 }
5572 #endif
5573 
5574 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
5575                                               abi_ulong target_flock_addr)
5576 {
5577     struct target_flock64 *target_fl;
5578     int l_type;
5579 
5580     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5581         return -TARGET_EFAULT;
5582     }
5583 
5584     __get_user(l_type, &target_fl->l_type);
5585     l_type = target_to_host_flock(l_type);
5586     if (l_type < 0) {
5587         return l_type;
5588     }
5589     fl->l_type = l_type;
5590     __get_user(fl->l_whence, &target_fl->l_whence);
5591     __get_user(fl->l_start, &target_fl->l_start);
5592     __get_user(fl->l_len, &target_fl->l_len);
5593     __get_user(fl->l_pid, &target_fl->l_pid);
5594     unlock_user_struct(target_fl, target_flock_addr, 0);
5595     return 0;
5596 }
5597 
5598 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
5599                                             const struct flock64 *fl)
5600 {
5601     struct target_flock64 *target_fl;
5602     short l_type;
5603 
5604     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5605         return -TARGET_EFAULT;
5606     }
5607 
5608     l_type = host_to_target_flock(fl->l_type);
5609     __put_user(l_type, &target_fl->l_type);
5610     __put_user(fl->l_whence, &target_fl->l_whence);
5611     __put_user(fl->l_start, &target_fl->l_start);
5612     __put_user(fl->l_len, &target_fl->l_len);
5613     __put_user(fl->l_pid, &target_fl->l_pid);
5614     unlock_user_struct(target_fl, target_flock_addr, 1);
5615     return 0;
5616 }
5617 
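/* fcntl() emulation: translate the command, convert any flock or
 * f_owner_ex argument to the host layout, issue safe_fcntl() and
 * convert the result back.  Returns target errnos.
 */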
5618 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
5619 {
5620     struct flock64 fl64;
5621 #ifdef F_GETOWN_EX
5622     struct f_owner_ex fox;
5623     struct target_f_owner_ex *target_fox;
5624 #endif
5625     abi_long ret;
5626     int host_cmd = target_to_host_fcntl_cmd(cmd);
5627 
5628     if (host_cmd == -TARGET_EINVAL)
5629         return host_cmd;
5630 
5631     switch(cmd) {
5632     case TARGET_F_GETLK:
5633         ret = copy_from_user_flock(&fl64, arg);
5634         if (ret) {
5635             return ret;
5636         }
5637         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5638         if (ret == 0) {
5639             ret = copy_to_user_flock(arg, &fl64);
5640         }
5641         break;
5642 
5643     case TARGET_F_SETLK:
5644     case TARGET_F_SETLKW:
5645         ret = copy_from_user_flock(&fl64, arg);
5646         if (ret) {
5647             return ret;
5648         }
5649         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5650         break;
5651 
5652     case TARGET_F_GETLK64:
5653         ret = copy_from_user_flock64(&fl64, arg);
5654         if (ret) {
5655             return ret;
5656         }
5657         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5658         if (ret == 0) {
5659             ret = copy_to_user_flock64(arg, &fl64);
5660         }
5661         break;
5662     case TARGET_F_SETLK64:
5663     case TARGET_F_SETLKW64:
5664         ret = copy_from_user_flock64(&fl64, arg);
5665         if (ret) {
5666             return ret;
5667         }
5668         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5669         break;
5670 
5671     case TARGET_F_GETFL:
5672         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5673         if (ret >= 0) {
5674             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
5675         }
5676         break;
5677 
5678     case TARGET_F_SETFL:
5679         ret = get_errno(safe_fcntl(fd, host_cmd,
5680                                    target_to_host_bitmask(arg,
5681                                                           fcntl_flags_tbl)));
5682         break;
5683 
5684 #ifdef F_GETOWN_EX
5685     case TARGET_F_GETOWN_EX:
5686         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5687         if (ret >= 0) {
5688             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
5689                 return -TARGET_EFAULT;
5690             target_fox->type = tswap32(fox.type);
5691             target_fox->pid = tswap32(fox.pid);
5692             unlock_user_struct(target_fox, arg, 1);
5693         }
5694         break;
5695 #endif
5696 
5697 #ifdef F_SETOWN_EX
5698     case TARGET_F_SETOWN_EX:
5699         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
5700             return -TARGET_EFAULT;
5701         fox.type = tswap32(target_fox->type);
5702         fox.pid = tswap32(target_fox->pid);
5703         unlock_user_struct(target_fox, arg, 0);
5704         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5705         break;
5706 #endif
5707 
5708     case TARGET_F_SETOWN:
5709     case TARGET_F_GETOWN:
5710     case TARGET_F_SETSIG:
5711     case TARGET_F_GETSIG:
5712     case TARGET_F_SETLEASE:
5713     case TARGET_F_GETLEASE:
5714     case TARGET_F_SETPIPE_SZ:
5715     case TARGET_F_GETPIPE_SZ:
5716         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5717         break;
5718 
5719     default:
5720         ret = get_errno(safe_fcntl(fd, cmd, arg));
5721         break;
5722     }
5723     return ret;
5724 }
5725 
5726 #ifdef USE_UID16
5727 
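/* The 16-bit uid/gid syscalls cannot represent ids above 65535, so
 * clamp such values to 65534 (the kernel's default overflowuid) on the
 * way out, and preserve the special value -1 on the way in.
 */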
5728 static inline int high2lowuid(int uid)
5729 {
5730     if (uid > 65535)
5731         return 65534;
5732     else
5733         return uid;
5734 }
5735 
5736 static inline int high2lowgid(int gid)
5737 {
5738     if (gid > 65535)
5739         return 65534;
5740     else
5741         return gid;
5742 }
5743 
5744 static inline int low2highuid(int uid)
5745 {
5746     if ((int16_t)uid == -1)
5747         return -1;
5748     else
5749         return uid;
5750 }
5751 
5752 static inline int low2highgid(int gid)
5753 {
5754     if ((int16_t)gid == -1)
5755         return -1;
5756     else
5757         return gid;
5758 }
5759 static inline int tswapid(int id)
5760 {
5761     return tswap16(id);
5762 }
5763 
5764 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5765 
5766 #else /* !USE_UID16 */
5767 static inline int high2lowuid(int uid)
5768 {
5769     return uid;
5770 }
5771 static inline int high2lowgid(int gid)
5772 {
5773     return gid;
5774 }
5775 static inline int low2highuid(int uid)
5776 {
5777     return uid;
5778 }
5779 static inline int low2highgid(int gid)
5780 {
5781     return gid;
5782 }
5783 static inline int tswapid(int id)
5784 {
5785     return tswap32(id);
5786 }
5787 
5788 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5789 
5790 #endif /* USE_UID16 */
5791 
5792 /* We must do direct syscalls for setting UID/GID, because we want to
5793  * implement the Linux system call semantics of "change only for this thread",
5794  * not the libc/POSIX semantics of "change for all threads in process".
5795  * (See http://ewontfix.com/17/ for more details.)
5796  * We use the 32-bit version of the syscalls if present; if it is not
5797  * then either the host architecture supports 32-bit UIDs natively with
5798  * the standard syscall, or the 16-bit UID is the best we can do.
5799  */
5800 #ifdef __NR_setuid32
5801 #define __NR_sys_setuid __NR_setuid32
5802 #else
5803 #define __NR_sys_setuid __NR_setuid
5804 #endif
5805 #ifdef __NR_setgid32
5806 #define __NR_sys_setgid __NR_setgid32
5807 #else
5808 #define __NR_sys_setgid __NR_setgid
5809 #endif
5810 #ifdef __NR_setresuid32
5811 #define __NR_sys_setresuid __NR_setresuid32
5812 #else
5813 #define __NR_sys_setresuid __NR_setresuid
5814 #endif
5815 #ifdef __NR_setresgid32
5816 #define __NR_sys_setresgid __NR_setresgid32
5817 #else
5818 #define __NR_sys_setresgid __NR_setresgid
5819 #endif
5820 
5821 _syscall1(int, sys_setuid, uid_t, uid)
5822 _syscall1(int, sys_setgid, gid_t, gid)
5823 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
5824 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
5825 
5826 void syscall_init(void)
5827 {
5828     IOCTLEntry *ie;
5829     const argtype *arg_type;
5830     int size;
5831     int i;
5832 
5833     thunk_init(STRUCT_MAX);
5834 
5835 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
5836 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
5837 #include "syscall_types.h"
5838 #undef STRUCT
5839 #undef STRUCT_SPECIAL
5840 
5841     /* Build target_to_host_errno_table[] table from
5842      * host_to_target_errno_table[]. */
5843     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
5844         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
5845     }
5846 
5847     /* We patch the ioctl size if necessary. We rely on the fact that
5848        no ioctl has all bits set to '1' in the size field. */
5849     ie = ioctl_entries;
5850     while (ie->target_cmd != 0) {
5851         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
5852             TARGET_IOC_SIZEMASK) {
5853             arg_type = ie->arg_type;
5854             if (arg_type[0] != TYPE_PTR) {
5855                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
5856                         ie->target_cmd);
5857                 exit(1);
5858             }
5859             arg_type++;
5860             size = thunk_type_size(arg_type, 0);
5861             ie->target_cmd = (ie->target_cmd &
5862                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
5863                 (size << TARGET_IOC_SIZESHIFT);
5864         }
5865 
5866         /* automatic consistency check when host and target are the same arch */
5867 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
5868     (defined(__x86_64__) && defined(TARGET_X86_64))
5869         if (unlikely(ie->target_cmd != ie->host_cmd)) {
5870             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
5871                     ie->name, ie->target_cmd, ie->host_cmd);
5872         }
5873 #endif
5874         ie++;
5875     }
5876 }
5877 
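/* On 32-bit ABIs a 64-bit file offset is passed as a pair of registers;
 * target_offset64() reassembles the two halves in target word order.
 */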
5878 #if TARGET_ABI_BITS == 32
5879 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
5880 {
5881 #ifdef TARGET_WORDS_BIGENDIAN
5882     return ((uint64_t)word0 << 32) | word1;
5883 #else
5884     return ((uint64_t)word1 << 32) | word0;
5885 #endif
5886 }
5887 #else /* TARGET_ABI_BITS == 32 */
5888 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
5889 {
5890     return word0;
5891 }
5892 #endif /* TARGET_ABI_BITS != 32 */
5893 
5894 #ifdef TARGET_NR_truncate64
5895 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
5896                                          abi_long arg2,
5897                                          abi_long arg3,
5898                                          abi_long arg4)
5899 {
5900     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
5901         arg2 = arg3;
5902         arg3 = arg4;
5903     }
5904     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
5905 }
5906 #endif
5907 
5908 #ifdef TARGET_NR_ftruncate64
5909 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
5910                                           abi_long arg2,
5911                                           abi_long arg3,
5912                                           abi_long arg4)
5913 {
5914     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
5915         arg2 = arg3;
5916         arg3 = arg4;
5917     }
5918     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
5919 }
5920 #endif
5921 
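/* Converters between the target and host versions of timespec,
 * itimerspec, timex and sigevent, used by the time-related syscalls
 * elsewhere in this file.
 */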
5922 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
5923                                                abi_ulong target_addr)
5924 {
5925     struct target_timespec *target_ts;
5926 
5927     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
5928         return -TARGET_EFAULT;
5929     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
5930     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
5931     unlock_user_struct(target_ts, target_addr, 0);
5932     return 0;
5933 }
5934 
5935 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
5936                                                struct timespec *host_ts)
5937 {
5938     struct target_timespec *target_ts;
5939 
5940     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
5941         return -TARGET_EFAULT;
5942     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
5943     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
5944     unlock_user_struct(target_ts, target_addr, 1);
5945     return 0;
5946 }
5947 
5948 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
5949                                                  abi_ulong target_addr)
5950 {
5951     struct target_itimerspec *target_itspec;
5952 
5953     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
5954         return -TARGET_EFAULT;
5955     }
5956 
5957     host_itspec->it_interval.tv_sec =
5958                             tswapal(target_itspec->it_interval.tv_sec);
5959     host_itspec->it_interval.tv_nsec =
5960                             tswapal(target_itspec->it_interval.tv_nsec);
5961     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
5962     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
5963 
5964     unlock_user_struct(target_itspec, target_addr, 1);
5965     return 0;
5966 }
5967 
5968 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
5969                                                struct itimerspec *host_its)
5970 {
5971     struct target_itimerspec *target_itspec;
5972 
5973     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
5974         return -TARGET_EFAULT;
5975     }
5976 
5977     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
5978     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
5979 
5980     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
5981     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
5982 
5983     unlock_user_struct(target_itspec, target_addr, 0);
5984     return 0;
5985 }
5986 
5987 static inline abi_long target_to_host_timex(struct timex *host_tx,
5988                                             abi_long target_addr)
5989 {
5990     struct target_timex *target_tx;
5991 
5992     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
5993         return -TARGET_EFAULT;
5994     }
5995 
5996     __get_user(host_tx->modes, &target_tx->modes);
5997     __get_user(host_tx->offset, &target_tx->offset);
5998     __get_user(host_tx->freq, &target_tx->freq);
5999     __get_user(host_tx->maxerror, &target_tx->maxerror);
6000     __get_user(host_tx->esterror, &target_tx->esterror);
6001     __get_user(host_tx->status, &target_tx->status);
6002     __get_user(host_tx->constant, &target_tx->constant);
6003     __get_user(host_tx->precision, &target_tx->precision);
6004     __get_user(host_tx->tolerance, &target_tx->tolerance);
6005     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6006     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6007     __get_user(host_tx->tick, &target_tx->tick);
6008     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6009     __get_user(host_tx->jitter, &target_tx->jitter);
6010     __get_user(host_tx->shift, &target_tx->shift);
6011     __get_user(host_tx->stabil, &target_tx->stabil);
6012     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6013     __get_user(host_tx->calcnt, &target_tx->calcnt);
6014     __get_user(host_tx->errcnt, &target_tx->errcnt);
6015     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6016     __get_user(host_tx->tai, &target_tx->tai);
6017 
6018     unlock_user_struct(target_tx, target_addr, 0);
6019     return 0;
6020 }
6021 
6022 static inline abi_long host_to_target_timex(abi_long target_addr,
6023                                             struct timex *host_tx)
6024 {
6025     struct target_timex *target_tx;
6026 
6027     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6028         return -TARGET_EFAULT;
6029     }
6030 
6031     __put_user(host_tx->modes, &target_tx->modes);
6032     __put_user(host_tx->offset, &target_tx->offset);
6033     __put_user(host_tx->freq, &target_tx->freq);
6034     __put_user(host_tx->maxerror, &target_tx->maxerror);
6035     __put_user(host_tx->esterror, &target_tx->esterror);
6036     __put_user(host_tx->status, &target_tx->status);
6037     __put_user(host_tx->constant, &target_tx->constant);
6038     __put_user(host_tx->precision, &target_tx->precision);
6039     __put_user(host_tx->tolerance, &target_tx->tolerance);
6040     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6041     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6042     __put_user(host_tx->tick, &target_tx->tick);
6043     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6044     __put_user(host_tx->jitter, &target_tx->jitter);
6045     __put_user(host_tx->shift, &target_tx->shift);
6046     __put_user(host_tx->stabil, &target_tx->stabil);
6047     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6048     __put_user(host_tx->calcnt, &target_tx->calcnt);
6049     __put_user(host_tx->errcnt, &target_tx->errcnt);
6050     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6051     __put_user(host_tx->tai, &target_tx->tai);
6052 
6053     unlock_user_struct(target_tx, target_addr, 1);
6054     return 0;
6055 }
6056 
6057 
6058 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6059                                                abi_ulong target_addr)
6060 {
6061     struct target_sigevent *target_sevp;
6062 
6063     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6064         return -TARGET_EFAULT;
6065     }
6066 
6067     /* This union is awkward on 64 bit systems because it has a 32 bit
6068      * integer and a pointer in it; we follow the conversion approach
6069      * used for handling sigval types in signal.c so the guest should get
6070      * the correct value back even if we did a 64 bit byteswap and it's
6071      * using the 32 bit integer.
6072      */
6073     host_sevp->sigev_value.sival_ptr =
6074         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6075     host_sevp->sigev_signo =
6076         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6077     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6078     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6079 
6080     unlock_user_struct(target_sevp, target_addr, 1);
6081     return 0;
6082 }
6083 
6084 #if defined(TARGET_NR_mlockall)
6085 static inline int target_to_host_mlockall_arg(int arg)
6086 {
6087     int result = 0;
6088 
6089     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6090         result |= MCL_CURRENT;
6091     }
6092     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6093         result |= MCL_FUTURE;
6094     }
6095     return result;
6096 }
6097 #endif
6098 
6099 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6100      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6101      defined(TARGET_NR_newfstatat))
6102 static inline abi_long host_to_target_stat64(void *cpu_env,
6103                                              abi_ulong target_addr,
6104                                              struct stat *host_st)
6105 {
6106 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6107     if (((CPUARMState *)cpu_env)->eabi) {
6108         struct target_eabi_stat64 *target_st;
6109 
6110         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6111             return -TARGET_EFAULT;
6112         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6113         __put_user(host_st->st_dev, &target_st->st_dev);
6114         __put_user(host_st->st_ino, &target_st->st_ino);
6115 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6116         __put_user(host_st->st_ino, &target_st->__st_ino);
6117 #endif
6118         __put_user(host_st->st_mode, &target_st->st_mode);
6119         __put_user(host_st->st_nlink, &target_st->st_nlink);
6120         __put_user(host_st->st_uid, &target_st->st_uid);
6121         __put_user(host_st->st_gid, &target_st->st_gid);
6122         __put_user(host_st->st_rdev, &target_st->st_rdev);
6123         __put_user(host_st->st_size, &target_st->st_size);
6124         __put_user(host_st->st_blksize, &target_st->st_blksize);
6125         __put_user(host_st->st_blocks, &target_st->st_blocks);
6126         __put_user(host_st->st_atime, &target_st->target_st_atime);
6127         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6128         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6129         unlock_user_struct(target_st, target_addr, 1);
6130     } else
6131 #endif
6132     {
6133 #if defined(TARGET_HAS_STRUCT_STAT64)
6134         struct target_stat64 *target_st;
6135 #else
6136         struct target_stat *target_st;
6137 #endif
6138 
6139         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6140             return -TARGET_EFAULT;
6141         memset(target_st, 0, sizeof(*target_st));
6142         __put_user(host_st->st_dev, &target_st->st_dev);
6143         __put_user(host_st->st_ino, &target_st->st_ino);
6144 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6145         __put_user(host_st->st_ino, &target_st->__st_ino);
6146 #endif
6147         __put_user(host_st->st_mode, &target_st->st_mode);
6148         __put_user(host_st->st_nlink, &target_st->st_nlink);
6149         __put_user(host_st->st_uid, &target_st->st_uid);
6150         __put_user(host_st->st_gid, &target_st->st_gid);
6151         __put_user(host_st->st_rdev, &target_st->st_rdev);
6152         /* XXX: better use of kernel struct */
6153         __put_user(host_st->st_size, &target_st->st_size);
6154         __put_user(host_st->st_blksize, &target_st->st_blksize);
6155         __put_user(host_st->st_blocks, &target_st->st_blocks);
6156         __put_user(host_st->st_atime, &target_st->target_st_atime);
6157         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6158         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6159         unlock_user_struct(target_st, target_addr, 1);
6160     }
6161 
6162     return 0;
6163 }
6164 #endif
6165 
6166 /* ??? Using host futex calls even when target atomic operations
6167    are not really atomic probably breaks things.  However, implementing
6168    futexes locally would make futexes shared between multiple processes
6169    tricky, and they would probably be useless anyway because guest
6170    atomic operations won't work either.  */
6171 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6172                     target_ulong uaddr2, int val3)
6173 {
6174     struct timespec ts, *pts;
6175     int base_op;
6176 
6177     /* ??? We assume FUTEX_* constants are the same on both host
6178        and target.  */
6179 #ifdef FUTEX_CMD_MASK
6180     base_op = op & FUTEX_CMD_MASK;
6181 #else
6182     base_op = op;
6183 #endif
6184     switch (base_op) {
6185     case FUTEX_WAIT:
6186     case FUTEX_WAIT_BITSET:
6187         if (timeout) {
6188             pts = &ts;
6189             target_to_host_timespec(pts, timeout);
6190         } else {
6191             pts = NULL;
6192         }
6193         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6194                          pts, NULL, val3));
6195     case FUTEX_WAKE:
6196         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6197     case FUTEX_FD:
6198         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6199     case FUTEX_REQUEUE:
6200     case FUTEX_CMP_REQUEUE:
6201     case FUTEX_WAKE_OP:
6202         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6203            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6204            But the prototype takes a `struct timespec *'; insert casts
6205            to satisfy the compiler.  We do not need to tswap TIMEOUT
6206            since it's not compared to guest memory.  */
6207         pts = (struct timespec *)(uintptr_t) timeout;
6208         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6209                                     g2h(uaddr2),
6210                                     (base_op == FUTEX_CMP_REQUEUE
6211                                      ? tswap32(val3)
6212                                      : val3)));
6213     default:
6214         return -TARGET_ENOSYS;
6215     }
6216 }
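
     /*
      * Example (illustrative): a guest futex(uaddr, FUTEX_WAIT, 1, &ts)
      * reaches the host as
      *     safe_futex(g2h(uaddr), FUTEX_WAIT, tswap32(1), &host_ts, NULL, 0)
      * i.e. the expected value is byte-swapped to match the guest-endian
      * word in memory, while the timeout is converted with
      * target_to_host_timespec() and passed as a host pointer.
      */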
6217 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6218 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6219                                      abi_long handle, abi_long mount_id,
6220                                      abi_long flags)
6221 {
6222     struct file_handle *target_fh;
6223     struct file_handle *fh;
6224     int mid = 0;
6225     abi_long ret;
6226     char *name;
6227     unsigned int size, total_size;
6228 
6229     if (get_user_s32(size, handle)) {
6230         return -TARGET_EFAULT;
6231     }
6232 
6233     name = lock_user_string(pathname);
6234     if (!name) {
6235         return -TARGET_EFAULT;
6236     }
6237 
6238     total_size = sizeof(struct file_handle) + size;
6239     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6240     if (!target_fh) {
6241         unlock_user(name, pathname, 0);
6242         return -TARGET_EFAULT;
6243     }
6244 
6245     fh = g_malloc0(total_size);
6246     fh->handle_bytes = size;
6247 
6248     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6249     unlock_user(name, pathname, 0);
6250 
6251     /* man name_to_handle_at(2):
6252      * Other than the use of the handle_bytes field, the caller should treat
6253      * the file_handle structure as an opaque data type
6254      */
6255 
6256     memcpy(target_fh, fh, total_size);
6257     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6258     target_fh->handle_type = tswap32(fh->handle_type);
6259     g_free(fh);
6260     unlock_user(target_fh, handle, total_size);
6261 
6262     if (put_user_s32(mid, mount_id)) {
6263         return -TARGET_EFAULT;
6264     }
6265 
6266     return ret;
6267 
6268 }
6269 #endif
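
     /*
      * Note (illustrative): only the two leading 32-bit fields of
      * struct file_handle (handle_bytes and handle_type) are
      * byte-swapped for the guest; the opaque f_handle payload is
      * copied verbatim, since name_to_handle_at(2) requires callers to
      * treat it as an opaque blob.
      */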
6270 
6271 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6272 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6273                                      abi_long flags)
6274 {
6275     struct file_handle *target_fh;
6276     struct file_handle *fh;
6277     unsigned int size, total_size;
6278     abi_long ret;
6279 
6280     if (get_user_s32(size, handle)) {
6281         return -TARGET_EFAULT;
6282     }
6283 
6284     total_size = sizeof(struct file_handle) + size;
6285     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6286     if (!target_fh) {
6287         return -TARGET_EFAULT;
6288     }
6289 
6290     fh = g_memdup(target_fh, total_size);
6291     fh->handle_bytes = size;
6292     fh->handle_type = tswap32(target_fh->handle_type);
6293 
6294     ret = get_errno(open_by_handle_at(mount_fd, fh,
6295                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6296 
6297     g_free(fh);
6298 
6299     unlock_user(target_fh, handle, total_size);
6300 
6301     return ret;
6302 }
6303 #endif
6304 
6305 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6306 
6307 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6308 {
6309     int host_flags;
6310     target_sigset_t *target_mask;
6311     sigset_t host_mask;
6312     abi_long ret;
6313 
6314     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6315         return -TARGET_EINVAL;
6316     }
6317     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6318         return -TARGET_EFAULT;
6319     }
6320 
6321     target_to_host_sigset(&host_mask, target_mask);
6322 
6323     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6324 
6325     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6326     if (ret >= 0) {
6327         fd_trans_register(ret, &target_signalfd_trans);
6328     }
6329 
6330     unlock_user_struct(target_mask, mask, 0);
6331 
6332     return ret;
6333 }
6334 #endif
6335 
6336 /* Map host to target signal numbers for the wait family of syscalls.
6337    Assume all other status bits are the same.  */
6338 int host_to_target_waitstatus(int status)
6339 {
6340     if (WIFSIGNALED(status)) {
6341         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6342     }
6343     if (WIFSTOPPED(status)) {
6344         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6345                | (status & 0xff);
6346     }
6347     return status;
6348 }
6349 
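     /*
      * Worked example (illustrative): with an x86 host and a MIPS
      * guest, a child stopped by SIGSTOP yields a host status of
      * 0x137f (host SIGSTOP == 19, WIFSTOPPED is true); the stop
      * signal is remapped to the guest's SIGSTOP (23 on MIPS), giving
      * (23 << 8) | 0x7f == 0x177f.  Plain exit statuses pass through
      * unchanged.
      */
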
6350 static int open_self_cmdline(void *cpu_env, int fd)
6351 {
6352     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6353     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6354     int i;
6355 
6356     for (i = 0; i < bprm->argc; i++) {
6357         size_t len = strlen(bprm->argv[i]) + 1;
6358 
6359         if (write(fd, bprm->argv[i], len) != len) {
6360             return -1;
6361         }
6362     }
6363 
6364     return 0;
6365 }
6366 
6367 static int open_self_maps(void *cpu_env, int fd)
6368 {
6369     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6370     TaskState *ts = cpu->opaque;
6371     FILE *fp;
6372     char *line = NULL;
6373     size_t len = 0;
6374     ssize_t read;
6375 
6376     fp = fopen("/proc/self/maps", "r");
6377     if (fp == NULL) {
6378         return -1;
6379     }
6380 
6381     while ((read = getline(&line, &len, fp)) != -1) {
6382         int fields, dev_maj, dev_min, inode;
6383         uint64_t min, max, offset;
6384         char flag_r, flag_w, flag_x, flag_p;
6385         char path[512] = "";
6386         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6387                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6388                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6389 
6390         if ((fields < 10) || (fields > 11)) {
6391             continue;
6392         }
6393         if (h2g_valid(min)) {
6394             int flags = page_get_flags(h2g(min));
6395             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6396             if (page_check_range(h2g(min), max - min, flags) == -1) {
6397                 continue;
6398             }
6399             if (h2g(min) == ts->info->stack_limit) {
6400                 pstrcpy(path, sizeof(path), "      [stack]");
6401             }
6402             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6403                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6404                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6405                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6406                     path[0] ? "         " : "", path);
6407         }
6408     }
6409 
6410     free(line);
6411     fclose(fp);
6412 
6413     return 0;
6414 }
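
     /*
      * Illustrative example: a host line such as
      *     7f0000000000-7f0000001000 r-xp 00000000 08:01 123456 /lib/ld.so
      * is only re-emitted if the range maps back into guest address
      * space, with both addresses rewritten through h2g() so the guest
      * sees its own view of the mapping rather than the host's.
      */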
6415 
6416 static int open_self_stat(void *cpu_env, int fd)
6417 {
6418     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6419     TaskState *ts = cpu->opaque;
6420     abi_ulong start_stack = ts->info->start_stack;
6421     int i;
6422 
6423     for (i = 0; i < 44; i++) {
6424       char buf[128];
6425       int len;
6426       uint64_t val = 0;
6427 
6428       if (i == 0) {
6429         /* pid */
6430         val = getpid();
6431         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6432       } else if (i == 1) {
6433         /* app name */
6434         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6435       } else if (i == 27) {
6436         /* stack bottom */
6437         val = start_stack;
6438         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6439       } else {
6440         /* every other field is reported as 0 */
6441         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6442       }
6443 
6444       len = strlen(buf);
6445       if (write(fd, buf, len) != len) {
6446           return -1;
6447       }
6448     }
6449 
6450     return 0;
6451 }
6452 
6453 static int open_self_auxv(void *cpu_env, int fd)
6454 {
6455     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6456     TaskState *ts = cpu->opaque;
6457     abi_ulong auxv = ts->info->saved_auxv;
6458     abi_ulong len = ts->info->auxv_len;
6459     char *ptr;
6460 
6461     /*
6462      * The auxiliary vector is stored on the target process stack;
6463      * read the whole vector and copy it to the file.
6464      */
6465     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6466     if (ptr != NULL) {
6467         while (len > 0) {
6468             ssize_t r;
6469             r = write(fd, ptr, len);
6470             if (r <= 0) {
6471                 break;
6472             }
6473             len -= r;
6474             ptr += r;
6475         }
6476         lseek(fd, 0, SEEK_SET);
6477         unlock_user(ptr, auxv, len);
6478     }
6479 
6480     return 0;
6481 }
6482 
6483 static int is_proc_myself(const char *filename, const char *entry)
6484 {
6485     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6486         filename += strlen("/proc/");
6487         if (!strncmp(filename, "self/", strlen("self/"))) {
6488             filename += strlen("self/");
6489         } else if (*filename >= '1' && *filename <= '9') {
6490             char myself[80];
6491             snprintf(myself, sizeof(myself), "%d/", getpid());
6492             if (!strncmp(filename, myself, strlen(myself))) {
6493                 filename += strlen(myself);
6494             } else {
6495                 return 0;
6496             }
6497         } else {
6498             return 0;
6499         }
6500         if (!strcmp(filename, entry)) {
6501             return 1;
6502         }
6503     }
6504     return 0;
6505 }
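
     /*
      * Usage sketch (illustrative), assuming getpid() == 1234:
      *     is_proc_myself("/proc/self/maps", "maps")  -> 1
      *     is_proc_myself("/proc/1234/maps", "maps")  -> 1
      *     is_proc_myself("/proc/1235/maps", "maps")  -> 0
      *     is_proc_myself("/etc/passwd",     "maps")  -> 0
      * i.e. only paths referring to the emulated process itself match.
      */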
6506 
6507 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6508 static int is_proc(const char *filename, const char *entry)
6509 {
6510     return strcmp(filename, entry) == 0;
6511 }
6512 
6513 static int open_net_route(void *cpu_env, int fd)
6514 {
6515     FILE *fp;
6516     char *line = NULL;
6517     size_t len = 0;
6518     ssize_t read;
6519 
6520     fp = fopen("/proc/net/route", "r");
6521     if (fp == NULL) {
6522         return -1;
6523     }
6524 
6525     /* read header */
6526 
6527     read = getline(&line, &len, fp);
6528     dprintf(fd, "%s", line);
6529 
6530     /* read routes */
6531 
6532     while ((read = getline(&line, &len, fp)) != -1) {
6533         char iface[16];
6534         uint32_t dest, gw, mask;
6535         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6536         sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6537                      iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6538                      &mask, &mtu, &window, &irtt);
6539         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6540                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6541                 metric, tswap32(mask), mtu, window, irtt);
6542     }
6543 
6544     free(line);
6545     fclose(fp);
6546 
6547     return 0;
6548 }
6549 #endif
6550 
6551 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
6552 {
6553     struct fake_open {
6554         const char *filename;
6555         int (*fill)(void *cpu_env, int fd);
6556         int (*cmp)(const char *s1, const char *s2);
6557     };
6558     const struct fake_open *fake_open;
6559     static const struct fake_open fakes[] = {
6560         { "maps", open_self_maps, is_proc_myself },
6561         { "stat", open_self_stat, is_proc_myself },
6562         { "auxv", open_self_auxv, is_proc_myself },
6563         { "cmdline", open_self_cmdline, is_proc_myself },
6564 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6565         { "/proc/net/route", open_net_route, is_proc },
6566 #endif
6567         { NULL, NULL, NULL }
6568     };
6569 
6570     if (is_proc_myself(pathname, "exe")) {
6571         int execfd = qemu_getauxval(AT_EXECFD);
6572         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
6573     }
6574 
6575     for (fake_open = fakes; fake_open->filename; fake_open++) {
6576         if (fake_open->cmp(pathname, fake_open->filename)) {
6577             break;
6578         }
6579     }
6580 
6581     if (fake_open->filename) {
6582         const char *tmpdir;
6583         char filename[PATH_MAX];
6584         int fd, r;
6585 
6586         /* create a temporary file to hold the synthesized contents */
6587         tmpdir = getenv("TMPDIR");
6588         if (!tmpdir)
6589             tmpdir = "/tmp";
6590         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
6591         fd = mkstemp(filename);
6592         if (fd < 0) {
6593             return fd;
6594         }
6595         unlink(filename);
6596 
6597         if ((r = fake_open->fill(cpu_env, fd))) {
6598             int e = errno;
6599             close(fd);
6600             errno = e;
6601             return r;
6602         }
6603         lseek(fd, 0, SEEK_SET);
6604 
6605         return fd;
6606     }
6607 
6608     return safe_openat(dirfd, path(pathname), flags, mode);
6609 }
6610 
6611 #define TIMER_MAGIC 0x0caf0000
6612 #define TIMER_MAGIC_MASK 0xffff0000
6613 
6614 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
6615 static target_timer_t get_timer_id(abi_long arg)
6616 {
6617     target_timer_t timerid = arg;
6618 
6619     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6620         return -TARGET_EINVAL;
6621     }
6622 
6623     timerid &= 0xffff;
6624 
6625     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6626         return -TARGET_EINVAL;
6627     }
6628 
6629     return timerid;
6630 }
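
     /*
      * Illustrative example: host timer slot 3 is handed to the guest
      * as 0x0caf0003 (TIMER_MAGIC | 3), so get_timer_id(0x0caf0003)
      * returns 3, while a value without the magic pattern in its upper
      * 16 bits, e.g. 0x00000003, is rejected with -TARGET_EINVAL.
      */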
6631 
6632 static int target_to_host_cpu_mask(unsigned long *host_mask,
6633                                    size_t host_size,
6634                                    abi_ulong target_addr,
6635                                    size_t target_size)
6636 {
6637     unsigned target_bits = sizeof(abi_ulong) * 8;
6638     unsigned host_bits = sizeof(*host_mask) * 8;
6639     abi_ulong *target_mask;
6640     unsigned i, j;
6641 
6642     assert(host_size >= target_size);
6643 
6644     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
6645     if (!target_mask) {
6646         return -TARGET_EFAULT;
6647     }
6648     memset(host_mask, 0, host_size);
6649 
6650     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6651         unsigned bit = i * target_bits;
6652         abi_ulong val;
6653 
6654         __get_user(val, &target_mask[i]);
6655         for (j = 0; j < target_bits; j++, bit++) {
6656             if (val & (1UL << j)) {
6657                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
6658             }
6659         }
6660     }
6661 
6662     unlock_user(target_mask, target_addr, 0);
6663     return 0;
6664 }
6665 
6666 static int host_to_target_cpu_mask(const unsigned long *host_mask,
6667                                    size_t host_size,
6668                                    abi_ulong target_addr,
6669                                    size_t target_size)
6670 {
6671     unsigned target_bits = sizeof(abi_ulong) * 8;
6672     unsigned host_bits = sizeof(*host_mask) * 8;
6673     abi_ulong *target_mask;
6674     unsigned i, j;
6675 
6676     assert(host_size >= target_size);
6677 
6678     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
6679     if (!target_mask) {
6680         return -TARGET_EFAULT;
6681     }
6682 
6683     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6684         unsigned bit = i * target_bits;
6685         abi_ulong val = 0;
6686 
6687         for (j = 0; j < target_bits; j++, bit++) {
6688             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
6689                 val |= 1UL << j;
6690             }
6691         }
6692         __put_user(val, &target_mask[i]);
6693     }
6694 
6695     unlock_user(target_mask, target_addr, target_size);
6696     return 0;
6697 }
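
     /*
      * Conversion sketch (illustrative): for a 32-bit guest on a
      * 64-bit host, guest CPU 35 lives in bit 3 of target_mask[1]
      * (abi_ulong is 32 bits wide); target_to_host_cpu_mask() copies
      * it into bit 35 of host_mask[0] (unsigned long is 64 bits wide),
      * and host_to_target_cpu_mask() performs the inverse mapping,
      * with __get_user()/__put_user() handling any endian difference
      * per element.
      */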
6698 
6699 /* This is an internal helper for do_syscall so that it is easier
6700  * to have a single return point, which in turn makes it possible to
6701  * perform actions such as logging of syscall results.
6702  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
6703  */
6704 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
6705                             abi_long arg2, abi_long arg3, abi_long arg4,
6706                             abi_long arg5, abi_long arg6, abi_long arg7,
6707                             abi_long arg8)
6708 {
6709     CPUState *cpu = ENV_GET_CPU(cpu_env);
6710     abi_long ret;
6711 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
6712     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
6713     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
6714     struct stat st;
6715 #endif
6716 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
6717     || defined(TARGET_NR_fstatfs)
6718     struct statfs stfs;
6719 #endif
6720     void *p;
6721 
6722     switch(num) {
6723     case TARGET_NR_exit:
6724         /* In old applications this may be used to implement _exit(2).
6725            However in threaded applications it is used for thread termination,
6726            and _exit_group is used for application termination.
6727            Do thread termination if we have more than one thread.  */
6728 
6729         if (block_signals()) {
6730             return -TARGET_ERESTARTSYS;
6731         }
6732 
6733         cpu_list_lock();
6734 
6735         if (CPU_NEXT(first_cpu)) {
6736             TaskState *ts;
6737 
6738             /* Remove the CPU from the list.  */
6739             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
6740 
6741             cpu_list_unlock();
6742 
6743             ts = cpu->opaque;
6744             if (ts->child_tidptr) {
6745                 put_user_u32(0, ts->child_tidptr);
6746                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
6747                           NULL, NULL, 0);
6748             }
6749             thread_cpu = NULL;
6750             object_unref(OBJECT(cpu));
6751             g_free(ts);
6752             rcu_unregister_thread();
6753             pthread_exit(NULL);
6754         }
6755 
6756         cpu_list_unlock();
6757         preexit_cleanup(cpu_env, arg1);
6758         _exit(arg1);
6759         return 0; /* avoid warning */
6760     case TARGET_NR_read:
6761         if (arg3 == 0) {
6762             return 0;
6763         } else {
6764             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6765                 return -TARGET_EFAULT;
6766             ret = get_errno(safe_read(arg1, p, arg3));
6767             if (ret >= 0 &&
6768                 fd_trans_host_to_target_data(arg1)) {
6769                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
6770             }
6771             unlock_user(p, arg2, ret);
6772         }
6773         return ret;
6774     case TARGET_NR_write:
6775         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6776             return -TARGET_EFAULT;
6777         if (fd_trans_target_to_host_data(arg1)) {
6778             void *copy = g_malloc(arg3);
6779             memcpy(copy, p, arg3);
6780             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
6781             if (ret >= 0) {
6782                 ret = get_errno(safe_write(arg1, copy, ret));
6783             }
6784             g_free(copy);
6785         } else {
6786             ret = get_errno(safe_write(arg1, p, arg3));
6787         }
6788         unlock_user(p, arg2, 0);
6789         return ret;
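             /*
              * Illustrative note: fd_trans_target_to_host_data() is only
              * non-NULL for descriptors whose payload needs translation
              * (for example cross-endian netlink sockets registered via
              * fd_trans_register()); ordinary files and pipes take the
              * plain safe_write() path above.
              */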
6790 
6791 #ifdef TARGET_NR_open
6792     case TARGET_NR_open:
6793         if (!(p = lock_user_string(arg1)))
6794             return -TARGET_EFAULT;
6795         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
6796                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
6797                                   arg3));
6798         fd_trans_unregister(ret);
6799         unlock_user(p, arg1, 0);
6800         return ret;
6801 #endif
6802     case TARGET_NR_openat:
6803         if (!(p = lock_user_string(arg2)))
6804             return -TARGET_EFAULT;
6805         ret = get_errno(do_openat(cpu_env, arg1, p,
6806                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
6807                                   arg4));
6808         fd_trans_unregister(ret);
6809         unlock_user(p, arg2, 0);
6810         return ret;
6811 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6812     case TARGET_NR_name_to_handle_at:
6813         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
6814         return ret;
6815 #endif
6816 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6817     case TARGET_NR_open_by_handle_at:
6818         ret = do_open_by_handle_at(arg1, arg2, arg3);
6819         fd_trans_unregister(ret);
6820         return ret;
6821 #endif
6822     case TARGET_NR_close:
6823         fd_trans_unregister(arg1);
6824         return get_errno(close(arg1));
6825 
6826     case TARGET_NR_brk:
6827         return do_brk(arg1);
6828 #ifdef TARGET_NR_fork
6829     case TARGET_NR_fork:
6830         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
6831 #endif
6832 #ifdef TARGET_NR_waitpid
6833     case TARGET_NR_waitpid:
6834         {
6835             int status;
6836             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
6837             if (!is_error(ret) && arg2 && ret
6838                 && put_user_s32(host_to_target_waitstatus(status), arg2))
6839                 return -TARGET_EFAULT;
6840         }
6841         return ret;
6842 #endif
6843 #ifdef TARGET_NR_waitid
6844     case TARGET_NR_waitid:
6845         {
6846             siginfo_t info;
6847             info.si_pid = 0;
6848             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
6849             if (!is_error(ret) && arg3 && info.si_pid != 0) {
6850                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
6851                     return -TARGET_EFAULT;
6852                 host_to_target_siginfo(p, &info);
6853                 unlock_user(p, arg3, sizeof(target_siginfo_t));
6854             }
6855         }
6856         return ret;
6857 #endif
6858 #ifdef TARGET_NR_creat /* not on alpha */
6859     case TARGET_NR_creat:
6860         if (!(p = lock_user_string(arg1)))
6861             return -TARGET_EFAULT;
6862         ret = get_errno(creat(p, arg2));
6863         fd_trans_unregister(ret);
6864         unlock_user(p, arg1, 0);
6865         return ret;
6866 #endif
6867 #ifdef TARGET_NR_link
6868     case TARGET_NR_link:
6869         {
6870             void * p2;
6871             p = lock_user_string(arg1);
6872             p2 = lock_user_string(arg2);
6873             if (!p || !p2)
6874                 ret = -TARGET_EFAULT;
6875             else
6876                 ret = get_errno(link(p, p2));
6877             unlock_user(p2, arg2, 0);
6878             unlock_user(p, arg1, 0);
6879         }
6880         return ret;
6881 #endif
6882 #if defined(TARGET_NR_linkat)
6883     case TARGET_NR_linkat:
6884         {
6885             void * p2 = NULL;
6886             if (!arg2 || !arg4)
6887                 return -TARGET_EFAULT;
6888             p  = lock_user_string(arg2);
6889             p2 = lock_user_string(arg4);
6890             if (!p || !p2)
6891                 ret = -TARGET_EFAULT;
6892             else
6893                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
6894             unlock_user(p, arg2, 0);
6895             unlock_user(p2, arg4, 0);
6896         }
6897         return ret;
6898 #endif
6899 #ifdef TARGET_NR_unlink
6900     case TARGET_NR_unlink:
6901         if (!(p = lock_user_string(arg1)))
6902             return -TARGET_EFAULT;
6903         ret = get_errno(unlink(p));
6904         unlock_user(p, arg1, 0);
6905         return ret;
6906 #endif
6907 #if defined(TARGET_NR_unlinkat)
6908     case TARGET_NR_unlinkat:
6909         if (!(p = lock_user_string(arg2)))
6910             return -TARGET_EFAULT;
6911         ret = get_errno(unlinkat(arg1, p, arg3));
6912         unlock_user(p, arg2, 0);
6913         return ret;
6914 #endif
6915     case TARGET_NR_execve:
6916         {
6917             char **argp, **envp;
6918             int argc, envc;
6919             abi_ulong gp;
6920             abi_ulong guest_argp;
6921             abi_ulong guest_envp;
6922             abi_ulong addr;
6923             char **q;
6924             int total_size = 0;
6925 
6926             argc = 0;
6927             guest_argp = arg2;
6928             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
6929                 if (get_user_ual(addr, gp))
6930                     return -TARGET_EFAULT;
6931                 if (!addr)
6932                     break;
6933                 argc++;
6934             }
6935             envc = 0;
6936             guest_envp = arg3;
6937             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
6938                 if (get_user_ual(addr, gp))
6939                     return -TARGET_EFAULT;
6940                 if (!addr)
6941                     break;
6942                 envc++;
6943             }
6944 
6945             argp = g_new0(char *, argc + 1);
6946             envp = g_new0(char *, envc + 1);
6947 
6948             for (gp = guest_argp, q = argp; gp;
6949                   gp += sizeof(abi_ulong), q++) {
6950                 if (get_user_ual(addr, gp))
6951                     goto execve_efault;
6952                 if (!addr)
6953                     break;
6954                 if (!(*q = lock_user_string(addr)))
6955                     goto execve_efault;
6956                 total_size += strlen(*q) + 1;
6957             }
6958             *q = NULL;
6959 
6960             for (gp = guest_envp, q = envp; gp;
6961                   gp += sizeof(abi_ulong), q++) {
6962                 if (get_user_ual(addr, gp))
6963                     goto execve_efault;
6964                 if (!addr)
6965                     break;
6966                 if (!(*q = lock_user_string(addr)))
6967                     goto execve_efault;
6968                 total_size += strlen(*q) + 1;
6969             }
6970             *q = NULL;
6971 
6972             if (!(p = lock_user_string(arg1)))
6973                 goto execve_efault;
6974             /* Although execve() is not an interruptible syscall it is
6975              * a special case where we must use the safe_syscall wrapper:
6976              * if we allow a signal to happen before we make the host
6977              * syscall then we will 'lose' it, because at the point of
6978              * execve the process leaves QEMU's control. So we use the
6979              * safe syscall wrapper to ensure that we either take the
6980              * signal as a guest signal, or else it does not happen
6981              * before the execve completes and makes it the other
6982              * program's problem.
6983              */
6984             ret = get_errno(safe_execve(p, argp, envp));
6985             unlock_user(p, arg1, 0);
6986 
6987             goto execve_end;
6988 
6989         execve_efault:
6990             ret = -TARGET_EFAULT;
6991 
6992         execve_end:
6993             for (gp = guest_argp, q = argp; *q;
6994                   gp += sizeof(abi_ulong), q++) {
6995                 if (get_user_ual(addr, gp)
6996                     || !addr)
6997                     break;
6998                 unlock_user(*q, addr, 0);
6999             }
7000             for (gp = guest_envp, q = envp; *q;
7001                   gp += sizeof(abi_ulong), q++) {
7002                 if (get_user_ual(addr, gp)
7003                     || !addr)
7004                     break;
7005                 unlock_user(*q, addr, 0);
7006             }
7007 
7008             g_free(argp);
7009             g_free(envp);
7010         }
7011         return ret;
7012     case TARGET_NR_chdir:
7013         if (!(p = lock_user_string(arg1)))
7014             return -TARGET_EFAULT;
7015         ret = get_errno(chdir(p));
7016         unlock_user(p, arg1, 0);
7017         return ret;
7018 #ifdef TARGET_NR_time
7019     case TARGET_NR_time:
7020         {
7021             time_t host_time;
7022             ret = get_errno(time(&host_time));
7023             if (!is_error(ret)
7024                 && arg1
7025                 && put_user_sal(host_time, arg1))
7026                 return -TARGET_EFAULT;
7027         }
7028         return ret;
7029 #endif
7030 #ifdef TARGET_NR_mknod
7031     case TARGET_NR_mknod:
7032         if (!(p = lock_user_string(arg1)))
7033             return -TARGET_EFAULT;
7034         ret = get_errno(mknod(p, arg2, arg3));
7035         unlock_user(p, arg1, 0);
7036         return ret;
7037 #endif
7038 #if defined(TARGET_NR_mknodat)
7039     case TARGET_NR_mknodat:
7040         if (!(p = lock_user_string(arg2)))
7041             return -TARGET_EFAULT;
7042         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7043         unlock_user(p, arg2, 0);
7044         return ret;
7045 #endif
7046 #ifdef TARGET_NR_chmod
7047     case TARGET_NR_chmod:
7048         if (!(p = lock_user_string(arg1)))
7049             return -TARGET_EFAULT;
7050         ret = get_errno(chmod(p, arg2));
7051         unlock_user(p, arg1, 0);
7052         return ret;
7053 #endif
7054 #ifdef TARGET_NR_lseek
7055     case TARGET_NR_lseek:
7056         return get_errno(lseek(arg1, arg2, arg3));
7057 #endif
7058 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7059     /* Alpha specific */
7060     case TARGET_NR_getxpid:
7061         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7062         return get_errno(getpid());
7063 #endif
7064 #ifdef TARGET_NR_getpid
7065     case TARGET_NR_getpid:
7066         return get_errno(getpid());
7067 #endif
7068     case TARGET_NR_mount:
7069         {
7070             /* need to look at the data field */
7071             void *p2, *p3;
7072 
7073             if (arg1) {
7074                 p = lock_user_string(arg1);
7075                 if (!p) {
7076                     return -TARGET_EFAULT;
7077                 }
7078             } else {
7079                 p = NULL;
7080             }
7081 
7082             p2 = lock_user_string(arg2);
7083             if (!p2) {
7084                 if (arg1) {
7085                     unlock_user(p, arg1, 0);
7086                 }
7087                 return -TARGET_EFAULT;
7088             }
7089 
7090             if (arg3) {
7091                 p3 = lock_user_string(arg3);
7092                 if (!p3) {
7093                     if (arg1) {
7094                         unlock_user(p, arg1, 0);
7095                     }
7096                     unlock_user(p2, arg2, 0);
7097                     return -TARGET_EFAULT;
7098                 }
7099             } else {
7100                 p3 = NULL;
7101             }
7102 
7103             /* FIXME - arg5 should be locked, but it isn't clear how to
7104              * do that since it's not guaranteed to be a NULL-terminated
7105              * string.
7106              */
7107             if (!arg5) {
7108                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7109             } else {
7110                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7111             }
7112             ret = get_errno(ret);
7113 
7114             if (arg1) {
7115                 unlock_user(p, arg1, 0);
7116             }
7117             unlock_user(p2, arg2, 0);
7118             if (arg3) {
7119                 unlock_user(p3, arg3, 0);
7120             }
7121         }
7122         return ret;
7123 #ifdef TARGET_NR_umount
7124     case TARGET_NR_umount:
7125         if (!(p = lock_user_string(arg1)))
7126             return -TARGET_EFAULT;
7127         ret = get_errno(umount(p));
7128         unlock_user(p, arg1, 0);
7129         return ret;
7130 #endif
7131 #ifdef TARGET_NR_stime /* not on alpha */
7132     case TARGET_NR_stime:
7133         {
7134             time_t host_time;
7135             if (get_user_sal(host_time, arg1))
7136                 return -TARGET_EFAULT;
7137             return get_errno(stime(&host_time));
7138         }
7139 #endif
7140 #ifdef TARGET_NR_alarm /* not on alpha */
7141     case TARGET_NR_alarm:
7142         return alarm(arg1);
7143 #endif
7144 #ifdef TARGET_NR_pause /* not on alpha */
7145     case TARGET_NR_pause:
7146         if (!block_signals()) {
7147             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7148         }
7149         return -TARGET_EINTR;
7150 #endif
7151 #ifdef TARGET_NR_utime
7152     case TARGET_NR_utime:
7153         {
7154             struct utimbuf tbuf, *host_tbuf;
7155             struct target_utimbuf *target_tbuf;
7156             if (arg2) {
7157                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7158                     return -TARGET_EFAULT;
7159                 tbuf.actime = tswapal(target_tbuf->actime);
7160                 tbuf.modtime = tswapal(target_tbuf->modtime);
7161                 unlock_user_struct(target_tbuf, arg2, 0);
7162                 host_tbuf = &tbuf;
7163             } else {
7164                 host_tbuf = NULL;
7165             }
7166             if (!(p = lock_user_string(arg1)))
7167                 return -TARGET_EFAULT;
7168             ret = get_errno(utime(p, host_tbuf));
7169             unlock_user(p, arg1, 0);
7170         }
7171         return ret;
7172 #endif
7173 #ifdef TARGET_NR_utimes
7174     case TARGET_NR_utimes:
7175         {
7176             struct timeval *tvp, tv[2];
7177             if (arg2) {
7178                 if (copy_from_user_timeval(&tv[0], arg2)
7179                     || copy_from_user_timeval(&tv[1],
7180                                               arg2 + sizeof(struct target_timeval)))
7181                     return -TARGET_EFAULT;
7182                 tvp = tv;
7183             } else {
7184                 tvp = NULL;
7185             }
7186             if (!(p = lock_user_string(arg1)))
7187                 return -TARGET_EFAULT;
7188             ret = get_errno(utimes(p, tvp));
7189             unlock_user(p, arg1, 0);
7190         }
7191         return ret;
7192 #endif
7193 #if defined(TARGET_NR_futimesat)
7194     case TARGET_NR_futimesat:
7195         {
7196             struct timeval *tvp, tv[2];
7197             if (arg3) {
7198                 if (copy_from_user_timeval(&tv[0], arg3)
7199                     || copy_from_user_timeval(&tv[1],
7200                                               arg3 + sizeof(struct target_timeval)))
7201                     return -TARGET_EFAULT;
7202                 tvp = tv;
7203             } else {
7204                 tvp = NULL;
7205             }
7206             if (!(p = lock_user_string(arg2))) {
7207                 return -TARGET_EFAULT;
7208             }
7209             ret = get_errno(futimesat(arg1, path(p), tvp));
7210             unlock_user(p, arg2, 0);
7211         }
7212         return ret;
7213 #endif
7214 #ifdef TARGET_NR_access
7215     case TARGET_NR_access:
7216         if (!(p = lock_user_string(arg1))) {
7217             return -TARGET_EFAULT;
7218         }
7219         ret = get_errno(access(path(p), arg2));
7220         unlock_user(p, arg1, 0);
7221         return ret;
7222 #endif
7223 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7224     case TARGET_NR_faccessat:
7225         if (!(p = lock_user_string(arg2))) {
7226             return -TARGET_EFAULT;
7227         }
7228         ret = get_errno(faccessat(arg1, p, arg3, 0));
7229         unlock_user(p, arg2, 0);
7230         return ret;
7231 #endif
7232 #ifdef TARGET_NR_nice /* not on alpha */
7233     case TARGET_NR_nice:
7234         return get_errno(nice(arg1));
7235 #endif
7236     case TARGET_NR_sync:
7237         sync();
7238         return 0;
7239 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7240     case TARGET_NR_syncfs:
7241         return get_errno(syncfs(arg1));
7242 #endif
7243     case TARGET_NR_kill:
7244         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7245 #ifdef TARGET_NR_rename
7246     case TARGET_NR_rename:
7247         {
7248             void *p2;
7249             p = lock_user_string(arg1);
7250             p2 = lock_user_string(arg2);
7251             if (!p || !p2)
7252                 ret = -TARGET_EFAULT;
7253             else
7254                 ret = get_errno(rename(p, p2));
7255             unlock_user(p2, arg2, 0);
7256             unlock_user(p, arg1, 0);
7257         }
7258         return ret;
7259 #endif
7260 #if defined(TARGET_NR_renameat)
7261     case TARGET_NR_renameat:
7262         {
7263             void *p2;
7264             p  = lock_user_string(arg2);
7265             p2 = lock_user_string(arg4);
7266             if (!p || !p2)
7267                 ret = -TARGET_EFAULT;
7268             else
7269                 ret = get_errno(renameat(arg1, p, arg3, p2));
7270             unlock_user(p2, arg4, 0);
7271             unlock_user(p, arg2, 0);
7272         }
7273         return ret;
7274 #endif
7275 #if defined(TARGET_NR_renameat2)
7276     case TARGET_NR_renameat2:
7277         {
7278             void *p2;
7279             p  = lock_user_string(arg2);
7280             p2 = lock_user_string(arg4);
7281             if (!p || !p2) {
7282                 ret = -TARGET_EFAULT;
7283             } else {
7284                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7285             }
7286             unlock_user(p2, arg4, 0);
7287             unlock_user(p, arg2, 0);
7288         }
7289         return ret;
7290 #endif
7291 #ifdef TARGET_NR_mkdir
7292     case TARGET_NR_mkdir:
7293         if (!(p = lock_user_string(arg1)))
7294             return -TARGET_EFAULT;
7295         ret = get_errno(mkdir(p, arg2));
7296         unlock_user(p, arg1, 0);
7297         return ret;
7298 #endif
7299 #if defined(TARGET_NR_mkdirat)
7300     case TARGET_NR_mkdirat:
7301         if (!(p = lock_user_string(arg2)))
7302             return -TARGET_EFAULT;
7303         ret = get_errno(mkdirat(arg1, p, arg3));
7304         unlock_user(p, arg2, 0);
7305         return ret;
7306 #endif
7307 #ifdef TARGET_NR_rmdir
7308     case TARGET_NR_rmdir:
7309         if (!(p = lock_user_string(arg1)))
7310             return -TARGET_EFAULT;
7311         ret = get_errno(rmdir(p));
7312         unlock_user(p, arg1, 0);
7313         return ret;
7314 #endif
7315     case TARGET_NR_dup:
7316         ret = get_errno(dup(arg1));
7317         if (ret >= 0) {
7318             fd_trans_dup(arg1, ret);
7319         }
7320         return ret;
7321 #ifdef TARGET_NR_pipe
7322     case TARGET_NR_pipe:
7323         return do_pipe(cpu_env, arg1, 0, 0);
7324 #endif
7325 #ifdef TARGET_NR_pipe2
7326     case TARGET_NR_pipe2:
7327         return do_pipe(cpu_env, arg1,
7328                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7329 #endif
7330     case TARGET_NR_times:
7331         {
7332             struct target_tms *tmsp;
7333             struct tms tms;
7334             ret = get_errno(times(&tms));
7335             if (arg1) {
7336                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7337                 if (!tmsp)
7338                     return -TARGET_EFAULT;
7339                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7340                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7341                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7342                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7343             }
7344             if (!is_error(ret))
7345                 ret = host_to_target_clock_t(ret);
7346         }
7347         return ret;
7348     case TARGET_NR_acct:
7349         if (arg1 == 0) {
7350             ret = get_errno(acct(NULL));
7351         } else {
7352             if (!(p = lock_user_string(arg1))) {
7353                 return -TARGET_EFAULT;
7354             }
7355             ret = get_errno(acct(path(p)));
7356             unlock_user(p, arg1, 0);
7357         }
7358         return ret;
7359 #ifdef TARGET_NR_umount2
7360     case TARGET_NR_umount2:
7361         if (!(p = lock_user_string(arg1)))
7362             return -TARGET_EFAULT;
7363         ret = get_errno(umount2(p, arg2));
7364         unlock_user(p, arg1, 0);
7365         return ret;
7366 #endif
7367     case TARGET_NR_ioctl:
7368         return do_ioctl(arg1, arg2, arg3);
7369 #ifdef TARGET_NR_fcntl
7370     case TARGET_NR_fcntl:
7371         return do_fcntl(arg1, arg2, arg3);
7372 #endif
7373     case TARGET_NR_setpgid:
7374         return get_errno(setpgid(arg1, arg2));
7375     case TARGET_NR_umask:
7376         return get_errno(umask(arg1));
7377     case TARGET_NR_chroot:
7378         if (!(p = lock_user_string(arg1)))
7379             return -TARGET_EFAULT;
7380         ret = get_errno(chroot(p));
7381         unlock_user(p, arg1, 0);
7382         return ret;
7383 #ifdef TARGET_NR_dup2
7384     case TARGET_NR_dup2:
7385         ret = get_errno(dup2(arg1, arg2));
7386         if (ret >= 0) {
7387             fd_trans_dup(arg1, arg2);
7388         }
7389         return ret;
7390 #endif
7391 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7392     case TARGET_NR_dup3:
7393     {
7394         int host_flags;
7395 
7396         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7397             return -EINVAL;
7398         }
7399         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7400         ret = get_errno(dup3(arg1, arg2, host_flags));
7401         if (ret >= 0) {
7402             fd_trans_dup(arg1, arg2);
7403         }
7404         return ret;
7405     }
7406 #endif
7407 #ifdef TARGET_NR_getppid /* not on alpha */
7408     case TARGET_NR_getppid:
7409         return get_errno(getppid());
7410 #endif
7411 #ifdef TARGET_NR_getpgrp
7412     case TARGET_NR_getpgrp:
7413         return get_errno(getpgrp());
7414 #endif
7415     case TARGET_NR_setsid:
7416         return get_errno(setsid());
7417 #ifdef TARGET_NR_sigaction
7418     case TARGET_NR_sigaction:
7419         {
7420 #if defined(TARGET_ALPHA)
7421             struct target_sigaction act, oact, *pact = 0;
7422             struct target_old_sigaction *old_act;
7423             if (arg2) {
7424                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7425                     return -TARGET_EFAULT;
7426                 act._sa_handler = old_act->_sa_handler;
7427                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7428                 act.sa_flags = old_act->sa_flags;
7429                 act.sa_restorer = 0;
7430                 unlock_user_struct(old_act, arg2, 0);
7431                 pact = &act;
7432             }
7433             ret = get_errno(do_sigaction(arg1, pact, &oact));
7434             if (!is_error(ret) && arg3) {
7435                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7436                     return -TARGET_EFAULT;
7437                 old_act->_sa_handler = oact._sa_handler;
7438                 old_act->sa_mask = oact.sa_mask.sig[0];
7439                 old_act->sa_flags = oact.sa_flags;
7440                 unlock_user_struct(old_act, arg3, 1);
7441             }
7442 #elif defined(TARGET_MIPS)
7443             struct target_sigaction act, oact, *pact, *old_act;
7444 
7445             if (arg2) {
7446                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7447                     return -TARGET_EFAULT;
7448                 act._sa_handler = old_act->_sa_handler;
7449                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7450                 act.sa_flags = old_act->sa_flags;
7451                 unlock_user_struct(old_act, arg2, 0);
7452                 pact = &act;
7453             } else {
7454                 pact = NULL;
7455             }
7456 
7457             ret = get_errno(do_sigaction(arg1, pact, &oact));
7458 
7459             if (!is_error(ret) && arg3) {
7460                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7461                     return -TARGET_EFAULT;
7462                 old_act->_sa_handler = oact._sa_handler;
7463                 old_act->sa_flags = oact.sa_flags;
7464                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7465                 old_act->sa_mask.sig[1] = 0;
7466                 old_act->sa_mask.sig[2] = 0;
7467                 old_act->sa_mask.sig[3] = 0;
7468                 unlock_user_struct(old_act, arg3, 1);
7469             }
7470 #else
7471             struct target_old_sigaction *old_act;
7472             struct target_sigaction act, oact, *pact;
7473             if (arg2) {
7474                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7475                     return -TARGET_EFAULT;
7476                 act._sa_handler = old_act->_sa_handler;
7477                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7478                 act.sa_flags = old_act->sa_flags;
7479                 act.sa_restorer = old_act->sa_restorer;
7480 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7481                 act.ka_restorer = 0;
7482 #endif
7483                 unlock_user_struct(old_act, arg2, 0);
7484                 pact = &act;
7485             } else {
7486                 pact = NULL;
7487             }
7488             ret = get_errno(do_sigaction(arg1, pact, &oact));
7489             if (!is_error(ret) && arg3) {
7490                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7491                     return -TARGET_EFAULT;
7492                 old_act->_sa_handler = oact._sa_handler;
7493                 old_act->sa_mask = oact.sa_mask.sig[0];
7494                 old_act->sa_flags = oact.sa_flags;
7495                 old_act->sa_restorer = oact.sa_restorer;
7496                 unlock_user_struct(old_act, arg3, 1);
7497             }
7498 #endif
7499         }
7500         return ret;
7501 #endif
7502     case TARGET_NR_rt_sigaction:
7503         {
7504 #if defined(TARGET_ALPHA)
7505             /* For Alpha and SPARC this is a 5 argument syscall, with
7506              * a 'restorer' parameter which must be copied into the
7507              * sa_restorer field of the sigaction struct.
7508              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7509              * and arg5 is the sigsetsize.
7510              * Alpha also has a separate rt_sigaction struct that it uses
7511              * here; SPARC uses the usual sigaction struct.
7512              */
7513             struct target_rt_sigaction *rt_act;
7514             struct target_sigaction act, oact, *pact = 0;
7515 
7516             if (arg4 != sizeof(target_sigset_t)) {
7517                 return -TARGET_EINVAL;
7518             }
7519             if (arg2) {
7520                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7521                     return -TARGET_EFAULT;
7522                 act._sa_handler = rt_act->_sa_handler;
7523                 act.sa_mask = rt_act->sa_mask;
7524                 act.sa_flags = rt_act->sa_flags;
7525                 act.sa_restorer = arg5;
7526                 unlock_user_struct(rt_act, arg2, 0);
7527                 pact = &act;
7528             }
7529             ret = get_errno(do_sigaction(arg1, pact, &oact));
7530             if (!is_error(ret) && arg3) {
7531                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7532                     return -TARGET_EFAULT;
7533                 rt_act->_sa_handler = oact._sa_handler;
7534                 rt_act->sa_mask = oact.sa_mask;
7535                 rt_act->sa_flags = oact.sa_flags;
7536                 unlock_user_struct(rt_act, arg3, 1);
7537             }
7538 #else
7539 #ifdef TARGET_SPARC
7540             target_ulong restorer = arg4;
7541             target_ulong sigsetsize = arg5;
7542 #else
7543             target_ulong sigsetsize = arg4;
7544 #endif
7545             struct target_sigaction *act;
7546             struct target_sigaction *oact;
7547 
7548             if (sigsetsize != sizeof(target_sigset_t)) {
7549                 return -TARGET_EINVAL;
7550             }
7551             if (arg2) {
7552                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
7553                     return -TARGET_EFAULT;
7554                 }
7555 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7556                 act->ka_restorer = restorer;
7557 #endif
7558             } else {
7559                 act = NULL;
7560             }
7561             if (arg3) {
7562                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7563                     ret = -TARGET_EFAULT;
7564                     goto rt_sigaction_fail;
7565                 }
7566             } else
7567                 oact = NULL;
7568             ret = get_errno(do_sigaction(arg1, act, oact));
7569         rt_sigaction_fail:
7570             if (act)
7571                 unlock_user_struct(act, arg2, 0);
7572             if (oact)
7573                 unlock_user_struct(oact, arg3, 1);
7574 #endif
7575         }
7576         return ret;
7577 #ifdef TARGET_NR_sgetmask /* not on alpha */
7578     case TARGET_NR_sgetmask:
7579         {
7580             sigset_t cur_set;
7581             abi_ulong target_set;
7582             ret = do_sigprocmask(0, NULL, &cur_set);
7583             if (!ret) {
7584                 host_to_target_old_sigset(&target_set, &cur_set);
7585                 ret = target_set;
7586             }
7587         }
7588         return ret;
7589 #endif
7590 #ifdef TARGET_NR_ssetmask /* not on alpha */
7591     case TARGET_NR_ssetmask:
7592         {
7593             sigset_t set, oset;
7594             abi_ulong target_set = arg1;
7595             target_to_host_old_sigset(&set, &target_set);
7596             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7597             if (!ret) {
7598                 host_to_target_old_sigset(&target_set, &oset);
7599                 ret = target_set;
7600             }
7601         }
7602         return ret;
7603 #endif
7604 #ifdef TARGET_NR_sigprocmask
7605     case TARGET_NR_sigprocmask:
7606         {
7607 #if defined(TARGET_ALPHA)
7608             sigset_t set, oldset;
7609             abi_ulong mask;
7610             int how;
7611 
7612             switch (arg1) {
7613             case TARGET_SIG_BLOCK:
7614                 how = SIG_BLOCK;
7615                 break;
7616             case TARGET_SIG_UNBLOCK:
7617                 how = SIG_UNBLOCK;
7618                 break;
7619             case TARGET_SIG_SETMASK:
7620                 how = SIG_SETMASK;
7621                 break;
7622             default:
7623                 return -TARGET_EINVAL;
7624             }
7625             mask = arg2;
7626             target_to_host_old_sigset(&set, &mask);
7627 
7628             ret = do_sigprocmask(how, &set, &oldset);
7629             if (!is_error(ret)) {
7630                 host_to_target_old_sigset(&mask, &oldset);
7631                 ret = mask;
7632                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7633             }
7634 #else
7635             sigset_t set, oldset, *set_ptr;
7636             int how;
7637 
7638             if (arg2) {
7639                 switch (arg1) {
7640                 case TARGET_SIG_BLOCK:
7641                     how = SIG_BLOCK;
7642                     break;
7643                 case TARGET_SIG_UNBLOCK:
7644                     how = SIG_UNBLOCK;
7645                     break;
7646                 case TARGET_SIG_SETMASK:
7647                     how = SIG_SETMASK;
7648                     break;
7649                 default:
7650                     return -TARGET_EINVAL;
7651                 }
7652                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7653                     return -TARGET_EFAULT;
7654                 target_to_host_old_sigset(&set, p);
7655                 unlock_user(p, arg2, 0);
7656                 set_ptr = &set;
7657             } else {
7658                 how = 0;
7659                 set_ptr = NULL;
7660             }
7661             ret = do_sigprocmask(how, set_ptr, &oldset);
7662             if (!is_error(ret) && arg3) {
7663                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7664                     return -TARGET_EFAULT;
7665                 host_to_target_old_sigset(p, &oldset);
7666                 unlock_user(p, arg3, sizeof(target_sigset_t));
7667             }
7668 #endif
7669         }
7670         return ret;
7671 #endif
7672     case TARGET_NR_rt_sigprocmask:
7673         {
7674             int how = arg1;
7675             sigset_t set, oldset, *set_ptr;
7676 
7677             if (arg4 != sizeof(target_sigset_t)) {
7678                 return -TARGET_EINVAL;
7679             }
7680 
7681             if (arg2) {
7682                 switch(how) {
7683                 case TARGET_SIG_BLOCK:
7684                     how = SIG_BLOCK;
7685                     break;
7686                 case TARGET_SIG_UNBLOCK:
7687                     how = SIG_UNBLOCK;
7688                     break;
7689                 case TARGET_SIG_SETMASK:
7690                     how = SIG_SETMASK;
7691                     break;
7692                 default:
7693                     return -TARGET_EINVAL;
7694                 }
7695                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7696                     return -TARGET_EFAULT;
7697                 target_to_host_sigset(&set, p);
7698                 unlock_user(p, arg2, 0);
7699                 set_ptr = &set;
7700             } else {
7701                 how = 0;
7702                 set_ptr = NULL;
7703             }
7704             ret = do_sigprocmask(how, set_ptr, &oldset);
7705             if (!is_error(ret) && arg3) {
7706                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7707                     return -TARGET_EFAULT;
7708                 host_to_target_sigset(p, &oldset);
7709                 unlock_user(p, arg3, sizeof(target_sigset_t));
7710             }
7711         }
7712         return ret;
7713 #ifdef TARGET_NR_sigpending
7714     case TARGET_NR_sigpending:
7715         {
7716             sigset_t set;
7717             ret = get_errno(sigpending(&set));
7718             if (!is_error(ret)) {
7719                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7720                     return -TARGET_EFAULT;
7721                 host_to_target_old_sigset(p, &set);
7722                 unlock_user(p, arg1, sizeof(target_sigset_t));
7723             }
7724         }
7725         return ret;
7726 #endif
7727     case TARGET_NR_rt_sigpending:
7728         {
7729             sigset_t set;
7730 
7731             /* Yes, this check is >, not != like most. We follow the kernel's
7732              * logic and it does it like this because it implements
7733              * NR_sigpending through the same code path, and in that case
7734              * the old_sigset_t is smaller in size.
7735              */
7736             if (arg2 > sizeof(target_sigset_t)) {
7737                 return -TARGET_EINVAL;
7738             }
7739 
7740             ret = get_errno(sigpending(&set));
7741             if (!is_error(ret)) {
7742                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7743                     return -TARGET_EFAULT;
7744                 host_to_target_sigset(p, &set);
7745                 unlock_user(p, arg1, sizeof(target_sigset_t));
7746             }
7747         }
7748         return ret;
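        /*
         * For illustration (editorial note, hedged): the kernel's
         * sys_rt_sigpending() only rejects sizes *larger* than
         * sizeof(sigset_t), so a guest passing a smaller value, e.g. the
         * 4-byte old_sigset_t of a 32-bit ABI, is still accepted; the '>'
         * check above mirrors that behaviour.
         */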
7749 #ifdef TARGET_NR_sigsuspend
7750     case TARGET_NR_sigsuspend:
7751         {
7752             TaskState *ts = cpu->opaque;
7753 #if defined(TARGET_ALPHA)
7754             abi_ulong mask = arg1;
7755             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
7756 #else
7757             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7758                 return -TARGET_EFAULT;
7759             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
7760             unlock_user(p, arg1, 0);
7761 #endif
7762             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7763                                                SIGSET_T_SIZE));
7764             if (ret != -TARGET_ERESTARTSYS) {
7765                 ts->in_sigsuspend = 1;
7766             }
7767         }
7768         return ret;
7769 #endif
7770     case TARGET_NR_rt_sigsuspend:
7771         {
7772             TaskState *ts = cpu->opaque;
7773 
7774             if (arg2 != sizeof(target_sigset_t)) {
7775                 return -TARGET_EINVAL;
7776             }
7777             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7778                 return -TARGET_EFAULT;
7779             target_to_host_sigset(&ts->sigsuspend_mask, p);
7780             unlock_user(p, arg1, 0);
7781             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7782                                                SIGSET_T_SIZE));
7783             if (ret != -TARGET_ERESTARTSYS) {
7784                 ts->in_sigsuspend = 1;
7785             }
7786         }
7787         return ret;
7788     case TARGET_NR_rt_sigtimedwait:
7789         {
7790             sigset_t set;
7791             struct timespec uts, *puts;
7792             siginfo_t uinfo;
7793 
7794             if (arg4 != sizeof(target_sigset_t)) {
7795                 return -TARGET_EINVAL;
7796             }
7797 
7798             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7799                 return -TARGET_EFAULT;
7800             target_to_host_sigset(&set, p);
7801             unlock_user(p, arg1, 0);
7802             if (arg3) {
7803                 puts = &uts;
7804                 target_to_host_timespec(puts, arg3);
7805             } else {
7806                 puts = NULL;
7807             }
7808             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
7809                                                  SIGSET_T_SIZE));
7810             if (!is_error(ret)) {
7811                 if (arg2) {
7812                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
7813                                   0);
7814                     if (!p) {
7815                         return -TARGET_EFAULT;
7816                     }
7817                     host_to_target_siginfo(p, &uinfo);
7818                     unlock_user(p, arg2, sizeof(target_siginfo_t));
7819                 }
7820                 ret = host_to_target_signal(ret);
7821             }
7822         }
7823         return ret;
7824     case TARGET_NR_rt_sigqueueinfo:
7825         {
7826             siginfo_t uinfo;
7827 
7828             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
7829             if (!p) {
7830                 return -TARGET_EFAULT;
7831             }
7832             target_to_host_siginfo(&uinfo, p);
7833             unlock_user(p, arg3, 0);
7834             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
7835         }
7836         return ret;
7837     case TARGET_NR_rt_tgsigqueueinfo:
7838         {
7839             siginfo_t uinfo;
7840 
7841             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
7842             if (!p) {
7843                 return -TARGET_EFAULT;
7844             }
7845             target_to_host_siginfo(&uinfo, p);
7846             unlock_user(p, arg4, 0);
7847             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
7848         }
7849         return ret;
7850 #ifdef TARGET_NR_sigreturn
7851     case TARGET_NR_sigreturn:
7852         if (block_signals()) {
7853             return -TARGET_ERESTARTSYS;
7854         }
7855         return do_sigreturn(cpu_env);
7856 #endif
7857     case TARGET_NR_rt_sigreturn:
7858         if (block_signals()) {
7859             return -TARGET_ERESTARTSYS;
7860         }
7861         return do_rt_sigreturn(cpu_env);
7862     case TARGET_NR_sethostname:
7863         if (!(p = lock_user_string(arg1)))
7864             return -TARGET_EFAULT;
7865         ret = get_errno(sethostname(p, arg2));
7866         unlock_user(p, arg1, 0);
7867         return ret;
7868 #ifdef TARGET_NR_setrlimit
7869     case TARGET_NR_setrlimit:
7870         {
7871             int resource = target_to_host_resource(arg1);
7872             struct target_rlimit *target_rlim;
7873             struct rlimit rlim;
7874             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
7875                 return -TARGET_EFAULT;
7876             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
7877             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
7878             unlock_user_struct(target_rlim, arg2, 0);
7879             return get_errno(setrlimit(resource, &rlim));
7880         }
7881 #endif
7882 #ifdef TARGET_NR_getrlimit
7883     case TARGET_NR_getrlimit:
7884         {
7885             int resource = target_to_host_resource(arg1);
7886             struct target_rlimit *target_rlim;
7887             struct rlimit rlim;
7888 
7889             ret = get_errno(getrlimit(resource, &rlim));
7890             if (!is_error(ret)) {
7891                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7892                     return -TARGET_EFAULT;
7893                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7894                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7895                 unlock_user_struct(target_rlim, arg2, 1);
7896             }
7897         }
7898         return ret;
7899 #endif
7900     case TARGET_NR_getrusage:
7901         {
7902             struct rusage rusage;
7903             ret = get_errno(getrusage(arg1, &rusage));
7904             if (!is_error(ret)) {
7905                 ret = host_to_target_rusage(arg2, &rusage);
7906             }
7907         }
7908         return ret;
7909     case TARGET_NR_gettimeofday:
7910         {
7911             struct timeval tv;
7912             ret = get_errno(gettimeofday(&tv, NULL));
7913             if (!is_error(ret)) {
7914                 if (copy_to_user_timeval(arg1, &tv))
7915                     return -TARGET_EFAULT;
7916             }
7917         }
7918         return ret;
7919     case TARGET_NR_settimeofday:
7920         {
7921             struct timeval tv, *ptv = NULL;
7922             struct timezone tz, *ptz = NULL;
7923 
7924             if (arg1) {
7925                 if (copy_from_user_timeval(&tv, arg1)) {
7926                     return -TARGET_EFAULT;
7927                 }
7928                 ptv = &tv;
7929             }
7930 
7931             if (arg2) {
7932                 if (copy_from_user_timezone(&tz, arg2)) {
7933                     return -TARGET_EFAULT;
7934                 }
7935                 ptz = &tz;
7936             }
7937 
7938             return get_errno(settimeofday(ptv, ptz));
7939         }
7940 #if defined(TARGET_NR_select)
7941     case TARGET_NR_select:
7942 #if defined(TARGET_WANT_NI_OLD_SELECT)
7943         /* some architectures used to have old_select here
7944          * but now return ENOSYS for it.
7945          */
7946         ret = -TARGET_ENOSYS;
7947 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
7948         ret = do_old_select(arg1);
7949 #else
7950         ret = do_select(arg1, arg2, arg3, arg4, arg5);
7951 #endif
7952         return ret;
7953 #endif
7954 #ifdef TARGET_NR_pselect6
7955     case TARGET_NR_pselect6:
7956         {
7957             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
7958             fd_set rfds, wfds, efds;
7959             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
7960             struct timespec ts, *ts_ptr;
7961 
7962             /*
7963              * The 6th arg is actually two args smashed together,
7964              * so we cannot use the C library.
7965              */
7966             sigset_t set;
7967             struct {
7968                 sigset_t *set;
7969                 size_t size;
7970             } sig, *sig_ptr;
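            /*
             * For illustration, the guest packs the pair as two consecutive
             * abi_ulongs in its own memory (a sketch, not a target header):
             *
             *     abi_ulong packed[2];
             *     packed[0] = sigset_guest_addr;         (may be 0)
             *     packed[1] = sizeof(target_sigset_t);   (size, checked below)
             *
             * and passes the guest address of packed[] as the 6th syscall
             * argument, which is unpacked below via arg7[0] and arg7[1].
             */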
7971 
7972             abi_ulong arg_sigset, arg_sigsize, *arg7;
7973             target_sigset_t *target_sigset;
7974 
7975             n = arg1;
7976             rfd_addr = arg2;
7977             wfd_addr = arg3;
7978             efd_addr = arg4;
7979             ts_addr = arg5;
7980 
7981             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
7982             if (ret) {
7983                 return ret;
7984             }
7985             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
7986             if (ret) {
7987                 return ret;
7988             }
7989             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
7990             if (ret) {
7991                 return ret;
7992             }
7993 
7994             /*
7995              * This takes a timespec, and not a timeval, so we cannot
7996              * use the do_select() helper ...
7997              */
7998             if (ts_addr) {
7999                 if (target_to_host_timespec(&ts, ts_addr)) {
8000                     return -TARGET_EFAULT;
8001                 }
8002                 ts_ptr = &ts;
8003             } else {
8004                 ts_ptr = NULL;
8005             }
8006 
8007             /* Extract the two packed args for the sigset */
8008             if (arg6) {
8009                 sig_ptr = &sig;
8010                 sig.size = SIGSET_T_SIZE;
8011 
8012                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8013                 if (!arg7) {
8014                     return -TARGET_EFAULT;
8015                 }
8016                 arg_sigset = tswapal(arg7[0]);
8017                 arg_sigsize = tswapal(arg7[1]);
8018                 unlock_user(arg7, arg6, 0);
8019 
8020                 if (arg_sigset) {
8021                     sig.set = &set;
8022                     if (arg_sigsize != sizeof(*target_sigset)) {
8023                         /* Like the kernel, we enforce correct size sigsets */
8024                         return -TARGET_EINVAL;
8025                     }
8026                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8027                                               sizeof(*target_sigset), 1);
8028                     if (!target_sigset) {
8029                         return -TARGET_EFAULT;
8030                     }
8031                     target_to_host_sigset(&set, target_sigset);
8032                     unlock_user(target_sigset, arg_sigset, 0);
8033                 } else {
8034                     sig.set = NULL;
8035                 }
8036             } else {
8037                 sig_ptr = NULL;
8038             }
8039 
8040             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8041                                           ts_ptr, sig_ptr));
8042 
8043             if (!is_error(ret)) {
8044                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8045                     return -TARGET_EFAULT;
8046                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8047                     return -TARGET_EFAULT;
8048                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8049                     return -TARGET_EFAULT;
8050 
8051                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8052                     return -TARGET_EFAULT;
8053             }
8054         }
8055         return ret;
8056 #endif
8057 #ifdef TARGET_NR_symlink
8058     case TARGET_NR_symlink:
8059         {
8060             void *p2;
8061             p = lock_user_string(arg1);
8062             p2 = lock_user_string(arg2);
8063             if (!p || !p2)
8064                 ret = -TARGET_EFAULT;
8065             else
8066                 ret = get_errno(symlink(p, p2));
8067             unlock_user(p2, arg2, 0);
8068             unlock_user(p, arg1, 0);
8069         }
8070         return ret;
8071 #endif
8072 #if defined(TARGET_NR_symlinkat)
8073     case TARGET_NR_symlinkat:
8074         {
8075             void *p2;
8076             p  = lock_user_string(arg1);
8077             p2 = lock_user_string(arg3);
8078             if (!p || !p2)
8079                 ret = -TARGET_EFAULT;
8080             else
8081                 ret = get_errno(symlinkat(p, arg2, p2));
8082             unlock_user(p2, arg3, 0);
8083             unlock_user(p, arg1, 0);
8084         }
8085         return ret;
8086 #endif
8087 #ifdef TARGET_NR_readlink
8088     case TARGET_NR_readlink:
8089         {
8090             void *p2;
8091             p = lock_user_string(arg1);
8092             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8093             if (!p || !p2) {
8094                 ret = -TARGET_EFAULT;
8095             } else if (!arg3) {
8096                 /* Short circuit this for the magic exe check. */
8097                 ret = -TARGET_EINVAL;
8098             } else if (is_proc_myself((const char *)p, "exe")) {
8099                 char real[PATH_MAX], *temp;
8100                 temp = realpath(exec_path, real);
8101                 /* Return value is # of bytes that we wrote to the buffer. */
8102                 if (temp == NULL) {
8103                     ret = get_errno(-1);
8104                 } else {
8105                     /* Don't worry about sign mismatch as earlier mapping
8106                      * logic would have thrown a bad address error. */
8107                     ret = MIN(strlen(real), arg3);
8108                     /* We cannot NUL terminate the string. */
8109                     memcpy(p2, real, ret);
8110                 }
8111             } else {
8112                 ret = get_errno(readlink(path(p), p2, arg3));
8113             }
8114             unlock_user(p2, arg2, ret);
8115             unlock_user(p, arg1, 0);
8116         }
8117         return ret;
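        /*
         * For illustration: without the is_proc_myself() special case, a
         * guest readlink("/proc/self/exe", buf, n) would see the path of
         * the QEMU binary itself; instead it gets realpath(exec_path),
         * copied without a trailing NUL and truncated to n bytes, matching
         * normal readlink() semantics.
         */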
8118 #endif
8119 #if defined(TARGET_NR_readlinkat)
8120     case TARGET_NR_readlinkat:
8121         {
8122             void *p2;
8123             p  = lock_user_string(arg2);
8124             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8125             if (!p || !p2) {
8126                 ret = -TARGET_EFAULT;
8127             } else if (is_proc_myself((const char *)p, "exe")) {
8128                 char real[PATH_MAX], *temp;
8129                 temp = realpath(exec_path, real);
8130                 ret = temp == NULL ? get_errno(-1) : strlen(real);
8131                 snprintf((char *)p2, arg4, "%s", real);
8132             } else {
8133                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8134             }
8135             unlock_user(p2, arg3, ret);
8136             unlock_user(p, arg2, 0);
8137         }
8138         return ret;
8139 #endif
8140 #ifdef TARGET_NR_swapon
8141     case TARGET_NR_swapon:
8142         if (!(p = lock_user_string(arg1)))
8143             return -TARGET_EFAULT;
8144         ret = get_errno(swapon(p, arg2));
8145         unlock_user(p, arg1, 0);
8146         return ret;
8147 #endif
8148     case TARGET_NR_reboot:
8149         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8150            /* arg4 is only used for LINUX_REBOOT_CMD_RESTART2; it must be ignored otherwise */
8151            p = lock_user_string(arg4);
8152            if (!p) {
8153                return -TARGET_EFAULT;
8154            }
8155            ret = get_errno(reboot(arg1, arg2, arg3, p));
8156            unlock_user(p, arg4, 0);
8157         } else {
8158            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8159         }
8160         return ret;
8161 #ifdef TARGET_NR_mmap
8162     case TARGET_NR_mmap:
8163 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8164     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8165     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8166     || defined(TARGET_S390X)
8167         {
8168             abi_ulong *v;
8169             abi_ulong v1, v2, v3, v4, v5, v6;
8170             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8171                 return -TARGET_EFAULT;
8172             v1 = tswapal(v[0]);
8173             v2 = tswapal(v[1]);
8174             v3 = tswapal(v[2]);
8175             v4 = tswapal(v[3]);
8176             v5 = tswapal(v[4]);
8177             v6 = tswapal(v[5]);
8178             unlock_user(v, arg1, 0);
8179             ret = get_errno(target_mmap(v1, v2, v3,
8180                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8181                                         v5, v6));
8182         }
8183 #else
8184         ret = get_errno(target_mmap(arg1, arg2, arg3,
8185                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8186                                     arg5,
8187                                     arg6));
8188 #endif
8189         return ret;
8190 #endif
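        /*
         * For illustration: on the targets listed above, the legacy mmap
         * syscall takes a single guest pointer to six packed abi_ulongs
         * rather than six separate arguments, conceptually
         *
         *     abi_ulong args[6] = { addr, len, prot, flags, fd, offset };
         *     old_mmap(args);                    (guest-side sketch)
         *
         * which is why the block above fetches the values with lock_user()
         * and byte-swaps each one with tswapal() before calling
         * target_mmap().
         */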
8191 #ifdef TARGET_NR_mmap2
8192     case TARGET_NR_mmap2:
8193 #ifndef MMAP_SHIFT
8194 #define MMAP_SHIFT 12
8195 #endif
8196         ret = target_mmap(arg1, arg2, arg3,
8197                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8198                           arg5, arg6 << MMAP_SHIFT);
8199         return get_errno(ret);
8200 #endif
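        /*
         * Worked example: with MMAP_SHIFT == 12 the guest supplies the
         * file offset in 4096-byte pages, so arg6 == 3 maps from byte
         * offset 3 << 12 == 12288 of the file.
         */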
8201     case TARGET_NR_munmap:
8202         return get_errno(target_munmap(arg1, arg2));
8203     case TARGET_NR_mprotect:
8204         {
8205             TaskState *ts = cpu->opaque;
8206             /* Special hack to detect libc making the stack executable.  */
8207             if ((arg3 & PROT_GROWSDOWN)
8208                 && arg1 >= ts->info->stack_limit
8209                 && arg1 <= ts->info->start_stack) {
8210                 arg3 &= ~PROT_GROWSDOWN;
8211                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8212                 arg1 = ts->info->stack_limit;
8213             }
8214         }
8215         return get_errno(target_mprotect(arg1, arg2, arg3));
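        /*
         * For illustration (hypothetical addresses): if the guest stack is
         * tracked as stack_limit 0x7f000000 up to start_stack 0x7fff0000
         * and libc calls mprotect(0x7ffe0000, 0x1000,
         * PROT_READ|PROT_WRITE|PROT_GROWSDOWN), the code above drops
         * PROT_GROWSDOWN and widens the request to cover
         * [0x7f000000, 0x7ffe1000), i.e. the whole tracked stack below the
         * original range, before calling target_mprotect().
         */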
8216 #ifdef TARGET_NR_mremap
8217     case TARGET_NR_mremap:
8218         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8219 #endif
8220         /* ??? msync/mlock/munlock are broken for softmmu.  */
8221 #ifdef TARGET_NR_msync
8222     case TARGET_NR_msync:
8223         return get_errno(msync(g2h(arg1), arg2, arg3));
8224 #endif
8225 #ifdef TARGET_NR_mlock
8226     case TARGET_NR_mlock:
8227         return get_errno(mlock(g2h(arg1), arg2));
8228 #endif
8229 #ifdef TARGET_NR_munlock
8230     case TARGET_NR_munlock:
8231         return get_errno(munlock(g2h(arg1), arg2));
8232 #endif
8233 #ifdef TARGET_NR_mlockall
8234     case TARGET_NR_mlockall:
8235         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8236 #endif
8237 #ifdef TARGET_NR_munlockall
8238     case TARGET_NR_munlockall:
8239         return get_errno(munlockall());
8240 #endif
8241 #ifdef TARGET_NR_truncate
8242     case TARGET_NR_truncate:
8243         if (!(p = lock_user_string(arg1)))
8244             return -TARGET_EFAULT;
8245         ret = get_errno(truncate(p, arg2));
8246         unlock_user(p, arg1, 0);
8247         return ret;
8248 #endif
8249 #ifdef TARGET_NR_ftruncate
8250     case TARGET_NR_ftruncate:
8251         return get_errno(ftruncate(arg1, arg2));
8252 #endif
8253     case TARGET_NR_fchmod:
8254         return get_errno(fchmod(arg1, arg2));
8255 #if defined(TARGET_NR_fchmodat)
8256     case TARGET_NR_fchmodat:
8257         if (!(p = lock_user_string(arg2)))
8258             return -TARGET_EFAULT;
8259         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8260         unlock_user(p, arg2, 0);
8261         return ret;
8262 #endif
8263     case TARGET_NR_getpriority:
8264         /* Note that negative values are valid for getpriority, so we must
8265            differentiate based on errno settings.  */
8266         errno = 0;
8267         ret = getpriority(arg1, arg2);
8268         if (ret == -1 && errno != 0) {
8269             return -host_to_target_errno(errno);
8270         }
8271 #ifdef TARGET_ALPHA
8272         /* Return value is the unbiased priority.  Signal no error.  */
8273         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8274 #else
8275         /* Return value is a biased priority to avoid negative numbers.  */
8276         ret = 20 - ret;
8277 #endif
8278         return ret;
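        /*
         * Worked example (non-Alpha path): a process running at nice -5
         * makes the host getpriority() return -5 here, which is reported
         * to the guest as 20 - (-5) == 25 in raw-syscall convention; the
         * guest's C library is then expected to unbias it again
         * (20 - 25 == -5).
         */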
8279     case TARGET_NR_setpriority:
8280         return get_errno(setpriority(arg1, arg2, arg3));
8281 #ifdef TARGET_NR_statfs
8282     case TARGET_NR_statfs:
8283         if (!(p = lock_user_string(arg1))) {
8284             return -TARGET_EFAULT;
8285         }
8286         ret = get_errno(statfs(path(p), &stfs));
8287         unlock_user(p, arg1, 0);
8288     convert_statfs:
8289         if (!is_error(ret)) {
8290             struct target_statfs *target_stfs;
8291 
8292             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8293                 return -TARGET_EFAULT;
8294             __put_user(stfs.f_type, &target_stfs->f_type);
8295             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8296             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8297             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8298             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8299             __put_user(stfs.f_files, &target_stfs->f_files);
8300             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8301             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8302             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8303             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8304             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8305 #ifdef _STATFS_F_FLAGS
8306             __put_user(stfs.f_flags, &target_stfs->f_flags);
8307 #else
8308             __put_user(0, &target_stfs->f_flags);
8309 #endif
8310             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8311             unlock_user_struct(target_stfs, arg2, 1);
8312         }
8313         return ret;
8314 #endif
8315 #ifdef TARGET_NR_fstatfs
8316     case TARGET_NR_fstatfs:
8317         ret = get_errno(fstatfs(arg1, &stfs));
8318         goto convert_statfs;
8319 #endif
8320 #ifdef TARGET_NR_statfs64
8321     case TARGET_NR_statfs64:
8322         if (!(p = lock_user_string(arg1))) {
8323             return -TARGET_EFAULT;
8324         }
8325         ret = get_errno(statfs(path(p), &stfs));
8326         unlock_user(p, arg1, 0);
8327     convert_statfs64:
8328         if (!is_error(ret)) {
8329             struct target_statfs64 *target_stfs;
8330 
8331             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8332                 return -TARGET_EFAULT;
8333             __put_user(stfs.f_type, &target_stfs->f_type);
8334             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8335             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8336             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8337             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8338             __put_user(stfs.f_files, &target_stfs->f_files);
8339             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8340             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8341             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8342             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8343             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8344             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8345             unlock_user_struct(target_stfs, arg3, 1);
8346         }
8347         return ret;
8348     case TARGET_NR_fstatfs64:
8349         ret = get_errno(fstatfs(arg1, &stfs));
8350         goto convert_statfs64;
8351 #endif
8352 #ifdef TARGET_NR_socketcall
8353     case TARGET_NR_socketcall:
8354         return do_socketcall(arg1, arg2);
8355 #endif
8356 #ifdef TARGET_NR_accept
8357     case TARGET_NR_accept:
8358         return do_accept4(arg1, arg2, arg3, 0);
8359 #endif
8360 #ifdef TARGET_NR_accept4
8361     case TARGET_NR_accept4:
8362         return do_accept4(arg1, arg2, arg3, arg4);
8363 #endif
8364 #ifdef TARGET_NR_bind
8365     case TARGET_NR_bind:
8366         return do_bind(arg1, arg2, arg3);
8367 #endif
8368 #ifdef TARGET_NR_connect
8369     case TARGET_NR_connect:
8370         return do_connect(arg1, arg2, arg3);
8371 #endif
8372 #ifdef TARGET_NR_getpeername
8373     case TARGET_NR_getpeername:
8374         return do_getpeername(arg1, arg2, arg3);
8375 #endif
8376 #ifdef TARGET_NR_getsockname
8377     case TARGET_NR_getsockname:
8378         return do_getsockname(arg1, arg2, arg3);
8379 #endif
8380 #ifdef TARGET_NR_getsockopt
8381     case TARGET_NR_getsockopt:
8382         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8383 #endif
8384 #ifdef TARGET_NR_listen
8385     case TARGET_NR_listen:
8386         return get_errno(listen(arg1, arg2));
8387 #endif
8388 #ifdef TARGET_NR_recv
8389     case TARGET_NR_recv:
8390         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8391 #endif
8392 #ifdef TARGET_NR_recvfrom
8393     case TARGET_NR_recvfrom:
8394         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8395 #endif
8396 #ifdef TARGET_NR_recvmsg
8397     case TARGET_NR_recvmsg:
8398         return do_sendrecvmsg(arg1, arg2, arg3, 0);
8399 #endif
8400 #ifdef TARGET_NR_send
8401     case TARGET_NR_send:
8402         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8403 #endif
8404 #ifdef TARGET_NR_sendmsg
8405     case TARGET_NR_sendmsg:
8406         return do_sendrecvmsg(arg1, arg2, arg3, 1);
8407 #endif
8408 #ifdef TARGET_NR_sendmmsg
8409     case TARGET_NR_sendmmsg:
8410         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8411     case TARGET_NR_recvmmsg:
8412         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8413 #endif
8414 #ifdef TARGET_NR_sendto
8415     case TARGET_NR_sendto:
8416         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8417 #endif
8418 #ifdef TARGET_NR_shutdown
8419     case TARGET_NR_shutdown:
8420         return get_errno(shutdown(arg1, arg2));
8421 #endif
8422 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8423     case TARGET_NR_getrandom:
8424         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8425         if (!p) {
8426             return -TARGET_EFAULT;
8427         }
8428         ret = get_errno(getrandom(p, arg2, arg3));
8429         unlock_user(p, arg1, ret);
8430         return ret;
8431 #endif
8432 #ifdef TARGET_NR_socket
8433     case TARGET_NR_socket:
8434         return do_socket(arg1, arg2, arg3);
8435 #endif
8436 #ifdef TARGET_NR_socketpair
8437     case TARGET_NR_socketpair:
8438         return do_socketpair(arg1, arg2, arg3, arg4);
8439 #endif
8440 #ifdef TARGET_NR_setsockopt
8441     case TARGET_NR_setsockopt:
8442         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8443 #endif
8444 #if defined(TARGET_NR_syslog)
8445     case TARGET_NR_syslog:
8446         {
8447             int len = arg2;
8448 
8449             switch (arg1) {
8450             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
8451             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
8452             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
8453             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
8454             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
8455             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8456             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
8457             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
8458                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8459             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
8460             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
8461             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
8462                 {
8463                     if (len < 0) {
8464                         return -TARGET_EINVAL;
8465                     }
8466                     if (len == 0) {
8467                         return 0;
8468                     }
8469                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8470                     if (!p) {
8471                         return -TARGET_EFAULT;
8472                     }
8473                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8474                     unlock_user(p, arg2, arg3);
8475                 }
8476                 return ret;
8477             default:
8478                 return -TARGET_EINVAL;
8479             }
8480         }
8481         break;
8482 #endif
8483     case TARGET_NR_setitimer:
8484         {
8485             struct itimerval value, ovalue, *pvalue;
8486 
8487             if (arg2) {
8488                 pvalue = &value;
8489                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8490                     || copy_from_user_timeval(&pvalue->it_value,
8491                                               arg2 + sizeof(struct target_timeval)))
8492                     return -TARGET_EFAULT;
8493             } else {
8494                 pvalue = NULL;
8495             }
8496             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8497             if (!is_error(ret) && arg3) {
8498                 if (copy_to_user_timeval(arg3,
8499                                          &ovalue.it_interval)
8500                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8501                                             &ovalue.it_value))
8502                     return -TARGET_EFAULT;
8503             }
8504         }
8505         return ret;
8506     case TARGET_NR_getitimer:
8507         {
8508             struct itimerval value;
8509 
8510             ret = get_errno(getitimer(arg1, &value));
8511             if (!is_error(ret) && arg2) {
8512                 if (copy_to_user_timeval(arg2,
8513                                          &value.it_interval)
8514                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8515                                             &value.it_value))
8516                     return -TARGET_EFAULT;
8517             }
8518         }
8519         return ret;
8520 #ifdef TARGET_NR_stat
8521     case TARGET_NR_stat:
8522         if (!(p = lock_user_string(arg1))) {
8523             return -TARGET_EFAULT;
8524         }
8525         ret = get_errno(stat(path(p), &st));
8526         unlock_user(p, arg1, 0);
8527         goto do_stat;
8528 #endif
8529 #ifdef TARGET_NR_lstat
8530     case TARGET_NR_lstat:
8531         if (!(p = lock_user_string(arg1))) {
8532             return -TARGET_EFAULT;
8533         }
8534         ret = get_errno(lstat(path(p), &st));
8535         unlock_user(p, arg1, 0);
8536         goto do_stat;
8537 #endif
8538 #ifdef TARGET_NR_fstat
8539     case TARGET_NR_fstat:
8540         {
8541             ret = get_errno(fstat(arg1, &st));
8542 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8543         do_stat:
8544 #endif
8545             if (!is_error(ret)) {
8546                 struct target_stat *target_st;
8547 
8548                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8549                     return -TARGET_EFAULT;
8550                 memset(target_st, 0, sizeof(*target_st));
8551                 __put_user(st.st_dev, &target_st->st_dev);
8552                 __put_user(st.st_ino, &target_st->st_ino);
8553                 __put_user(st.st_mode, &target_st->st_mode);
8554                 __put_user(st.st_uid, &target_st->st_uid);
8555                 __put_user(st.st_gid, &target_st->st_gid);
8556                 __put_user(st.st_nlink, &target_st->st_nlink);
8557                 __put_user(st.st_rdev, &target_st->st_rdev);
8558                 __put_user(st.st_size, &target_st->st_size);
8559                 __put_user(st.st_blksize, &target_st->st_blksize);
8560                 __put_user(st.st_blocks, &target_st->st_blocks);
8561                 __put_user(st.st_atime, &target_st->target_st_atime);
8562                 __put_user(st.st_mtime, &target_st->target_st_mtime);
8563                 __put_user(st.st_ctime, &target_st->target_st_ctime);
8564                 unlock_user_struct(target_st, arg2, 1);
8565             }
8566         }
8567         return ret;
8568 #endif
8569     case TARGET_NR_vhangup:
8570         return get_errno(vhangup());
8571 #ifdef TARGET_NR_syscall
8572     case TARGET_NR_syscall:
8573         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8574                           arg6, arg7, arg8, 0);
8575 #endif
8576     case TARGET_NR_wait4:
8577         {
8578             int status;
8579             abi_long status_ptr = arg2;
8580             struct rusage rusage, *rusage_ptr;
8581             abi_ulong target_rusage = arg4;
8582             abi_long rusage_err;
8583             if (target_rusage)
8584                 rusage_ptr = &rusage;
8585             else
8586                 rusage_ptr = NULL;
8587             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8588             if (!is_error(ret)) {
8589                 if (status_ptr && ret) {
8590                     status = host_to_target_waitstatus(status);
8591                     if (put_user_s32(status, status_ptr))
8592                         return -TARGET_EFAULT;
8593                 }
8594                 if (target_rusage) {
8595                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
8596                     if (rusage_err) {
8597                         ret = rusage_err;
8598                     }
8599                 }
8600             }
8601         }
8602         return ret;
8603 #ifdef TARGET_NR_swapoff
8604     case TARGET_NR_swapoff:
8605         if (!(p = lock_user_string(arg1)))
8606             return -TARGET_EFAULT;
8607         ret = get_errno(swapoff(p));
8608         unlock_user(p, arg1, 0);
8609         return ret;
8610 #endif
8611     case TARGET_NR_sysinfo:
8612         {
8613             struct target_sysinfo *target_value;
8614             struct sysinfo value;
8615             ret = get_errno(sysinfo(&value));
8616             if (!is_error(ret) && arg1)
8617             {
8618                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8619                     return -TARGET_EFAULT;
8620                 __put_user(value.uptime, &target_value->uptime);
8621                 __put_user(value.loads[0], &target_value->loads[0]);
8622                 __put_user(value.loads[1], &target_value->loads[1]);
8623                 __put_user(value.loads[2], &target_value->loads[2]);
8624                 __put_user(value.totalram, &target_value->totalram);
8625                 __put_user(value.freeram, &target_value->freeram);
8626                 __put_user(value.sharedram, &target_value->sharedram);
8627                 __put_user(value.bufferram, &target_value->bufferram);
8628                 __put_user(value.totalswap, &target_value->totalswap);
8629                 __put_user(value.freeswap, &target_value->freeswap);
8630                 __put_user(value.procs, &target_value->procs);
8631                 __put_user(value.totalhigh, &target_value->totalhigh);
8632                 __put_user(value.freehigh, &target_value->freehigh);
8633                 __put_user(value.mem_unit, &target_value->mem_unit);
8634                 unlock_user_struct(target_value, arg1, 1);
8635             }
8636         }
8637         return ret;
8638 #ifdef TARGET_NR_ipc
8639     case TARGET_NR_ipc:
8640         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
8641 #endif
8642 #ifdef TARGET_NR_semget
8643     case TARGET_NR_semget:
8644         return get_errno(semget(arg1, arg2, arg3));
8645 #endif
8646 #ifdef TARGET_NR_semop
8647     case TARGET_NR_semop:
8648         return do_semop(arg1, arg2, arg3);
8649 #endif
8650 #ifdef TARGET_NR_semctl
8651     case TARGET_NR_semctl:
8652         return do_semctl(arg1, arg2, arg3, arg4);
8653 #endif
8654 #ifdef TARGET_NR_msgctl
8655     case TARGET_NR_msgctl:
8656         return do_msgctl(arg1, arg2, arg3);
8657 #endif
8658 #ifdef TARGET_NR_msgget
8659     case TARGET_NR_msgget:
8660         return get_errno(msgget(arg1, arg2));
8661 #endif
8662 #ifdef TARGET_NR_msgrcv
8663     case TARGET_NR_msgrcv:
8664         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8665 #endif
8666 #ifdef TARGET_NR_msgsnd
8667     case TARGET_NR_msgsnd:
8668         return do_msgsnd(arg1, arg2, arg3, arg4);
8669 #endif
8670 #ifdef TARGET_NR_shmget
8671     case TARGET_NR_shmget:
8672         return get_errno(shmget(arg1, arg2, arg3));
8673 #endif
8674 #ifdef TARGET_NR_shmctl
8675     case TARGET_NR_shmctl:
8676         return do_shmctl(arg1, arg2, arg3);
8677 #endif
8678 #ifdef TARGET_NR_shmat
8679     case TARGET_NR_shmat:
8680         return do_shmat(cpu_env, arg1, arg2, arg3);
8681 #endif
8682 #ifdef TARGET_NR_shmdt
8683     case TARGET_NR_shmdt:
8684         return do_shmdt(arg1);
8685 #endif
8686     case TARGET_NR_fsync:
8687         return get_errno(fsync(arg1));
8688     case TARGET_NR_clone:
8689         /* Linux manages to have three different orderings for its
8690          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8691          * match the kernel's CONFIG_CLONE_* settings.
8692          * Microblaze is further special in that it uses a sixth
8693          * implicit argument to clone for the TLS pointer.
8694          */
8695 #if defined(TARGET_MICROBLAZE)
8696         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
8697 #elif defined(TARGET_CLONE_BACKWARDS)
8698         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
8699 #elif defined(TARGET_CLONE_BACKWARDS2)
8700         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
8701 #else
8702         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
8703 #endif
8704         return ret;
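        /*
         * For reference, reading the calls above against
         * do_fork(env, flags, newsp, parent_tidptr, newtls, child_tidptr),
         * the guest argument slots consumed are roughly:
         *   default:           arg1=flags arg2=newsp arg3=parent_tidptr
         *                      arg4=child_tidptr arg5=tls
         *   CLONE_BACKWARDS:   arg1=flags arg2=newsp arg3=parent_tidptr
         *                      arg4=tls arg5=child_tidptr
         *   CLONE_BACKWARDS2:  arg1=newsp arg2=flags arg3=parent_tidptr
         *                      arg4=child_tidptr arg5=tls
         *   Microblaze:        arg1=flags arg2=newsp arg4=parent_tidptr
         *                      arg5=child_tidptr arg6=tls
         */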
8705 #ifdef __NR_exit_group
8706         /* new thread calls */
8707     case TARGET_NR_exit_group:
8708         preexit_cleanup(cpu_env, arg1);
8709         return get_errno(exit_group(arg1));
8710 #endif
8711     case TARGET_NR_setdomainname:
8712         if (!(p = lock_user_string(arg1)))
8713             return -TARGET_EFAULT;
8714         ret = get_errno(setdomainname(p, arg2));
8715         unlock_user(p, arg1, 0);
8716         return ret;
8717     case TARGET_NR_uname:
8718         /* no need to transcode because we use the linux syscall */
8719         {
8720             struct new_utsname * buf;
8721 
8722             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
8723                 return -TARGET_EFAULT;
8724             ret = get_errno(sys_uname(buf));
8725             if (!is_error(ret)) {
8726                 /* Overwrite the native machine name with whatever is being
8727                    emulated. */
8728                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
8729                           sizeof(buf->machine));
8730                 /* Allow the user to override the reported release.  */
8731                 if (qemu_uname_release && *qemu_uname_release) {
8732                     g_strlcpy(buf->release, qemu_uname_release,
8733                               sizeof(buf->release));
8734                 }
8735             }
8736             unlock_user_struct(buf, arg1, 1);
8737         }
8738         return ret;
8739 #ifdef TARGET_I386
8740     case TARGET_NR_modify_ldt:
8741         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
8742 #if !defined(TARGET_X86_64)
8743     case TARGET_NR_vm86:
8744         return do_vm86(cpu_env, arg1, arg2);
8745 #endif
8746 #endif
8747     case TARGET_NR_adjtimex:
8748         {
8749             struct timex host_buf;
8750 
8751             if (target_to_host_timex(&host_buf, arg1) != 0) {
8752                 return -TARGET_EFAULT;
8753             }
8754             ret = get_errno(adjtimex(&host_buf));
8755             if (!is_error(ret)) {
8756                 if (host_to_target_timex(arg1, &host_buf) != 0) {
8757                     return -TARGET_EFAULT;
8758                 }
8759             }
8760         }
8761         return ret;
8762 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
8763     case TARGET_NR_clock_adjtime:
8764         {
8765             struct timex htx, *phtx = &htx;
8766 
8767             if (target_to_host_timex(phtx, arg2) != 0) {
8768                 return -TARGET_EFAULT;
8769             }
8770             ret = get_errno(clock_adjtime(arg1, phtx));
8771             if (!is_error(ret) && phtx) {
8772                 if (host_to_target_timex(arg2, phtx) != 0) {
8773                     return -TARGET_EFAULT;
8774                 }
8775             }
8776         }
8777         return ret;
8778 #endif
8779     case TARGET_NR_getpgid:
8780         return get_errno(getpgid(arg1));
8781     case TARGET_NR_fchdir:
8782         return get_errno(fchdir(arg1));
8783     case TARGET_NR_personality:
8784         return get_errno(personality(arg1));
8785 #ifdef TARGET_NR__llseek /* Not on alpha */
8786     case TARGET_NR__llseek:
8787         {
8788             int64_t res;
8789 #if !defined(__NR_llseek)
8790             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
8791             if (res == -1) {
8792                 ret = get_errno(res);
8793             } else {
8794                 ret = 0;
8795             }
8796 #else
8797             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
8798 #endif
8799             if ((ret == 0) && put_user_s64(res, arg4)) {
8800                 return -TARGET_EFAULT;
8801             }
8802         }
8803         return ret;
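        /*
         * Worked example: the fallback lseek() path above rebuilds the
         * 64-bit offset as ((uint64_t)arg2 << 32) | (abi_ulong)arg3, so
         * arg2 == 0x1 and arg3 == 0x80000000 seek to byte 0x180000000;
         * the result is then stored back through the guest pointer in arg4.
         */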
8804 #endif
8805 #ifdef TARGET_NR_getdents
8806     case TARGET_NR_getdents:
8807 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8808 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8809         {
8810             struct target_dirent *target_dirp;
8811             struct linux_dirent *dirp;
8812             abi_long count = arg3;
8813 
8814             dirp = g_try_malloc(count);
8815             if (!dirp) {
8816                 return -TARGET_ENOMEM;
8817             }
8818 
8819             ret = get_errno(sys_getdents(arg1, dirp, count));
8820             if (!is_error(ret)) {
8821                 struct linux_dirent *de;
8822                 struct target_dirent *tde;
8823                 int len = ret;
8824                 int reclen, treclen;
8825                 int count1, tnamelen;
8826 
8827                 count1 = 0;
8828                 de = dirp;
8829                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8830                     return -TARGET_EFAULT;
8831                 tde = target_dirp;
8832                 while (len > 0) {
8833                     reclen = de->d_reclen;
8834                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
8835                     assert(tnamelen >= 0);
8836                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
8837                     assert(count1 + treclen <= count);
8838                     tde->d_reclen = tswap16(treclen);
8839                     tde->d_ino = tswapal(de->d_ino);
8840                     tde->d_off = tswapal(de->d_off);
8841                     memcpy(tde->d_name, de->d_name, tnamelen);
8842                     de = (struct linux_dirent *)((char *)de + reclen);
8843                     len -= reclen;
8844                     tde = (struct target_dirent *)((char *)tde + treclen);
8845                     count1 += treclen;
8846                 }
8847                 ret = count1;
8848                 unlock_user(target_dirp, arg2, ret);
8849             }
8850             g_free(dirp);
8851         }
8852 #else
8853         {
8854             struct linux_dirent *dirp;
8855             abi_long count = arg3;
8856 
8857             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8858                 return -TARGET_EFAULT;
8859             ret = get_errno(sys_getdents(arg1, dirp, count));
8860             if (!is_error(ret)) {
8861                 struct linux_dirent *de;
8862                 int len = ret;
8863                 int reclen;
8864                 de = dirp;
8865                 while (len > 0) {
8866                     reclen = de->d_reclen;
8867                     if (reclen > len)
8868                         break;
8869                     de->d_reclen = tswap16(reclen);
8870                     tswapls(&de->d_ino);
8871                     tswapls(&de->d_off);
8872                     de = (struct linux_dirent *)((char *)de + reclen);
8873                     len -= reclen;
8874                 }
8875             }
8876             unlock_user(dirp, arg2, ret);
8877         }
8878 #endif
8879 #else
8880         /* Implement getdents in terms of getdents64 */
8881         {
8882             struct linux_dirent64 *dirp;
8883             abi_long count = arg3;
8884 
8885             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8886             if (!dirp) {
8887                 return -TARGET_EFAULT;
8888             }
8889             ret = get_errno(sys_getdents64(arg1, dirp, count));
8890             if (!is_error(ret)) {
8891                 /* Convert the dirent64 structs to target dirent.  We do this
8892                  * in-place, since we can guarantee that a target_dirent is no
8893                  * larger than a dirent64; however this means we have to be
8894                  * careful to read everything before writing in the new format.
8895                  */
8896                 struct linux_dirent64 *de;
8897                 struct target_dirent *tde;
8898                 int len = ret;
8899                 int tlen = 0;
8900 
8901                 de = dirp;
8902                 tde = (struct target_dirent *)dirp;
8903                 while (len > 0) {
8904                     int namelen, treclen;
8905                     int reclen = de->d_reclen;
8906                     uint64_t ino = de->d_ino;
8907                     int64_t off = de->d_off;
8908                     uint8_t type = de->d_type;
8909 
8910                     namelen = strlen(de->d_name);
8911                     treclen = offsetof(struct target_dirent, d_name)
8912                         + namelen + 2;
8913                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
8914 
8915                     memmove(tde->d_name, de->d_name, namelen + 1);
8916                     tde->d_ino = tswapal(ino);
8917                     tde->d_off = tswapal(off);
8918                     tde->d_reclen = tswap16(treclen);
8919                     /* The target_dirent type is in what was formerly a padding
8920                      * byte at the end of the structure:
8921                      */
8922                     *(((char *)tde) + treclen - 1) = type;
8923 
8924                     de = (struct linux_dirent64 *)((char *)de + reclen);
8925                     tde = (struct target_dirent *)((char *)tde + treclen);
8926                     len -= reclen;
8927                     tlen += treclen;
8928                 }
8929                 ret = tlen;
8930             }
8931             unlock_user(dirp, arg2, ret);
8932         }
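        /*
         * For illustration, the in-place rewrite above relies on a target
         * record never being larger than the host one (shown schematically,
         * field widths approximate):
         *
         *     struct linux_dirent64 { u64 d_ino; s64 d_off; u16 d_reclen;
         *                             u8 d_type; char d_name[]; };
         *     struct target_dirent  { abi_long d_ino; abi_long d_off;
         *                             u16 d_reclen; char d_name[]; };
         *
         * Each host record is read fully into locals before the smaller
         * target record is written over it, and d_type ends up in the
         * final byte of the rewritten record.
         */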
8933 #endif
8934         return ret;
8935 #endif /* TARGET_NR_getdents */
8936 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8937     case TARGET_NR_getdents64:
8938         {
8939             struct linux_dirent64 *dirp;
8940             abi_long count = arg3;
8941             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8942                 return -TARGET_EFAULT;
8943             ret = get_errno(sys_getdents64(arg1, dirp, count));
8944             if (!is_error(ret)) {
8945                 struct linux_dirent64 *de;
8946                 int len = ret;
8947                 int reclen;
8948                 de = dirp;
8949                 while (len > 0) {
8950                     reclen = de->d_reclen;
8951                     if (reclen > len)
8952                         break;
8953                     de->d_reclen = tswap16(reclen);
8954                     tswap64s((uint64_t *)&de->d_ino);
8955                     tswap64s((uint64_t *)&de->d_off);
8956                     de = (struct linux_dirent64 *)((char *)de + reclen);
8957                     len -= reclen;
8958                 }
8959             }
8960             unlock_user(dirp, arg2, ret);
8961         }
8962         return ret;
8963 #endif /* TARGET_NR_getdents64 */
8964 #if defined(TARGET_NR__newselect)
8965     case TARGET_NR__newselect:
8966         return do_select(arg1, arg2, arg3, arg4, arg5);
8967 #endif
8968 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8969 # ifdef TARGET_NR_poll
8970     case TARGET_NR_poll:
8971 # endif
8972 # ifdef TARGET_NR_ppoll
8973     case TARGET_NR_ppoll:
8974 # endif
8975         {
8976             struct target_pollfd *target_pfd;
8977             unsigned int nfds = arg2;
8978             struct pollfd *pfd;
8979             unsigned int i;
8980 
8981             pfd = NULL;
8982             target_pfd = NULL;
8983             if (nfds) {
8984                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
8985                     return -TARGET_EINVAL;
8986                 }
8987 
8988                 target_pfd = lock_user(VERIFY_WRITE, arg1,
8989                                        sizeof(struct target_pollfd) * nfds, 1);
8990                 if (!target_pfd) {
8991                     return -TARGET_EFAULT;
8992                 }
8993 
8994                 pfd = alloca(sizeof(struct pollfd) * nfds);
8995                 for (i = 0; i < nfds; i++) {
8996                     pfd[i].fd = tswap32(target_pfd[i].fd);
8997                     pfd[i].events = tswap16(target_pfd[i].events);
8998                 }
8999             }
9000 
9001             switch (num) {
9002 # ifdef TARGET_NR_ppoll
9003             case TARGET_NR_ppoll:
9004             {
9005                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9006                 target_sigset_t *target_set;
9007                 sigset_t _set, *set = &_set;
9008 
9009                 if (arg3) {
9010                     if (target_to_host_timespec(timeout_ts, arg3)) {
9011                         unlock_user(target_pfd, arg1, 0);
9012                         return -TARGET_EFAULT;
9013                     }
9014                 } else {
9015                     timeout_ts = NULL;
9016                 }
9017 
9018                 if (arg4) {
9019                     if (arg5 != sizeof(target_sigset_t)) {
9020                         unlock_user(target_pfd, arg1, 0);
9021                         return -TARGET_EINVAL;
9022                     }
9023 
9024                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9025                     if (!target_set) {
9026                         unlock_user(target_pfd, arg1, 0);
9027                         return -TARGET_EFAULT;
9028                     }
9029                     target_to_host_sigset(set, target_set);
9030                 } else {
9031                     set = NULL;
9032                 }
9033 
9034                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9035                                            set, SIGSET_T_SIZE));
9036 
9037                 if (!is_error(ret) && arg3) {
9038                     host_to_target_timespec(arg3, timeout_ts);
9039                 }
9040                 if (arg4) {
9041                     unlock_user(target_set, arg4, 0);
9042                 }
9043                 break;
9044             }
9045 # endif
9046 # ifdef TARGET_NR_poll
9047             case TARGET_NR_poll:
9048             {
9049                 struct timespec ts, *pts;
9050 
9051                 if (arg3 >= 0) {
9052                     /* Convert ms to secs, ns */
9053                     ts.tv_sec = arg3 / 1000;
9054                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9055                     pts = &ts;
9056                 } else {
9057                     /* A negative poll() timeout means "infinite" */
9058                     pts = NULL;
9059                 }
9060                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9061                 break;
9062             }
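            /*
             * Worked example: poll(fds, nfds, 1500) arrives with
             * arg3 == 1500 ms and is converted above to ts.tv_sec == 1,
             * ts.tv_nsec == 500000000 before calling safe_ppoll().
             */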
9063 # endif
9064             default:
9065                 g_assert_not_reached();
9066             }
9067 
9068             if (!is_error(ret)) {
9069                 for(i = 0; i < nfds; i++) {
9070                     target_pfd[i].revents = tswap16(pfd[i].revents);
9071                 }
9072             }
9073             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9074         }
9075         return ret;
9076 #endif
9077     case TARGET_NR_flock:
9078         /* NOTE: the flock constant seems to be the same for every
9079            Linux platform */
9080         return get_errno(safe_flock(arg1, arg2));
9081     case TARGET_NR_readv:
9082         {
9083             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9084             if (vec != NULL) {
9085                 ret = get_errno(safe_readv(arg1, vec, arg3));
9086                 unlock_iovec(vec, arg2, arg3, 1);
9087             } else {
9088                 ret = -host_to_target_errno(errno);
9089             }
9090         }
9091         return ret;
9092     case TARGET_NR_writev:
9093         {
9094             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9095             if (vec != NULL) {
9096                 ret = get_errno(safe_writev(arg1, vec, arg3));
9097                 unlock_iovec(vec, arg2, arg3, 0);
9098             } else {
9099                 ret = -host_to_target_errno(errno);
9100             }
9101         }
9102         return ret;
9103 #if defined(TARGET_NR_preadv)
9104     case TARGET_NR_preadv:
9105         {
9106             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9107             if (vec != NULL) {
9108                 unsigned long low, high;
9109 
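                /* The 64-bit file offset arrives split across two target
                 * registers; target_to_host_low_high() reassembles it into
                 * the low/high unsigned longs the host preadv syscall takes.
                 */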
9110                 target_to_host_low_high(arg4, arg5, &low, &high);
9111                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9112                 unlock_iovec(vec, arg2, arg3, 1);
9113             } else {
9114                 ret = -host_to_target_errno(errno);
9115             }
9116         }
9117         return ret;
9118 #endif
9119 #if defined(TARGET_NR_pwritev)
9120     case TARGET_NR_pwritev:
9121         {
9122             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9123             if (vec != NULL) {
9124                 unsigned long low, high;
9125 
9126                 target_to_host_low_high(arg4, arg5, &low, &high);
9127                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9128                 unlock_iovec(vec, arg2, arg3, 0);
9129             } else {
9130                 ret = -host_to_target_errno(errno);
9131             }
9132         }
9133         return ret;
9134 #endif
9135     case TARGET_NR_getsid:
9136         return get_errno(getsid(arg1));
9137 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9138     case TARGET_NR_fdatasync:
9139         return get_errno(fdatasync(arg1));
9140 #endif
9141 #ifdef TARGET_NR__sysctl
9142     case TARGET_NR__sysctl:
9143         /* We don't implement this, but ENOTDIR is always a safe
9144            return value. */
9145         return -TARGET_ENOTDIR;
9146 #endif
9147     case TARGET_NR_sched_getaffinity:
9148         {
9149             unsigned int mask_size;
9150             unsigned long *mask;
9151 
9152             /*
9153              * sched_getaffinity needs multiples of ulong, so need to take
9154              * care of mismatches between target ulong and host ulong sizes.
9155              */
9156             if (arg2 & (sizeof(abi_ulong) - 1)) {
9157                 return -TARGET_EINVAL;
9158             }
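            /* Round the target's length up to a whole number of host
             * unsigned longs (e.g. a 4-byte request becomes 8 bytes on a
             * 64-bit host), since the host syscall deals in ulong units.
             */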
9159             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9160 
9161             mask = alloca(mask_size);
9162             memset(mask, 0, mask_size);
9163             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9164 
9165             if (!is_error(ret)) {
9166                 if (ret > arg2) {
9167                     /* More data returned than the caller's buffer will fit.
9168                      * This only happens if sizeof(abi_long) < sizeof(long)
9169                      * and the caller passed us a buffer holding an odd number
9170                      * of abi_longs. If the host kernel is actually using the
9171                      * extra 4 bytes then fail EINVAL; otherwise we can just
9172                      * ignore them and only copy the interesting part.
9173                      */
9174                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9175                     if (numcpus > arg2 * 8) {
9176                         return -TARGET_EINVAL;
9177                     }
9178                     ret = arg2;
9179                 }
9180 
9181                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9182                     return -TARGET_EFAULT;
9183                 }
9184             }
9185         }
9186         return ret;
9187     case TARGET_NR_sched_setaffinity:
9188         {
9189             unsigned int mask_size;
9190             unsigned long *mask;
9191 
9192             /*
9193              * sched_setaffinity needs multiples of ulong, so need to take
9194              * care of mismatches between target ulong and host ulong sizes.
9195              */
9196             if (arg2 & (sizeof(abi_ulong) - 1)) {
9197                 return -TARGET_EINVAL;
9198             }
9199             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9200             mask = alloca(mask_size);
9201 
9202             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9203             if (ret) {
9204                 return ret;
9205             }
9206 
9207             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9208         }
9209     case TARGET_NR_getcpu:
9210         {
9211             unsigned cpu, node;
9212             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9213                                        arg2 ? &node : NULL,
9214                                        NULL));
9215             if (is_error(ret)) {
9216                 return ret;
9217             }
9218             if (arg1 && put_user_u32(cpu, arg1)) {
9219                 return -TARGET_EFAULT;
9220             }
9221             if (arg2 && put_user_u32(node, arg2)) {
9222                 return -TARGET_EFAULT;
9223             }
9224         }
9225         return ret;
9226     case TARGET_NR_sched_setparam:
9227         {
9228             struct sched_param *target_schp;
9229             struct sched_param schp;
9230 
9231             if (arg2 == 0) {
9232                 return -TARGET_EINVAL;
9233             }
9234             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9235                 return -TARGET_EFAULT;
9236             schp.sched_priority = tswap32(target_schp->sched_priority);
9237             unlock_user_struct(target_schp, arg2, 0);
9238             return get_errno(sched_setparam(arg1, &schp));
9239         }
9240     case TARGET_NR_sched_getparam:
9241         {
9242             struct sched_param *target_schp;
9243             struct sched_param schp;
9244 
9245             if (arg2 == 0) {
9246                 return -TARGET_EINVAL;
9247             }
9248             ret = get_errno(sched_getparam(arg1, &schp));
9249             if (!is_error(ret)) {
9250                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9251                     return -TARGET_EFAULT;
9252                 target_schp->sched_priority = tswap32(schp.sched_priority);
9253                 unlock_user_struct(target_schp, arg2, 1);
9254             }
9255         }
9256         return ret;
9257     case TARGET_NR_sched_setscheduler:
9258         {
9259             struct sched_param *target_schp;
9260             struct sched_param schp;
9261             if (arg3 == 0) {
9262                 return -TARGET_EINVAL;
9263             }
9264             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9265                 return -TARGET_EFAULT;
9266             schp.sched_priority = tswap32(target_schp->sched_priority);
9267             unlock_user_struct(target_schp, arg3, 0);
9268             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9269         }
9270     case TARGET_NR_sched_getscheduler:
9271         return get_errno(sched_getscheduler(arg1));
9272     case TARGET_NR_sched_yield:
9273         return get_errno(sched_yield());
9274     case TARGET_NR_sched_get_priority_max:
9275         return get_errno(sched_get_priority_max(arg1));
9276     case TARGET_NR_sched_get_priority_min:
9277         return get_errno(sched_get_priority_min(arg1));
9278     case TARGET_NR_sched_rr_get_interval:
9279         {
9280             struct timespec ts;
9281             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9282             if (!is_error(ret)) {
9283                 ret = host_to_target_timespec(arg2, &ts);
9284             }
9285         }
9286         return ret;
9287     case TARGET_NR_nanosleep:
9288         {
9289             struct timespec req, rem;
9290             target_to_host_timespec(&req, arg1);
9291             ret = get_errno(safe_nanosleep(&req, &rem));
9292             if (is_error(ret) && arg2) {
9293                 host_to_target_timespec(arg2, &rem);
9294             }
9295         }
9296         return ret;
9297     case TARGET_NR_prctl:
9298         switch (arg1) {
9299         case PR_GET_PDEATHSIG:
9300         {
9301             int deathsig;
9302             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9303             if (!is_error(ret) && arg2
9304                 && put_user_ual(deathsig, arg2)) {
9305                 return -TARGET_EFAULT;
9306             }
9307             return ret;
9308         }
9309 #ifdef PR_GET_NAME
9310         case PR_GET_NAME:
9311         {
9312             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9313             if (!name) {
9314                 return -TARGET_EFAULT;
9315             }
9316             ret = get_errno(prctl(arg1, (unsigned long)name,
9317                                   arg3, arg4, arg5));
9318             unlock_user(name, arg2, 16);
9319             return ret;
9320         }
9321         case PR_SET_NAME:
9322         {
9323             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9324             if (!name) {
9325                 return -TARGET_EFAULT;
9326             }
9327             ret = get_errno(prctl(arg1, (unsigned long)name,
9328                                   arg3, arg4, arg5));
9329             unlock_user(name, arg2, 0);
9330             return ret;
9331         }
9332 #endif
9333 #ifdef TARGET_AARCH64
9334         case TARGET_PR_SVE_SET_VL:
9335             /*
9336              * We cannot support either PR_SVE_SET_VL_ONEXEC or
9337              * PR_SVE_VL_INHERIT.  Note the kernel definition
9338              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9339              * even though the current architectural maximum is VQ=16.
9340              */
9341             ret = -TARGET_EINVAL;
9342             if (arm_feature(cpu_env, ARM_FEATURE_SVE)
9343                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9344                 CPUARMState *env = cpu_env;
9345                 ARMCPU *cpu = arm_env_get_cpu(env);
9346                 uint32_t vq, old_vq;
9347 
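                /* ZCR_EL1.LEN stores (VQ - 1), where VQ is the vector
                 * length in 128-bit quadwords, so VL in bytes is VQ * 16.
                 */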
9348                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9349                 vq = MAX(arg2 / 16, 1);
9350                 vq = MIN(vq, cpu->sve_max_vq);
9351 
9352                 if (vq < old_vq) {
9353                     aarch64_sve_narrow_vq(env, vq);
9354                 }
9355                 env->vfp.zcr_el[1] = vq - 1;
9356                 ret = vq * 16;
9357             }
9358             return ret;
9359         case TARGET_PR_SVE_GET_VL:
9360             ret = -TARGET_EINVAL;
9361             if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
9362                 CPUARMState *env = cpu_env;
9363                 ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
9364             }
9365             return ret;
9366 #endif /* AARCH64 */
9367         case PR_GET_SECCOMP:
9368         case PR_SET_SECCOMP:
9369             /* Disable seccomp to prevent the target disabling syscalls we
9370              * need. */
9371             return -TARGET_EINVAL;
9372         default:
9373             /* Most prctl options have no pointer arguments */
9374             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9375         }
9376         break;
9377 #ifdef TARGET_NR_arch_prctl
9378     case TARGET_NR_arch_prctl:
9379 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9380         return do_arch_prctl(cpu_env, arg1, arg2);
9381 #else
9382 #error unreachable
9383 #endif
9384 #endif
9385 #ifdef TARGET_NR_pread64
9386     case TARGET_NR_pread64:
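        /* ABIs that pass 64-bit syscall arguments in aligned register pairs
         * insert a padding slot, shifting the offset halves up by one
         * argument; regpairs_aligned() detects those targets.
         */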
9387         if (regpairs_aligned(cpu_env, num)) {
9388             arg4 = arg5;
9389             arg5 = arg6;
9390         }
9391         if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9392             return -TARGET_EFAULT;
9393         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9394         unlock_user(p, arg2, ret);
9395         return ret;
9396     case TARGET_NR_pwrite64:
9397         if (regpairs_aligned(cpu_env, num)) {
9398             arg4 = arg5;
9399             arg5 = arg6;
9400         }
9401         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9402             return -TARGET_EFAULT;
9403         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9404         unlock_user(p, arg2, 0);
9405         return ret;
9406 #endif
9407     case TARGET_NR_getcwd:
9408         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9409             return -TARGET_EFAULT;
9410         ret = get_errno(sys_getcwd1(p, arg2));
9411         unlock_user(p, arg1, ret);
9412         return ret;
9413     case TARGET_NR_capget:
9414     case TARGET_NR_capset:
9415     {
9416         struct target_user_cap_header *target_header;
9417         struct target_user_cap_data *target_data = NULL;
9418         struct __user_cap_header_struct header;
9419         struct __user_cap_data_struct data[2];
9420         struct __user_cap_data_struct *dataptr = NULL;
9421         int i, target_datalen;
9422         int data_items = 1;
9423 
9424         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9425             return -TARGET_EFAULT;
9426         }
9427         header.version = tswap32(target_header->version);
9428         header.pid = tswap32(target_header->pid);
9429 
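        /* The v1 ABI (_LINUX_CAPABILITY_VERSION) uses a single 32-bit
         * cap_data struct; v2 and v3 pass an array of two structs so that
         * 64 capability bits fit.
         */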
9430         if (header.version != _LINUX_CAPABILITY_VERSION) {
9431             /* Versions 2 and up take a pointer to two user_data structs */
9432             data_items = 2;
9433         }
9434 
9435         target_datalen = sizeof(*target_data) * data_items;
9436 
9437         if (arg2) {
9438             if (num == TARGET_NR_capget) {
9439                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9440             } else {
9441                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9442             }
9443             if (!target_data) {
9444                 unlock_user_struct(target_header, arg1, 0);
9445                 return -TARGET_EFAULT;
9446             }
9447 
9448             if (num == TARGET_NR_capset) {
9449                 for (i = 0; i < data_items; i++) {
9450                     data[i].effective = tswap32(target_data[i].effective);
9451                     data[i].permitted = tswap32(target_data[i].permitted);
9452                     data[i].inheritable = tswap32(target_data[i].inheritable);
9453                 }
9454             }
9455 
9456             dataptr = data;
9457         }
9458 
9459         if (num == TARGET_NR_capget) {
9460             ret = get_errno(capget(&header, dataptr));
9461         } else {
9462             ret = get_errno(capset(&header, dataptr));
9463         }
9464 
9465         /* The kernel always updates version for both capget and capset */
9466         target_header->version = tswap32(header.version);
9467         unlock_user_struct(target_header, arg1, 1);
9468 
9469         if (arg2) {
9470             if (num == TARGET_NR_capget) {
9471                 for (i = 0; i < data_items; i++) {
9472                     target_data[i].effective = tswap32(data[i].effective);
9473                     target_data[i].permitted = tswap32(data[i].permitted);
9474                     target_data[i].inheritable = tswap32(data[i].inheritable);
9475                 }
9476                 unlock_user(target_data, arg2, target_datalen);
9477             } else {
9478                 unlock_user(target_data, arg2, 0);
9479             }
9480         }
9481         return ret;
9482     }
9483     case TARGET_NR_sigaltstack:
9484         return do_sigaltstack(arg1, arg2,
9485                               get_sp_from_cpustate((CPUArchState *)cpu_env));
9486 
9487 #ifdef CONFIG_SENDFILE
9488 #ifdef TARGET_NR_sendfile
9489     case TARGET_NR_sendfile:
9490     {
9491         off_t *offp = NULL;
9492         off_t off;
9493         if (arg3) {
9494             ret = get_user_sal(off, arg3);
9495             if (is_error(ret)) {
9496                 return ret;
9497             }
9498             offp = &off;
9499         }
9500         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9501         if (!is_error(ret) && arg3) {
9502             abi_long ret2 = put_user_sal(off, arg3);
9503             if (is_error(ret2)) {
9504                 ret = ret2;
9505             }
9506         }
9507         return ret;
9508     }
9509 #endif
9510 #ifdef TARGET_NR_sendfile64
9511     case TARGET_NR_sendfile64:
9512     {
9513         off_t *offp = NULL;
9514         off_t off;
9515         if (arg3) {
9516             ret = get_user_s64(off, arg3);
9517             if (is_error(ret)) {
9518                 return ret;
9519             }
9520             offp = &off;
9521         }
9522         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9523         if (!is_error(ret) && arg3) {
9524             abi_long ret2 = put_user_s64(off, arg3);
9525             if (is_error(ret2)) {
9526                 ret = ret2;
9527             }
9528         }
9529         return ret;
9530     }
9531 #endif
9532 #endif
9533 #ifdef TARGET_NR_vfork
9534     case TARGET_NR_vfork:
9535         return get_errno(do_fork(cpu_env,
9536                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
9537                          0, 0, 0, 0));
9538 #endif
9539 #ifdef TARGET_NR_ugetrlimit
9540     case TARGET_NR_ugetrlimit:
9541     {
9542         struct rlimit rlim;
9543         int resource = target_to_host_resource(arg1);
9544         ret = get_errno(getrlimit(resource, &rlim));
9545         if (!is_error(ret)) {
9546             struct target_rlimit *target_rlim;
9547             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9548                 return -TARGET_EFAULT;
9549             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9550             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9551             unlock_user_struct(target_rlim, arg2, 1);
9552         }
9553         return ret;
9554     }
9555 #endif
9556 #ifdef TARGET_NR_truncate64
9557     case TARGET_NR_truncate64:
9558         if (!(p = lock_user_string(arg1)))
9559             return -TARGET_EFAULT;
9560         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
9561         unlock_user(p, arg1, 0);
9562         return ret;
9563 #endif
9564 #ifdef TARGET_NR_ftruncate64
9565     case TARGET_NR_ftruncate64:
9566         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
9567 #endif
9568 #ifdef TARGET_NR_stat64
9569     case TARGET_NR_stat64:
9570         if (!(p = lock_user_string(arg1))) {
9571             return -TARGET_EFAULT;
9572         }
9573         ret = get_errno(stat(path(p), &st));
9574         unlock_user(p, arg1, 0);
9575         if (!is_error(ret))
9576             ret = host_to_target_stat64(cpu_env, arg2, &st);
9577         return ret;
9578 #endif
9579 #ifdef TARGET_NR_lstat64
9580     case TARGET_NR_lstat64:
9581         if (!(p = lock_user_string(arg1))) {
9582             return -TARGET_EFAULT;
9583         }
9584         ret = get_errno(lstat(path(p), &st));
9585         unlock_user(p, arg1, 0);
9586         if (!is_error(ret))
9587             ret = host_to_target_stat64(cpu_env, arg2, &st);
9588         return ret;
9589 #endif
9590 #ifdef TARGET_NR_fstat64
9591     case TARGET_NR_fstat64:
9592         ret = get_errno(fstat(arg1, &st));
9593         if (!is_error(ret))
9594             ret = host_to_target_stat64(cpu_env, arg2, &st);
9595         return ret;
9596 #endif
9597 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9598 #ifdef TARGET_NR_fstatat64
9599     case TARGET_NR_fstatat64:
9600 #endif
9601 #ifdef TARGET_NR_newfstatat
9602     case TARGET_NR_newfstatat:
9603 #endif
9604         if (!(p = lock_user_string(arg2))) {
9605             return -TARGET_EFAULT;
9606         }
9607         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
9608         unlock_user(p, arg2, 0);
9609         if (!is_error(ret))
9610             ret = host_to_target_stat64(cpu_env, arg3, &st);
9611         return ret;
9612 #endif
9613 #ifdef TARGET_NR_lchown
9614     case TARGET_NR_lchown:
9615         if (!(p = lock_user_string(arg1)))
9616             return -TARGET_EFAULT;
9617         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
9618         unlock_user(p, arg1, 0);
9619         return ret;
9620 #endif
9621 #ifdef TARGET_NR_getuid
9622     case TARGET_NR_getuid:
9623         return get_errno(high2lowuid(getuid()));
9624 #endif
9625 #ifdef TARGET_NR_getgid
9626     case TARGET_NR_getgid:
9627         return get_errno(high2lowgid(getgid()));
9628 #endif
9629 #ifdef TARGET_NR_geteuid
9630     case TARGET_NR_geteuid:
9631         return get_errno(high2lowuid(geteuid()));
9632 #endif
9633 #ifdef TARGET_NR_getegid
9634     case TARGET_NR_getegid:
9635         return get_errno(high2lowgid(getegid()));
9636 #endif
9637     case TARGET_NR_setreuid:
9638         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
9639     case TARGET_NR_setregid:
9640         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
9641     case TARGET_NR_getgroups:
9642         {
9643             int gidsetsize = arg1;
9644             target_id *target_grouplist;
9645             gid_t *grouplist;
9646             int i;
9647 
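            /* On targets still using 16-bit legacy IDs, target_id is 16
             * bits wide: high2lowgid() maps host gids that do not fit to
             * the overflow value, and tswapid() fixes the byte order
             * before they are copied out.
             */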
9648             grouplist = alloca(gidsetsize * sizeof(gid_t));
9649             ret = get_errno(getgroups(gidsetsize, grouplist));
9650             if (gidsetsize == 0)
9651                 return ret;
9652             if (!is_error(ret)) {
9653                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
9654                 if (!target_grouplist)
9655                     return -TARGET_EFAULT;
9656                 for (i = 0; i < ret; i++)
9657                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
9658                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
9659             }
9660         }
9661         return ret;
9662     case TARGET_NR_setgroups:
9663         {
9664             int gidsetsize = arg1;
9665             target_id *target_grouplist;
9666             gid_t *grouplist = NULL;
9667             int i;
9668             if (gidsetsize) {
9669                 grouplist = alloca(gidsetsize * sizeof(gid_t));
9670                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
9671                 if (!target_grouplist) {
9672                     return -TARGET_EFAULT;
9673                 }
9674                 for (i = 0; i < gidsetsize; i++) {
9675                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
9676                 }
9677                 unlock_user(target_grouplist, arg2, 0);
9678             }
9679             return get_errno(setgroups(gidsetsize, grouplist));
9680         }
9681     case TARGET_NR_fchown:
9682         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
9683 #if defined(TARGET_NR_fchownat)
9684     case TARGET_NR_fchownat:
9685         if (!(p = lock_user_string(arg2)))
9686             return -TARGET_EFAULT;
9687         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
9688                                  low2highgid(arg4), arg5));
9689         unlock_user(p, arg2, 0);
9690         return ret;
9691 #endif
9692 #ifdef TARGET_NR_setresuid
9693     case TARGET_NR_setresuid:
9694         return get_errno(sys_setresuid(low2highuid(arg1),
9695                                        low2highuid(arg2),
9696                                        low2highuid(arg3)));
9697 #endif
9698 #ifdef TARGET_NR_getresuid
9699     case TARGET_NR_getresuid:
9700         {
9701             uid_t ruid, euid, suid;
9702             ret = get_errno(getresuid(&ruid, &euid, &suid));
9703             if (!is_error(ret)) {
9704                 if (put_user_id(high2lowuid(ruid), arg1)
9705                     || put_user_id(high2lowuid(euid), arg2)
9706                     || put_user_id(high2lowuid(suid), arg3))
9707                     return -TARGET_EFAULT;
9708             }
9709         }
9710         return ret;
9711 #endif
9712 #ifdef TARGET_NR_getresgid
9713     case TARGET_NR_setresgid:
9714         return get_errno(sys_setresgid(low2highgid(arg1),
9715                                        low2highgid(arg2),
9716                                        low2highgid(arg3)));
9717 #endif
9718 #ifdef TARGET_NR_getresgid
9719     case TARGET_NR_getresgid:
9720         {
9721             gid_t rgid, egid, sgid;
9722             ret = get_errno(getresgid(&rgid, &egid, &sgid));
9723             if (!is_error(ret)) {
9724                 if (put_user_id(high2lowgid(rgid), arg1)
9725                     || put_user_id(high2lowgid(egid), arg2)
9726                     || put_user_id(high2lowgid(sgid), arg3))
9727                     return -TARGET_EFAULT;
9728             }
9729         }
9730         return ret;
9731 #endif
9732 #ifdef TARGET_NR_chown
9733     case TARGET_NR_chown:
9734         if (!(p = lock_user_string(arg1)))
9735             return -TARGET_EFAULT;
9736         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
9737         unlock_user(p, arg1, 0);
9738         return ret;
9739 #endif
9740     case TARGET_NR_setuid:
9741         return get_errno(sys_setuid(low2highuid(arg1)));
9742     case TARGET_NR_setgid:
9743         return get_errno(sys_setgid(low2highgid(arg1)));
9744     case TARGET_NR_setfsuid:
9745         return get_errno(setfsuid(arg1));
9746     case TARGET_NR_setfsgid:
9747         return get_errno(setfsgid(arg1));
9748 
9749 #ifdef TARGET_NR_lchown32
9750     case TARGET_NR_lchown32:
9751         if (!(p = lock_user_string(arg1)))
9752             return -TARGET_EFAULT;
9753         ret = get_errno(lchown(p, arg2, arg3));
9754         unlock_user(p, arg1, 0);
9755         return ret;
9756 #endif
9757 #ifdef TARGET_NR_getuid32
9758     case TARGET_NR_getuid32:
9759         return get_errno(getuid());
9760 #endif
9761 
9762 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
9763     /* Alpha specific */
9764     case TARGET_NR_getxuid:
9765         {
9766             uid_t euid;
9767             euid = geteuid();
9768             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
9769         }
9770         return get_errno(getuid());
9771 #endif
9772 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
9773     /* Alpha specific */
9774     case TARGET_NR_getxgid:
9775         {
9776             gid_t egid;
9777             egid = getegid();
9778             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
9779         }
9780         return get_errno(getgid());
9781 #endif
9782 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
9783     /* Alpha specific */
9784     case TARGET_NR_osf_getsysinfo:
9785         ret = -TARGET_EOPNOTSUPP;
9786         switch (arg1) {
9787           case TARGET_GSI_IEEE_FP_CONTROL:
9788             {
9789                 uint64_t swcr, fpcr = cpu_alpha_load_fpcr(cpu_env);
9790 
9791                 /* Copied from linux ieee_fpcr_to_swcr.  */
9792                 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
9793                 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
9794                 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
9795                                         | SWCR_TRAP_ENABLE_DZE
9796                                         | SWCR_TRAP_ENABLE_OVF);
9797                 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
9798                                         | SWCR_TRAP_ENABLE_INE);
9799                 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
9800                 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
9801 
9802                 if (put_user_u64(swcr, arg2))
9803                     return -TARGET_EFAULT;
9804                 ret = 0;
9805             }
9806             break;
9807 
9808           /* case GSI_IEEE_STATE_AT_SIGNAL:
9809              -- Not implemented in linux kernel.
9810              case GSI_UACPROC:
9811              -- Retrieves current unaligned access state; not much used.
9812              case GSI_PROC_TYPE:
9813              -- Retrieves implver information; surely not used.
9814              case GSI_GET_HWRPB:
9815              -- Grabs a copy of the HWRPB; surely not used.
9816           */
9817         }
9818         return ret;
9819 #endif
9820 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
9821     /* Alpha specific */
9822     case TARGET_NR_osf_setsysinfo:
9823         ret = -TARGET_EOPNOTSUPP;
9824         switch (arg1) {
9825           case TARGET_SSI_IEEE_FP_CONTROL:
9826             {
9827                 uint64_t swcr, fpcr, orig_fpcr;
9828 
9829                 if (get_user_u64(swcr, arg2)) {
9830                     return -TARGET_EFAULT;
9831                 }
9832                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9833                 fpcr = orig_fpcr & FPCR_DYN_MASK;
9834 
9835                 /* Copied from linux ieee_swcr_to_fpcr.  */
9836                 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
9837                 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
9838                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
9839                                   | SWCR_TRAP_ENABLE_DZE
9840                                   | SWCR_TRAP_ENABLE_OVF)) << 48;
9841                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
9842                                   | SWCR_TRAP_ENABLE_INE)) << 57;
9843                 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
9844                 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
9845 
9846                 cpu_alpha_store_fpcr(cpu_env, fpcr);
9847                 ret = 0;
9848             }
9849             break;
9850 
9851           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
9852             {
9853                 uint64_t exc, fpcr, orig_fpcr;
9854                 int si_code;
9855 
9856                 if (get_user_u64(exc, arg2)) {
9857                     return -TARGET_EFAULT;
9858                 }
9859 
9860                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9861 
9862                 /* We only add to the exception status here.  */
9863                 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
9864 
9865                 cpu_alpha_store_fpcr(cpu_env, fpcr);
9866                 ret = 0;
9867 
9868                 /* Old exceptions are not signaled.  */
9869                 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
9870 
9871                 /* If any exceptions set by this call,
9872                    and are unmasked, send a signal.  */
9873                 si_code = 0;
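                /* The checks below overwrite si_code, so the later (more
                 * severe) exceptions such as FLTINV take precedence over
                 * FLTRES when several status bits are set at once.
                 */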
9874                 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
9875                     si_code = TARGET_FPE_FLTRES;
9876                 }
9877                 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
9878                     si_code = TARGET_FPE_FLTUND;
9879                 }
9880                 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
9881                     si_code = TARGET_FPE_FLTOVF;
9882                 }
9883                 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
9884                     si_code = TARGET_FPE_FLTDIV;
9885                 }
9886                 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
9887                     si_code = TARGET_FPE_FLTINV;
9888                 }
9889                 if (si_code != 0) {
9890                     target_siginfo_t info;
9891                     info.si_signo = SIGFPE;
9892                     info.si_errno = 0;
9893                     info.si_code = si_code;
9894                     info._sifields._sigfault._addr
9895                         = ((CPUArchState *)cpu_env)->pc;
9896                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
9897                                  QEMU_SI_FAULT, &info);
9898                 }
9899             }
9900             break;
9901 
9902           /* case SSI_NVPAIRS:
9903              -- Used with SSIN_UACPROC to enable unaligned accesses.
9904              case SSI_IEEE_STATE_AT_SIGNAL:
9905              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
9906              -- Not implemented in linux kernel
9907           */
9908         }
9909         return ret;
9910 #endif
9911 #ifdef TARGET_NR_osf_sigprocmask
9912     /* Alpha specific.  */
9913     case TARGET_NR_osf_sigprocmask:
9914         {
9915             abi_ulong mask;
9916             int how;
9917             sigset_t set, oldset;
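            /* Unlike sigprocmask(2), the OSF/1 variant returns the previous
             * mask directly as the syscall result instead of writing it
             * through a user pointer.
             */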
9918 
9919             switch (arg1) {
9920             case TARGET_SIG_BLOCK:
9921                 how = SIG_BLOCK;
9922                 break;
9923             case TARGET_SIG_UNBLOCK:
9924                 how = SIG_UNBLOCK;
9925                 break;
9926             case TARGET_SIG_SETMASK:
9927                 how = SIG_SETMASK;
9928                 break;
9929             default:
9930                 return -TARGET_EINVAL;
9931             }
9932             mask = arg2;
9933             target_to_host_old_sigset(&set, &mask);
9934             ret = do_sigprocmask(how, &set, &oldset);
9935             if (!ret) {
9936                 host_to_target_old_sigset(&mask, &oldset);
9937                 ret = mask;
9938             }
9939         }
9940         return ret;
9941 #endif
9942 
9943 #ifdef TARGET_NR_getgid32
9944     case TARGET_NR_getgid32:
9945         return get_errno(getgid());
9946 #endif
9947 #ifdef TARGET_NR_geteuid32
9948     case TARGET_NR_geteuid32:
9949         return get_errno(geteuid());
9950 #endif
9951 #ifdef TARGET_NR_getegid32
9952     case TARGET_NR_getegid32:
9953         return get_errno(getegid());
9954 #endif
9955 #ifdef TARGET_NR_setreuid32
9956     case TARGET_NR_setreuid32:
9957         return get_errno(setreuid(arg1, arg2));
9958 #endif
9959 #ifdef TARGET_NR_setregid32
9960     case TARGET_NR_setregid32:
9961         return get_errno(setregid(arg1, arg2));
9962 #endif
9963 #ifdef TARGET_NR_getgroups32
9964     case TARGET_NR_getgroups32:
9965         {
9966             int gidsetsize = arg1;
9967             uint32_t *target_grouplist;
9968             gid_t *grouplist;
9969             int i;
9970 
9971             grouplist = alloca(gidsetsize * sizeof(gid_t));
9972             ret = get_errno(getgroups(gidsetsize, grouplist));
9973             if (gidsetsize == 0)
9974                 return ret;
9975             if (!is_error(ret)) {
9976                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
9977                 if (!target_grouplist) {
9978                     return -TARGET_EFAULT;
9979                 }
9980                 for (i = 0; i < ret; i++)
9981                     target_grouplist[i] = tswap32(grouplist[i]);
9982                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
9983             }
9984         }
9985         return ret;
9986 #endif
9987 #ifdef TARGET_NR_setgroups32
9988     case TARGET_NR_setgroups32:
9989         {
9990             int gidsetsize = arg1;
9991             uint32_t *target_grouplist;
9992             gid_t *grouplist;
9993             int i;
9994 
9995             grouplist = alloca(gidsetsize * sizeof(gid_t));
9996             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
9997             if (!target_grouplist) {
9998                 return -TARGET_EFAULT;
9999             }
10000             for (i = 0; i < gidsetsize; i++)
10001                 grouplist[i] = tswap32(target_grouplist[i]);
10002             unlock_user(target_grouplist, arg2, 0);
10003             return get_errno(setgroups(gidsetsize, grouplist));
10004         }
10005 #endif
10006 #ifdef TARGET_NR_fchown32
10007     case TARGET_NR_fchown32:
10008         return get_errno(fchown(arg1, arg2, arg3));
10009 #endif
10010 #ifdef TARGET_NR_setresuid32
10011     case TARGET_NR_setresuid32:
10012         return get_errno(sys_setresuid(arg1, arg2, arg3));
10013 #endif
10014 #ifdef TARGET_NR_getresuid32
10015     case TARGET_NR_getresuid32:
10016         {
10017             uid_t ruid, euid, suid;
10018             ret = get_errno(getresuid(&ruid, &euid, &suid));
10019             if (!is_error(ret)) {
10020                 if (put_user_u32(ruid, arg1)
10021                     || put_user_u32(euid, arg2)
10022                     || put_user_u32(suid, arg3))
10023                     return -TARGET_EFAULT;
10024             }
10025         }
10026         return ret;
10027 #endif
10028 #ifdef TARGET_NR_setresgid32
10029     case TARGET_NR_setresgid32:
10030         return get_errno(sys_setresgid(arg1, arg2, arg3));
10031 #endif
10032 #ifdef TARGET_NR_getresgid32
10033     case TARGET_NR_getresgid32:
10034         {
10035             gid_t rgid, egid, sgid;
10036             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10037             if (!is_error(ret)) {
10038                 if (put_user_u32(rgid, arg1)
10039                     || put_user_u32(egid, arg2)
10040                     || put_user_u32(sgid, arg3))
10041                     return -TARGET_EFAULT;
10042             }
10043         }
10044         return ret;
10045 #endif
10046 #ifdef TARGET_NR_chown32
10047     case TARGET_NR_chown32:
10048         if (!(p = lock_user_string(arg1)))
10049             return -TARGET_EFAULT;
10050         ret = get_errno(chown(p, arg2, arg3));
10051         unlock_user(p, arg1, 0);
10052         return ret;
10053 #endif
10054 #ifdef TARGET_NR_setuid32
10055     case TARGET_NR_setuid32:
10056         return get_errno(sys_setuid(arg1));
10057 #endif
10058 #ifdef TARGET_NR_setgid32
10059     case TARGET_NR_setgid32:
10060         return get_errno(sys_setgid(arg1));
10061 #endif
10062 #ifdef TARGET_NR_setfsuid32
10063     case TARGET_NR_setfsuid32:
10064         return get_errno(setfsuid(arg1));
10065 #endif
10066 #ifdef TARGET_NR_setfsgid32
10067     case TARGET_NR_setfsgid32:
10068         return get_errno(setfsgid(arg1));
10069 #endif
10070 #ifdef TARGET_NR_mincore
10071     case TARGET_NR_mincore:
10072         {
10073             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10074             if (!a) {
10075                 return -TARGET_ENOMEM;
10076             }
10077             p = lock_user_string(arg3);
10078             if (!p) {
10079                 ret = -TARGET_EFAULT;
10080             } else {
10081                 ret = get_errno(mincore(a, arg2, p));
10082                 unlock_user(p, arg3, ret);
10083             }
10084             unlock_user(a, arg1, 0);
10085         }
10086         return ret;
10087 #endif
10088 #ifdef TARGET_NR_arm_fadvise64_64
10089     case TARGET_NR_arm_fadvise64_64:
10090         /* arm_fadvise64_64 looks like fadvise64_64 but
10091          * with different argument order: fd, advice, offset, len
10092          * rather than the usual fd, offset, len, advice.
10093          * Note that offset and len are both 64-bit so appear as
10094          * pairs of 32-bit registers.
10095          */
10096         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10097                             target_offset64(arg5, arg6), arg2);
10098         return -host_to_target_errno(ret);
10099 #endif
10100 
10101 #if TARGET_ABI_BITS == 32
10102 
10103 #ifdef TARGET_NR_fadvise64_64
10104     case TARGET_NR_fadvise64_64:
10105 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10106         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10107         ret = arg2;
10108         arg2 = arg3;
10109         arg3 = arg4;
10110         arg4 = arg5;
10111         arg5 = arg6;
10112         arg6 = ret;
10113 #else
10114         /* 6 args: fd, offset (high, low), len (high, low), advice */
10115         if (regpairs_aligned(cpu_env, num)) {
10116             /* offset is in (3,4), len in (5,6) and advice in 7 */
10117             arg2 = arg3;
10118             arg3 = arg4;
10119             arg4 = arg5;
10120             arg5 = arg6;
10121             arg6 = arg7;
10122         }
10123 #endif
10124         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10125                             target_offset64(arg4, arg5), arg6);
10126         return -host_to_target_errno(ret);
10127 #endif
10128 
10129 #ifdef TARGET_NR_fadvise64
10130     case TARGET_NR_fadvise64:
10131         /* 5 args: fd, offset (high, low), len, advice */
10132         if (regpairs_aligned(cpu_env, num)) {
10133             /* offset is in (3,4), len in 5 and advice in 6 */
10134             arg2 = arg3;
10135             arg3 = arg4;
10136             arg4 = arg5;
10137             arg5 = arg6;
10138         }
10139         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10140         return -host_to_target_errno(ret);
10141 #endif
10142 
10143 #else /* not a 32-bit ABI */
10144 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10145 #ifdef TARGET_NR_fadvise64_64
10146     case TARGET_NR_fadvise64_64:
10147 #endif
10148 #ifdef TARGET_NR_fadvise64
10149     case TARGET_NR_fadvise64:
10150 #endif
10151 #ifdef TARGET_S390X
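        /* s390 guests number POSIX_FADV_DONTNEED/NOREUSE as 6/7, so remap
         * those to the host values and turn the guest's (unused) 4/5 into
         * deliberately invalid advice.
         */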
10152         switch (arg4) {
10153         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10154         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10155         case 6: arg4 = POSIX_FADV_DONTNEED; break;
10156         case 7: arg4 = POSIX_FADV_NOREUSE; break;
10157         default: break;
10158         }
10159 #endif
10160         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10161 #endif
10162 #endif /* end of 64-bit ABI fadvise handling */
10163 
10164 #ifdef TARGET_NR_madvise
10165     case TARGET_NR_madvise:
10166         /* A straight passthrough may not be safe because qemu sometimes
10167            turns private file-backed mappings into anonymous mappings.
10168            This will break MADV_DONTNEED.
10169            This is a hint, so ignoring and returning success is ok.  */
10170         return 0;
10171 #endif
10172 #if TARGET_ABI_BITS == 32
10173     case TARGET_NR_fcntl64:
10174     {
10175         int cmd;
10176         struct flock64 fl;
10177         from_flock64_fn *copyfrom = copy_from_user_flock64;
10178         to_flock64_fn *copyto = copy_to_user_flock64;
10179 
10180 #ifdef TARGET_ARM
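        /* The old ARM OABI lays out struct flock64 without the 64-bit
         * alignment padding that EABI inserts, so it needs its own
         * copy-in/copy-out helpers.
         */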
10181         if (!((CPUARMState *)cpu_env)->eabi) {
10182             copyfrom = copy_from_user_oabi_flock64;
10183             copyto = copy_to_user_oabi_flock64;
10184         }
10185 #endif
10186 
10187         cmd = target_to_host_fcntl_cmd(arg2);
10188         if (cmd == -TARGET_EINVAL) {
10189             return cmd;
10190         }
10191 
10192         switch (arg2) {
10193         case TARGET_F_GETLK64:
10194             ret = copyfrom(&fl, arg3);
10195             if (ret) {
10196                 break;
10197             }
10198             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10199             if (ret == 0) {
10200                 ret = copyto(arg3, &fl);
10201             }
10202             break;
10203 
10204         case TARGET_F_SETLK64:
10205         case TARGET_F_SETLKW64:
10206             ret = copyfrom(&fl, arg3);
10207             if (ret) {
10208                 break;
10209             }
10210             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10211             break;
10212         default:
10213             ret = do_fcntl(arg1, arg2, arg3);
10214             break;
10215         }
10216         return ret;
10217     }
10218 #endif
10219 #ifdef TARGET_NR_cacheflush
10220     case TARGET_NR_cacheflush:
10221         /* self-modifying code is handled automatically, so nothing needed */
10222         return 0;
10223 #endif
10224 #ifdef TARGET_NR_getpagesize
10225     case TARGET_NR_getpagesize:
10226         return TARGET_PAGE_SIZE;
10227 #endif
10228     case TARGET_NR_gettid:
10229         return get_errno(gettid());
10230 #ifdef TARGET_NR_readahead
10231     case TARGET_NR_readahead:
10232 #if TARGET_ABI_BITS == 32
10233         if (regpairs_aligned(cpu_env, num)) {
10234             arg2 = arg3;
10235             arg3 = arg4;
10236             arg4 = arg5;
10237         }
10238         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
10239 #else
10240         ret = get_errno(readahead(arg1, arg2, arg3));
10241 #endif
10242         return ret;
10243 #endif
10244 #ifdef CONFIG_ATTR
10245 #ifdef TARGET_NR_setxattr
10246     case TARGET_NR_listxattr:
10247     case TARGET_NR_llistxattr:
10248     {
10249         void *p, *b = 0;
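        /* arg2 == 0 is typically the "query required size" form of
         * listxattr, so a NULL buffer is passed straight through to the
         * host.
         */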
10250         if (arg2) {
10251             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10252             if (!b) {
10253                 return -TARGET_EFAULT;
10254             }
10255         }
10256         p = lock_user_string(arg1);
10257         if (p) {
10258             if (num == TARGET_NR_listxattr) {
10259                 ret = get_errno(listxattr(p, b, arg3));
10260             } else {
10261                 ret = get_errno(llistxattr(p, b, arg3));
10262             }
10263         } else {
10264             ret = -TARGET_EFAULT;
10265         }
10266         unlock_user(p, arg1, 0);
10267         unlock_user(b, arg2, arg3);
10268         return ret;
10269     }
10270     case TARGET_NR_flistxattr:
10271     {
10272         void *b = 0;
10273         if (arg2) {
10274             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10275             if (!b) {
10276                 return -TARGET_EFAULT;
10277             }
10278         }
10279         ret = get_errno(flistxattr(arg1, b, arg3));
10280         unlock_user(b, arg2, arg3);
10281         return ret;
10282     }
10283     case TARGET_NR_setxattr:
10284     case TARGET_NR_lsetxattr:
10285         {
10286             void *p, *n, *v = 0;
10287             if (arg3) {
10288                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10289                 if (!v) {
10290                     return -TARGET_EFAULT;
10291                 }
10292             }
10293             p = lock_user_string(arg1);
10294             n = lock_user_string(arg2);
10295             if (p && n) {
10296                 if (num == TARGET_NR_setxattr) {
10297                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
10298                 } else {
10299                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10300                 }
10301             } else {
10302                 ret = -TARGET_EFAULT;
10303             }
10304             unlock_user(p, arg1, 0);
10305             unlock_user(n, arg2, 0);
10306             unlock_user(v, arg3, 0);
10307         }
10308         return ret;
10309     case TARGET_NR_fsetxattr:
10310         {
10311             void *n, *v = 0;
10312             if (arg3) {
10313                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10314                 if (!v) {
10315                     return -TARGET_EFAULT;
10316                 }
10317             }
10318             n = lock_user_string(arg2);
10319             if (n) {
10320                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10321             } else {
10322                 ret = -TARGET_EFAULT;
10323             }
10324             unlock_user(n, arg2, 0);
10325             unlock_user(v, arg3, 0);
10326         }
10327         return ret;
10328     case TARGET_NR_getxattr:
10329     case TARGET_NR_lgetxattr:
10330         {
10331             void *p, *n, *v = 0;
10332             if (arg3) {
10333                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10334                 if (!v) {
10335                     return -TARGET_EFAULT;
10336                 }
10337             }
10338             p = lock_user_string(arg1);
10339             n = lock_user_string(arg2);
10340             if (p && n) {
10341                 if (num == TARGET_NR_getxattr) {
10342                     ret = get_errno(getxattr(p, n, v, arg4));
10343                 } else {
10344                     ret = get_errno(lgetxattr(p, n, v, arg4));
10345                 }
10346             } else {
10347                 ret = -TARGET_EFAULT;
10348             }
10349             unlock_user(p, arg1, 0);
10350             unlock_user(n, arg2, 0);
10351             unlock_user(v, arg3, arg4);
10352         }
10353         return ret;
10354     case TARGET_NR_fgetxattr:
10355         {
10356             void *n, *v = 0;
10357             if (arg3) {
10358                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10359                 if (!v) {
10360                     return -TARGET_EFAULT;
10361                 }
10362             }
10363             n = lock_user_string(arg2);
10364             if (n) {
10365                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10366             } else {
10367                 ret = -TARGET_EFAULT;
10368             }
10369             unlock_user(n, arg2, 0);
10370             unlock_user(v, arg3, arg4);
10371         }
10372         return ret;
10373     case TARGET_NR_removexattr:
10374     case TARGET_NR_lremovexattr:
10375         {
10376             void *p, *n;
10377             p = lock_user_string(arg1);
10378             n = lock_user_string(arg2);
10379             if (p && n) {
10380                 if (num == TARGET_NR_removexattr) {
10381                     ret = get_errno(removexattr(p, n));
10382                 } else {
10383                     ret = get_errno(lremovexattr(p, n));
10384                 }
10385             } else {
10386                 ret = -TARGET_EFAULT;
10387             }
10388             unlock_user(p, arg1, 0);
10389             unlock_user(n, arg2, 0);
10390         }
10391         return ret;
10392     case TARGET_NR_fremovexattr:
10393         {
10394             void *n;
10395             n = lock_user_string(arg2);
10396             if (n) {
10397                 ret = get_errno(fremovexattr(arg1, n));
10398             } else {
10399                 ret = -TARGET_EFAULT;
10400             }
10401             unlock_user(n, arg2, 0);
10402         }
10403         return ret;
10404 #endif
10405 #endif /* CONFIG_ATTR */
10406 #ifdef TARGET_NR_set_thread_area
10407     case TARGET_NR_set_thread_area:
10408 #if defined(TARGET_MIPS)
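      /* MIPS exposes the TLS pointer through the CP0 UserLocal register,
       * which userspace reads with the rdhwr instruction.
       */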
10409       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10410       return 0;
10411 #elif defined(TARGET_CRIS)
10412       if (arg1 & 0xff)
10413           ret = -TARGET_EINVAL;
10414       else {
10415           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10416           ret = 0;
10417       }
10418       return ret;
10419 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10420       return do_set_thread_area(cpu_env, arg1);
10421 #elif defined(TARGET_M68K)
10422       {
10423           TaskState *ts = cpu->opaque;
10424           ts->tp_value = arg1;
10425           return 0;
10426       }
10427 #else
10428       return -TARGET_ENOSYS;
10429 #endif
10430 #endif
10431 #ifdef TARGET_NR_get_thread_area
10432     case TARGET_NR_get_thread_area:
10433 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10434         return do_get_thread_area(cpu_env, arg1);
10435 #elif defined(TARGET_M68K)
10436         {
10437             TaskState *ts = cpu->opaque;
10438             return ts->tp_value;
10439         }
10440 #else
10441         return -TARGET_ENOSYS;
10442 #endif
10443 #endif
10444 #ifdef TARGET_NR_getdomainname
10445     case TARGET_NR_getdomainname:
10446         return -TARGET_ENOSYS;
10447 #endif
10448 
10449 #ifdef TARGET_NR_clock_settime
10450     case TARGET_NR_clock_settime:
10451     {
10452         struct timespec ts;
10453 
10454         ret = target_to_host_timespec(&ts, arg2);
10455         if (!is_error(ret)) {
10456             ret = get_errno(clock_settime(arg1, &ts));
10457         }
10458         return ret;
10459     }
10460 #endif
10461 #ifdef TARGET_NR_clock_gettime
10462     case TARGET_NR_clock_gettime:
10463     {
10464         struct timespec ts;
10465         ret = get_errno(clock_gettime(arg1, &ts));
10466         if (!is_error(ret)) {
10467             ret = host_to_target_timespec(arg2, &ts);
10468         }
10469         return ret;
10470     }
10471 #endif
10472 #ifdef TARGET_NR_clock_getres
10473     case TARGET_NR_clock_getres:
10474     {
10475         struct timespec ts;
10476         ret = get_errno(clock_getres(arg1, &ts));
10477         if (!is_error(ret)) {
10478             host_to_target_timespec(arg2, &ts);
10479         }
10480         return ret;
10481     }
10482 #endif
10483 #ifdef TARGET_NR_clock_nanosleep
10484     case TARGET_NR_clock_nanosleep:
10485     {
10486         struct timespec ts;
10487         target_to_host_timespec(&ts, arg3);
10488         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
10489                                              &ts, arg4 ? &ts : NULL));
10490         if (arg4)
10491             host_to_target_timespec(arg4, &ts);
10492 
10493 #if defined(TARGET_PPC)
10494         /* clock_nanosleep is odd in that it returns positive errno values.
10495          * On PPC, CR0 bit 3 should be set in such a situation. */
10496         if (ret && ret != -TARGET_ERESTARTSYS) {
10497             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
10498         }
10499 #endif
10500         return ret;
10501     }
10502 #endif
10503 
10504 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10505     case TARGET_NR_set_tid_address:
10506         return get_errno(set_tid_address((int *)g2h(arg1)));
10507 #endif
10508 
10509     case TARGET_NR_tkill:
10510         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10511 
10512     case TARGET_NR_tgkill:
10513         return get_errno(safe_tgkill((int)arg1, (int)arg2,
10514                          target_to_host_signal(arg3)));
10515 
10516 #ifdef TARGET_NR_set_robust_list
10517     case TARGET_NR_set_robust_list:
10518     case TARGET_NR_get_robust_list:
10519         /* The ABI for supporting robust futexes has userspace pass
10520          * the kernel a pointer to a linked list which is updated by
10521          * userspace after the syscall; the list is walked by the kernel
10522          * when the thread exits. Since the linked list in QEMU guest
10523          * memory isn't a valid linked list for the host and we have
10524          * no way to reliably intercept the thread-death event, we can't
10525          * support these. Silently return ENOSYS so that guest userspace
10526          * falls back to a non-robust futex implementation (which should
10527          * be OK except in the corner case of the guest crashing while
10528          * holding a mutex that is shared with another process via
10529          * shared memory).
10530          */
10531         return -TARGET_ENOSYS;
10532 #endif
10533 
10534 #if defined(TARGET_NR_utimensat)
10535     case TARGET_NR_utimensat:
10536         {
10537             struct timespec *tsp, ts[2];
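            /* A NULL times pointer asks the kernel to set both timestamps
             * to the current time.
             */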
10538             if (!arg3) {
10539                 tsp = NULL;
10540             } else {
10541                 target_to_host_timespec(ts, arg3);
10542                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
10543                 tsp = ts;
10544             }
10545             if (!arg2)
10546                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
10547             else {
10548                 if (!(p = lock_user_string(arg2))) {
10549                     return -TARGET_EFAULT;
10550                 }
10551                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
10552                 unlock_user(p, arg2, 0);
10553             }
10554         }
10555         return ret;
10556 #endif
10557     case TARGET_NR_futex:
10558         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
10559 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10560     case TARGET_NR_inotify_init:
10561         ret = get_errno(sys_inotify_init());
10562         if (ret >= 0) {
10563             fd_trans_register(ret, &target_inotify_trans);
10564         }
10565         return ret;
10566 #endif
10567 #ifdef CONFIG_INOTIFY1
10568 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10569     case TARGET_NR_inotify_init1:
10570         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
10571                                           fcntl_flags_tbl)));
10572         if (ret >= 0) {
10573             fd_trans_register(ret, &target_inotify_trans);
10574         }
10575         return ret;
10576 #endif
10577 #endif
10578 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10579     case TARGET_NR_inotify_add_watch:
10580         p = lock_user_string(arg2);
10581         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
10582         unlock_user(p, arg2, 0);
10583         return ret;
10584 #endif
10585 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10586     case TARGET_NR_inotify_rm_watch:
10587         return get_errno(sys_inotify_rm_watch(arg1, arg2));
10588 #endif
10589 
10590 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10591     case TARGET_NR_mq_open:
10592         {
10593             struct mq_attr posix_mq_attr;
10594             struct mq_attr *pposix_mq_attr;
10595             int host_flags;
10596 
10597             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
10598             pposix_mq_attr = NULL;
10599             if (arg4) {
10600                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
10601                     return -TARGET_EFAULT;
10602                 }
10603                 pposix_mq_attr = &posix_mq_attr;
10604             }
10605             p = lock_user_string(arg1 - 1);
10606             if (!p) {
10607                 return -TARGET_EFAULT;
10608             }
10609             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
10610             unlock_user(p, arg1, 0);
10611         }
10612         return ret;
10613 
10614     case TARGET_NR_mq_unlink:
10615         p = lock_user_string(arg1 - 1);
10616         if (!p) {
10617             return -TARGET_EFAULT;
10618         }
10619         ret = get_errno(mq_unlink(p));
10620         unlock_user(p, arg1, 0);
10621         return ret;
10622 
10623     case TARGET_NR_mq_timedsend:
10624         {
10625             struct timespec ts;
10626 
10627             p = lock_user(VERIFY_READ, arg2, arg3, 1);
                  if (!p) {
                      return -TARGET_EFAULT;
                  }
10628             if (arg5 != 0) {
10629                 target_to_host_timespec(&ts, arg5);
10630                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
10631                 host_to_target_timespec(arg5, &ts);
10632             } else {
10633                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
10634             }
10635             unlock_user(p, arg2, arg3);
10636         }
10637         return ret;
10638 
10639     case TARGET_NR_mq_timedreceive:
10640         {
10641             struct timespec ts;
10642             unsigned int prio;
10643 
10644             p = lock_user(VERIFY_READ, arg2, arg3, 1);
                  if (!p) {
                      return -TARGET_EFAULT;
                  }
10645             if (arg5 != 0) {
10646                 target_to_host_timespec(&ts, arg5);
10647                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
10648                                                      &prio, &ts));
10649                 host_to_target_timespec(arg5, &ts);
10650             } else {
10651                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
10652                                                      &prio, NULL));
10653             }
10654             unlock_user(p, arg2, arg3);
10655             if (arg4 != 0) {
10656                 put_user_u32(prio, arg4);
                  }
10657         }
10658         return ret;
10659 
10660     /* Not implemented for now... */
10661 /*     case TARGET_NR_mq_notify: */
10662 /*         break; */
10663 
10664     case TARGET_NR_mq_getsetattr:
10665         {
10666             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
10667             ret = 0;
10668             if (arg2 != 0) {
10669                 if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                          return -TARGET_EFAULT;
                      }
10670                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
10671                                            &posix_mq_attr_out));
10672             } else if (arg3 != 0) {
10673                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
10674             }
10675             if (ret == 0 && arg3 != 0) {
10676                 if (copy_to_user_mq_attr(arg3, &posix_mq_attr_out) != 0) {
                          return -TARGET_EFAULT;
                      }
10677             }
10678         }
10679         return ret;
10680 #endif
10681 
10682 #ifdef CONFIG_SPLICE
10683 #ifdef TARGET_NR_tee
10684     case TARGET_NR_tee:
10685         {
10686             ret = get_errno(tee(arg1, arg2, arg3, arg4));
10687         }
10688         return ret;
10689 #endif
10690 #ifdef TARGET_NR_splice
10691     case TARGET_NR_splice:
10692         {
10693             loff_t loff_in, loff_out;
10694             loff_t *ploff_in = NULL, *ploff_out = NULL;
10695             if (arg2) {
10696                 if (get_user_u64(loff_in, arg2)) {
10697                     return -TARGET_EFAULT;
10698                 }
10699                 ploff_in = &loff_in;
10700             }
10701             if (arg4) {
10702                 if (get_user_u64(loff_out, arg4)) {
10703                     return -TARGET_EFAULT;
10704                 }
10705                 ploff_out = &loff_out;
10706             }
10707             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
10708             if (arg2) {
10709                 if (put_user_u64(loff_in, arg2)) {
10710                     return -TARGET_EFAULT;
10711                 }
10712             }
10713             if (arg4) {
10714                 if (put_user_u64(loff_out, arg4)) {
10715                     return -TARGET_EFAULT;
10716                 }
10717             }
10718         }
10719         return ret;
10720 #endif
10721 #ifdef TARGET_NR_vmsplice
10722     case TARGET_NR_vmsplice:
10723         {
10724             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10725             if (vec != NULL) {
10726                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
10727                 unlock_iovec(vec, arg2, arg3, 0);
10728             } else {
10729                 ret = -host_to_target_errno(errno);
10730             }
10731         }
10732         return ret;
10733 #endif
10734 #endif /* CONFIG_SPLICE */
10735 #ifdef CONFIG_EVENTFD
10736 #if defined(TARGET_NR_eventfd)
10737     case TARGET_NR_eventfd:
10738         ret = get_errno(eventfd(arg1, 0));
10739         if (ret >= 0) {
10740             fd_trans_register(ret, &target_eventfd_trans);
10741         }
10742         return ret;
10743 #endif
10744 #if defined(TARGET_NR_eventfd2)
10745     case TARGET_NR_eventfd2:
10746     {
10747         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
10748         if (arg2 & TARGET_O_NONBLOCK) {
10749             host_flags |= O_NONBLOCK;
10750         }
10751         if (arg2 & TARGET_O_CLOEXEC) {
10752             host_flags |= O_CLOEXEC;
10753         }
10754         ret = get_errno(eventfd(arg1, host_flags));
10755         if (ret >= 0) {
10756             fd_trans_register(ret, &target_eventfd_trans);
10757         }
10758         return ret;
10759     }
10760 #endif
10761 #endif /* CONFIG_EVENTFD  */
10762 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
10763     case TARGET_NR_fallocate:
10764 #if TARGET_ABI_BITS == 32
10765         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
10766                                   target_offset64(arg5, arg6)));
10767 #else
10768         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
10769 #endif
10770         return ret;
10771 #endif
10772 #if defined(CONFIG_SYNC_FILE_RANGE)
10773 #if defined(TARGET_NR_sync_file_range)
10774     case TARGET_NR_sync_file_range:
10775 #if TARGET_ABI_BITS == 32
10776 #if defined(TARGET_MIPS)
10777         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
10778                                         target_offset64(arg5, arg6), arg7));
10779 #else
10780         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
10781                                         target_offset64(arg4, arg5), arg6));
10782 #endif /* !TARGET_MIPS */
10783 #else
10784         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
10785 #endif
10786         return ret;
10787 #endif
10788 #if defined(TARGET_NR_sync_file_range2)
10789     case TARGET_NR_sync_file_range2:
10790         /* This is like sync_file_range but the arguments are reordered */
10791 #if TARGET_ABI_BITS == 32
10792         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
10793                                         target_offset64(arg5, arg6), arg2));
10794 #else
10795         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
10796 #endif
10797         return ret;
10798 #endif
10799 #endif
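/* A minimal sketch (assumption, not the helper used above) of the 32-bit
 * ABI issue both cases deal with: a 64-bit offset arrives split across two
 * abi_long registers and has to be glued back together, and for
 * sync_file_range2 the flags argument additionally arrives before the
 * offsets (arg2) instead of last.  The real target_offset64() also takes
 * target endianness into account when choosing the high word.
 */
#if 0
#include <stdint.h>

static uint64_t join_offset64(uint32_t low_word, uint32_t high_word)
{
    return ((uint64_t)high_word << 32) | low_word;
}

/* e.g. a little-endian 32-bit guest calling
 *     sync_file_range2(fd, flags, offset, nbytes)
 * arrives here as:  arg1 = fd, arg2 = flags,
 *                   offset = join_offset64(arg3, arg4),
 *                   nbytes = join_offset64(arg5, arg6)
 */
#endif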
10800 #if defined(TARGET_NR_signalfd4)
10801     case TARGET_NR_signalfd4:
10802         return do_signalfd4(arg1, arg2, arg4);
10803 #endif
10804 #if defined(TARGET_NR_signalfd)
10805     case TARGET_NR_signalfd:
10806         return do_signalfd4(arg1, arg2, 0);
10807 #endif
10808 #if defined(CONFIG_EPOLL)
10809 #if defined(TARGET_NR_epoll_create)
10810     case TARGET_NR_epoll_create:
10811         return get_errno(epoll_create(arg1));
10812 #endif
10813 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
10814     case TARGET_NR_epoll_create1:
10815         return get_errno(epoll_create1(arg1));
10816 #endif
10817 #if defined(TARGET_NR_epoll_ctl)
10818     case TARGET_NR_epoll_ctl:
10819     {
10820         struct epoll_event ep;
10821         struct epoll_event *epp = NULL;
10822         if (arg4) {
10823             struct target_epoll_event *target_ep;
10824             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
10825                 return -TARGET_EFAULT;
10826             }
10827             ep.events = tswap32(target_ep->events);
10828             /* The epoll_data_t union is just opaque data to the kernel,
10829              * so we transfer all 64 bits across and need not worry what
10830              * actual data type it is.
10831              */
10832             ep.data.u64 = tswap64(target_ep->data.u64);
10833             unlock_user_struct(target_ep, arg4, 0);
10834             epp = &ep;
10835         }
10836         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
10837     }
10838 #endif
10839 
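/* Illustrative standalone sketch of why the epoll_ctl case above can treat
 * epoll_data_t as a single 64-bit blob: the kernel never interprets the
 * union, so byte-swapping data.u64 across and back round-trips whichever
 * member (ptr, fd, u32, u64) the guest actually stored.  bswap_64() is
 * glibc's <byteswap.h> helper, standing in here for tswap64().
 */
#if 0
#include <assert.h>
#include <byteswap.h>
#include <sys/epoll.h>

int main(void)
{
    epoll_data_t d, copy;

    d.u64 = 0;
    d.fd = 42;                            /* guest stored the fd member */
    copy.u64 = bswap_64(bswap_64(d.u64)); /* swap across, then back */
    assert(copy.fd == 42);                /* stored member is preserved */
    return 0;
}
#endif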
10840 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
10841 #if defined(TARGET_NR_epoll_wait)
10842     case TARGET_NR_epoll_wait:
10843 #endif
10844 #if defined(TARGET_NR_epoll_pwait)
10845     case TARGET_NR_epoll_pwait:
10846 #endif
10847     {
10848         struct target_epoll_event *target_ep;
10849         struct epoll_event *ep;
10850         int epfd = arg1;
10851         int maxevents = arg3;
10852         int timeout = arg4;
10853 
10854         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
10855             return -TARGET_EINVAL;
10856         }
10857 
10858         target_ep = lock_user(VERIFY_WRITE, arg2,
10859                               maxevents * sizeof(struct target_epoll_event), 1);
10860         if (!target_ep) {
10861             return -TARGET_EFAULT;
10862         }
10863 
10864         ep = g_try_new(struct epoll_event, maxevents);
10865         if (!ep) {
10866             unlock_user(target_ep, arg2, 0);
10867             return -TARGET_ENOMEM;
10868         }
10869 
10870         switch (num) {
10871 #if defined(TARGET_NR_epoll_pwait)
10872         case TARGET_NR_epoll_pwait:
10873         {
10874             target_sigset_t *target_set;
10875             sigset_t _set, *set = &_set;
10876 
10877             if (arg5) {
10878                 if (arg6 != sizeof(target_sigset_t)) {
10879                     ret = -TARGET_EINVAL;
10880                     break;
10881                 }
10882 
10883                 target_set = lock_user(VERIFY_READ, arg5,
10884                                        sizeof(target_sigset_t), 1);
10885                 if (!target_set) {
10886                     ret = -TARGET_EFAULT;
10887                     break;
10888                 }
10889                 target_to_host_sigset(set, target_set);
10890                 unlock_user(target_set, arg5, 0);
10891             } else {
10892                 set = NULL;
10893             }
10894 
10895             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
10896                                              set, SIGSET_T_SIZE));
10897             break;
10898         }
10899 #endif
10900 #if defined(TARGET_NR_epoll_wait)
10901         case TARGET_NR_epoll_wait:
10902             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
10903                                              NULL, 0));
10904             break;
10905 #endif
10906         default:
10907             ret = -TARGET_ENOSYS;
10908         }
10909         if (!is_error(ret)) {
10910             int i;
10911             for (i = 0; i < ret; i++) {
10912                 target_ep[i].events = tswap32(ep[i].events);
10913                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
10914             }
10915             unlock_user(target_ep, arg2,
10916                         ret * sizeof(struct target_epoll_event));
10917         } else {
10918             unlock_user(target_ep, arg2, 0);
10919         }
10920         g_free(ep);
10921         return ret;
10922     }
10923 #endif
10924 #endif
10925 #ifdef TARGET_NR_prlimit64
10926     case TARGET_NR_prlimit64:
10927     {
10928         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
10929         struct target_rlimit64 *target_rnew, *target_rold;
10930         struct host_rlimit64 rnew, rold, *rnewp = NULL;
10931         int resource = target_to_host_resource(arg2);
10932         if (arg3) {
10933             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
10934                 return -TARGET_EFAULT;
10935             }
10936             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
10937             rnew.rlim_max = tswap64(target_rnew->rlim_max);
10938             unlock_user_struct(target_rnew, arg3, 0);
10939             rnewp = &rnew;
10940         }
10941 
10942         ret = get_errno(sys_prlimit64(arg1, resource, rnewp,
                                            arg4 ? &rold : NULL));
10943         if (!is_error(ret) && arg4) {
10944             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
10945                 return -TARGET_EFAULT;
10946             }
10947             target_rold->rlim_cur = tswap64(rold.rlim_cur);
10948             target_rold->rlim_max = tswap64(rold.rlim_max);
10949             unlock_user_struct(target_rold, arg4, 1);
10950         }
10951         return ret;
10952     }
10953 #endif
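/* Guest-side view (illustrative, standalone): a glibc prlimit() call like
 * the one below is what reaches the TARGET_NR_prlimit64 case above, where
 * the two rlimit64 structures are converted field by field with tswap64().
 */
#if 0
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
    struct rlimit old;

    /* pid 0 means the calling process; only query, don't change limits. */
    if (prlimit(0, RLIMIT_NOFILE, NULL, &old) == 0) {
        printf("soft=%llu hard=%llu\n",
               (unsigned long long)old.rlim_cur,
               (unsigned long long)old.rlim_max);
    }
    return 0;
}
#endif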
10954 #ifdef TARGET_NR_gethostname
10955     case TARGET_NR_gethostname:
10956     {
10957         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10958         if (name) {
10959             ret = get_errno(gethostname(name, arg2));
10960             unlock_user(name, arg1, arg2);
10961         } else {
10962             ret = -TARGET_EFAULT;
10963         }
10964         return ret;
10965     }
10966 #endif
10967 #ifdef TARGET_NR_atomic_cmpxchg_32
10968     case TARGET_NR_atomic_cmpxchg_32:
10969     {
10970         /* should use start_exclusive from main.c */
10971         abi_ulong mem_value;
10972         if (get_user_u32(mem_value, arg6)) {
10973             target_siginfo_t info;
10974             info.si_signo = SIGSEGV;
10975             info.si_errno = 0;
10976             info.si_code = TARGET_SEGV_MAPERR;
10977             info._sifields._sigfault._addr = arg6;
10978             queue_signal((CPUArchState *)cpu_env, info.si_signo,
10979                          QEMU_SI_FAULT, &info);
10980             ret = 0xdeadbeef;
10981             ret = 0xdeadbeef;
                  /* The load faulted; don't act on a bogus mem_value. */
                  return ret;
10983         }
10984         if (mem_value == arg2) {
10985             put_user_u32(arg1, arg6);
              }
10986         return mem_value;
10987 #endif
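/* Rough sketch only: how the compare-and-exchange above could be made
 * atomic with respect to other guest threads by bracketing it with the
 * start_exclusive()/end_exclusive() helpers the in-code comment refers to.
 * This is not the code used by this file; fault handling is elided.
 */
#if 0
{
    abi_ulong mem_value;

    start_exclusive();
    if (get_user_u32(mem_value, arg6) == 0) {
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);   /* swap in the new value */
        }
    }
    end_exclusive();
    return mem_value;
}
#endif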
10988 #ifdef TARGET_NR_atomic_barrier
10989     case TARGET_NR_atomic_barrier:
10990         /* Like the kernel implementation and the QEMU ARM barrier,
10991          * treat this as a no-op. */
10992         return 0;
10993 #endif
10994 
10995 #ifdef TARGET_NR_timer_create
10996     case TARGET_NR_timer_create:
10997     {
10998         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10999 
11000         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11001 
11002         int clkid = arg1;
11003         int timer_index = next_free_host_timer();
11004 
11005         if (timer_index < 0) {
11006             ret = -TARGET_EAGAIN;
11007         } else {
11008             timer_t *phtimer = g_posix_timers + timer_index;
11009 
11010             if (arg2) {
11011                 phost_sevp = &host_sevp;
11012                 ret = target_to_host_sigevent(phost_sevp, arg2);
11013                 if (ret != 0) {
11014                     return ret;
11015                 }
11016             }
11017 
11018             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11019             if (ret) {
11020                 phtimer = NULL;
11021             } else {
11022                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11023                     return -TARGET_EFAULT;
11024                 }
11025             }
11026         }
11027         return ret;
11028     }
11029 #endif
11030 
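/* Sketch of the reverse mapping used by the timer_* cases below (the real
 * decoding is done by get_timer_id(), defined earlier in this file; the
 * mask value here is an assumption for illustration): timer_create hands
 * the guest TIMER_MAGIC | index, and later calls recover the index into
 * g_posix_timers[] after checking the magic tag.
 */
#if 0
#define SKETCH_TIMER_MAGIC_MASK 0xffff0000  /* assumed: tag in the high half */

static target_timer_t decode_guest_timer_id(abi_long guest_id)
{
    if ((guest_id & SKETCH_TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;               /* not an id we handed out */
    }
    return guest_id & ~SKETCH_TIMER_MAGIC_MASK;  /* index into g_posix_timers[] */
}
#endif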
11031 #ifdef TARGET_NR_timer_settime
11032     case TARGET_NR_timer_settime:
11033     {
11034         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11035          * struct itimerspec * old_value */
11036         target_timer_t timerid = get_timer_id(arg1);
11037 
11038         if (timerid < 0) {
11039             ret = timerid;
11040         } else if (arg3 == 0) {
11041             ret = -TARGET_EINVAL;
11042         } else {
11043             timer_t htimer = g_posix_timers[timerid];
11044             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11045 
11046             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11047                 return -TARGET_EFAULT;
11048             }
11049             ret = get_errno(
11050                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11051             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11052                 return -TARGET_EFAULT;
11053             }
11054         }
11055         return ret;
11056     }
11057 #endif
11058 
11059 #ifdef TARGET_NR_timer_gettime
11060     case TARGET_NR_timer_gettime:
11061     {
11062         /* args: timer_t timerid, struct itimerspec *curr_value */
11063         target_timer_t timerid = get_timer_id(arg1);
11064 
11065         if (timerid < 0) {
11066             ret = timerid;
11067         } else if (!arg2) {
11068             ret = -TARGET_EFAULT;
11069         } else {
11070             timer_t htimer = g_posix_timers[timerid];
11071             struct itimerspec hspec;
11072             ret = get_errno(timer_gettime(htimer, &hspec));
11073 
11074             if (host_to_target_itimerspec(arg2, &hspec)) {
11075                 ret = -TARGET_EFAULT;
11076             }
11077         }
11078         return ret;
11079     }
11080 #endif
11081 
11082 #ifdef TARGET_NR_timer_getoverrun
11083     case TARGET_NR_timer_getoverrun:
11084     {
11085         /* args: timer_t timerid */
11086         target_timer_t timerid = get_timer_id(arg1);
11087 
11088         if (timerid < 0) {
11089             ret = timerid;
11090         } else {
11091             timer_t htimer = g_posix_timers[timerid];
11092             ret = get_errno(timer_getoverrun(htimer));
11093         }
11095         return ret;
11096     }
11097 #endif
11098 
11099 #ifdef TARGET_NR_timer_delete
11100     case TARGET_NR_timer_delete:
11101     {
11102         /* args: timer_t timerid */
11103         target_timer_t timerid = get_timer_id(arg1);
11104 
11105         if (timerid < 0) {
11106             ret = timerid;
11107         } else {
11108             timer_t htimer = g_posix_timers[timerid];
11109             ret = get_errno(timer_delete(htimer));
11110             g_posix_timers[timerid] = 0;
11111         }
11112         return ret;
11113     }
11114 #endif
11115 
11116 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11117     case TARGET_NR_timerfd_create:
11118         return get_errno(timerfd_create(arg1,
11119                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11120 #endif
11121 
11122 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11123     case TARGET_NR_timerfd_gettime:
11124         {
11125             struct itimerspec its_curr;
11126 
11127             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11128 
11129             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11130                 return -TARGET_EFAULT;
11131             }
11132         }
11133         return ret;
11134 #endif
11135 
11136 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11137     case TARGET_NR_timerfd_settime:
11138         {
11139             struct itimerspec its_new, its_old, *p_new;
11140 
11141             if (arg3) {
11142                 if (target_to_host_itimerspec(&its_new, arg3)) {
11143                     return -TARGET_EFAULT;
11144                 }
11145                 p_new = &its_new;
11146             } else {
11147                 p_new = NULL;
11148             }
11149 
11150             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11151 
11152             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11153                 return -TARGET_EFAULT;
11154             }
11155         }
11156         return ret;
11157 #endif
11158 
11159 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11160     case TARGET_NR_ioprio_get:
11161         return get_errno(ioprio_get(arg1, arg2));
11162 #endif
11163 
11164 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11165     case TARGET_NR_ioprio_set:
11166         return get_errno(ioprio_set(arg1, arg2, arg3));
11167 #endif
11168 
11169 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11170     case TARGET_NR_setns:
11171         return get_errno(setns(arg1, arg2));
11172 #endif
11173 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11174     case TARGET_NR_unshare:
11175         return get_errno(unshare(arg1));
11176 #endif
11177 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11178     case TARGET_NR_kcmp:
11179         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11180 #endif
11181 #ifdef TARGET_NR_swapcontext
11182     case TARGET_NR_swapcontext:
11183         /* PowerPC specific.  */
11184         return do_swapcontext(cpu_env, arg1, arg2, arg3);
11185 #endif
11186 
11187     default:
11188         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11189         return -TARGET_ENOSYS;
11190     }
11191     return ret;
11192 }
11193 
11194 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11195                     abi_long arg2, abi_long arg3, abi_long arg4,
11196                     abi_long arg5, abi_long arg6, abi_long arg7,
11197                     abi_long arg8)
11198 {
11199     CPUState *cpu = ENV_GET_CPU(cpu_env);
11200     abi_long ret;
11201 
11202 #ifdef DEBUG_ERESTARTSYS
11203     /* Debug-only code for exercising the syscall-restart code paths
11204      * in the per-architecture cpu main loops: restart every syscall
11205      * the guest makes once before letting it through.
11206      */
11207     {
11208         static bool flag;
11209         flag = !flag;
11210         if (flag) {
11211             return -TARGET_ERESTARTSYS;
11212         }
11213     }
11214 #endif
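/* Minimal usage note (an assumption about how this toggle is meant to be
 * enabled): defining the symbol near the top of this file, e.g.
 *
 *     #define DEBUG_ERESTARTSYS 1
 *
 * makes every guest syscall bounce once with -TARGET_ERESTARTSYS so the
 * per-architecture cpu_loop restart handling is exercised on every call.
 */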
11215 
11216     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11217                              arg5, arg6, arg7, arg8);
11218 
11219     if (unlikely(do_strace)) {
11220         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11221         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11222                           arg5, arg6, arg7, arg8);
11223         print_syscall_ret(num, ret);
11224     } else {
11225         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11226                           arg5, arg6, arg7, arg8);
11227     }
11228 
11229     trace_guest_user_syscall_ret(cpu, num, ret);
11230     return ret;
11231 }
11232