xref: /openbmc/qemu/linux-user/syscall.c (revision 07f426c3)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
77 
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
84 
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #include "linux_loop.h"
106 #include "uname.h"
107 
108 #include "qemu.h"
109 #include "fd-trans.h"
110 
111 #ifndef CLONE_IO
112 #define CLONE_IO                0x80000000      /* Clone io context */
113 #endif
114 
115 /* We can't directly call the host clone syscall, because this will
116  * badly confuse libc (breaking mutexes, for example). So we must
117  * divide clone flags into:
118  *  * flag combinations that look like pthread_create()
119  *  * flag combinations that look like fork()
120  *  * flags we can implement within QEMU itself
121  *  * flags we can't support and will return an error for
122  */
123 /* For thread creation, all these flags must be present; for
124  * fork, none must be present.
125  */
126 #define CLONE_THREAD_FLAGS                              \
127     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
128      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
129 
130 /* These flags are ignored:
131  * CLONE_DETACHED is now ignored by the kernel;
132  * CLONE_IO is just an optimisation hint to the I/O scheduler
133  */
134 #define CLONE_IGNORED_FLAGS                     \
135     (CLONE_DETACHED | CLONE_IO)
136 
137 /* Flags for fork which we can implement within QEMU itself */
138 #define CLONE_OPTIONAL_FORK_FLAGS               \
139     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
140      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
141 
142 /* Flags for thread creation which we can implement within QEMU itself */
143 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
144     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
145      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
146 
147 #define CLONE_INVALID_FORK_FLAGS                                        \
148     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
149 
150 #define CLONE_INVALID_THREAD_FLAGS                                      \
151     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
152        CLONE_IGNORED_FLAGS))
153 
154 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
155  * have almost all been allocated. We cannot support any of
156  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
157  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
158  * The checks against the invalid thread masks above will catch these.
159  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
160  */
161 
162 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
163  * once. This exercises the codepaths for restart.
164  */
165 //#define DEBUG_ERESTARTSYS
166 
167 //#include <linux/msdos_fs.h>
168 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
169 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
170 
171 #undef _syscall0
172 #undef _syscall1
173 #undef _syscall2
174 #undef _syscall3
175 #undef _syscall4
176 #undef _syscall5
177 #undef _syscall6
178 
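/* The _syscallN macros generate thin wrappers that invoke a host syscall
 * directly via syscall(2), for calls that libc does not wrap (or where we
 * deliberately bypass the libc wrapper).
 */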
179 #define _syscall0(type,name)		\
180 static type name (void)			\
181 {					\
182 	return syscall(__NR_##name);	\
183 }
184 
185 #define _syscall1(type,name,type1,arg1)		\
186 static type name (type1 arg1)			\
187 {						\
188 	return syscall(__NR_##name, arg1);	\
189 }
190 
191 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
192 static type name (type1 arg1,type2 arg2)		\
193 {							\
194 	return syscall(__NR_##name, arg1, arg2);	\
195 }
196 
197 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
198 static type name (type1 arg1,type2 arg2,type3 arg3)		\
199 {								\
200 	return syscall(__NR_##name, arg1, arg2, arg3);		\
201 }
202 
203 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
204 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
205 {										\
206 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
207 }
208 
209 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
210 		  type5,arg5)							\
211 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
212 {										\
213 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
214 }
215 
216 
217 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
218 		  type5,arg5,type6,arg6)					\
219 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
220                   type6 arg6)							\
221 {										\
222 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
223 }
224 
225 
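/* Aliases so that the _syscallN macros above can generate wrappers named
 * sys_*() without clashing with libc's own declarations of the unprefixed
 * functions.
 */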
226 #define __NR_sys_uname __NR_uname
227 #define __NR_sys_getcwd1 __NR_getcwd
228 #define __NR_sys_getdents __NR_getdents
229 #define __NR_sys_getdents64 __NR_getdents64
230 #define __NR_sys_getpriority __NR_getpriority
231 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
232 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
233 #define __NR_sys_syslog __NR_syslog
234 #define __NR_sys_futex __NR_futex
235 #define __NR_sys_inotify_init __NR_inotify_init
236 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
237 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
238 
239 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
240 #define __NR__llseek __NR_lseek
241 #endif
242 
243 /* Newer kernel ports have llseek() instead of _llseek() */
244 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
245 #define TARGET_NR__llseek TARGET_NR_llseek
246 #endif
247 
248 #ifdef __NR_gettid
249 _syscall0(int, gettid)
250 #else
251 /* This is a replacement for the host gettid() and must return a host
252    errno. */
253 static int gettid(void) {
254     return -ENOSYS;
255 }
256 #endif
257 
258 /* For the 64-bit guest on 32-bit host case we must emulate
259  * getdents using getdents64, because otherwise the host
260  * might hand us back more dirent records than we can fit
261  * into the guest buffer after structure format conversion.
262  * Otherwise we emulate the guest getdents using the host getdents if available.
263  */
264 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
265 #define EMULATE_GETDENTS_WITH_GETDENTS
266 #endif
267 
268 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
269 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
270 #endif
271 #if (defined(TARGET_NR_getdents) && \
272       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
273     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
274 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
275 #endif
276 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
277 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
278           loff_t *, res, uint, wh);
279 #endif
280 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
281 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
282           siginfo_t *, uinfo)
283 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
284 #ifdef __NR_exit_group
285 _syscall1(int,exit_group,int,error_code)
286 #endif
287 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
288 _syscall1(int,set_tid_address,int *,tidptr)
289 #endif
290 #if defined(TARGET_NR_futex) && defined(__NR_futex)
291 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
292           const struct timespec *,timeout,int *,uaddr2,int,val3)
293 #endif
294 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
295 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
296           unsigned long *, user_mask_ptr);
297 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
298 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
299           unsigned long *, user_mask_ptr);
300 #define __NR_sys_getcpu __NR_getcpu
301 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
302 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
303           void *, arg);
304 _syscall2(int, capget, struct __user_cap_header_struct *, header,
305           struct __user_cap_data_struct *, data);
306 _syscall2(int, capset, struct __user_cap_header_struct *, header,
307           struct __user_cap_data_struct *, data);
308 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
309 _syscall2(int, ioprio_get, int, which, int, who)
310 #endif
311 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
312 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
313 #endif
314 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
315 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
316 #endif
317 
318 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
319 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
320           unsigned long, idx1, unsigned long, idx2)
321 #endif
322 
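/* Translation table between guest and host open()/fcntl() file status
 * flags; each entry gives a target mask/value pair and the corresponding
 * host mask/value pair.
 */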
323 static bitmask_transtbl fcntl_flags_tbl[] = {
324   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
325   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
326   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
327   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
328   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
329   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
330   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
331   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
332   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
333   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
334   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
335   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
336   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
337 #if defined(O_DIRECT)
338   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
339 #endif
340 #if defined(O_NOATIME)
341   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
342 #endif
343 #if defined(O_CLOEXEC)
344   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
345 #endif
346 #if defined(O_PATH)
347   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
348 #endif
349 #if defined(O_TMPFILE)
350   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
351 #endif
352   /* Don't terminate the list prematurely on 64-bit host+guest.  */
353 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
354   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
355 #endif
356   { 0, 0, 0, 0 }
357 };
358 
359 static int sys_getcwd1(char *buf, size_t size)
360 {
361   if (getcwd(buf, size) == NULL) {
362       /* getcwd() sets errno */
363       return (-1);
364   }
365   return strlen(buf)+1;
366 }
367 
368 #ifdef TARGET_NR_utimensat
369 #if defined(__NR_utimensat)
370 #define __NR_sys_utimensat __NR_utimensat
371 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
372           const struct timespec *,tsp,int,flags)
373 #else
374 static int sys_utimensat(int dirfd, const char *pathname,
375                          const struct timespec times[2], int flags)
376 {
377     errno = ENOSYS;
378     return -1;
379 }
380 #endif
381 #endif /* TARGET_NR_utimensat */
382 
383 #ifdef TARGET_NR_renameat2
384 #if defined(__NR_renameat2)
385 #define __NR_sys_renameat2 __NR_renameat2
386 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
387           const char *, new, unsigned int, flags)
388 #else
389 static int sys_renameat2(int oldfd, const char *old,
390                          int newfd, const char *new, int flags)
391 {
392     if (flags == 0) {
393         return renameat(oldfd, old, newfd, new);
394     }
395     errno = ENOSYS;
396     return -1;
397 }
398 #endif
399 #endif /* TARGET_NR_renameat2 */
400 
401 #ifdef CONFIG_INOTIFY
402 #include <sys/inotify.h>
403 
404 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
405 static int sys_inotify_init(void)
406 {
407   return (inotify_init());
408 }
409 #endif
410 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
411 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
412 {
413   return (inotify_add_watch(fd, pathname, mask));
414 }
415 #endif
416 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
417 static int sys_inotify_rm_watch(int fd, int32_t wd)
418 {
419   return (inotify_rm_watch(fd, wd));
420 }
421 #endif
422 #ifdef CONFIG_INOTIFY1
423 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
424 static int sys_inotify_init1(int flags)
425 {
426   return (inotify_init1(flags));
427 }
428 #endif
429 #endif
430 #else
431 /* Userspace can usually survive runtime without inotify */
432 #undef TARGET_NR_inotify_init
433 #undef TARGET_NR_inotify_init1
434 #undef TARGET_NR_inotify_add_watch
435 #undef TARGET_NR_inotify_rm_watch
436 #endif /* CONFIG_INOTIFY  */
437 
438 #if defined(TARGET_NR_prlimit64)
439 #ifndef __NR_prlimit64
440 # define __NR_prlimit64 -1
441 #endif
442 #define __NR_sys_prlimit64 __NR_prlimit64
443 /* The glibc rlimit structure may not be the one used by the underlying syscall */
444 struct host_rlimit64 {
445     uint64_t rlim_cur;
446     uint64_t rlim_max;
447 };
448 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
449           const struct host_rlimit64 *, new_limit,
450           struct host_rlimit64 *, old_limit)
451 #endif
452 
453 
454 #if defined(TARGET_NR_timer_create)
455 /* Maximum of 32 active POSIX timers allowed at any one time. */
456 static timer_t g_posix_timers[32] = { 0, } ;
457 
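/* A zero entry marks a free slot; next_free_host_timer() claims a slot by
 * storing a placeholder non-zero value into it.
 */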
458 static inline int next_free_host_timer(void)
459 {
460     int k ;
461     /* FIXME: Does finding the next free slot require a lock? */
462     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
463         if (g_posix_timers[k] == 0) {
464             g_posix_timers[k] = (timer_t) 1;
465             return k;
466         }
467     }
468     return -1;
469 }
470 #endif
471 
472 /* ARM EABI and MIPS expect 64-bit types aligned even on pairs of registers */
473 #ifdef TARGET_ARM
474 static inline int regpairs_aligned(void *cpu_env, int num)
475 {
476     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
477 }
478 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
479 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
480 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
481 /* The SysV ABI for PPC32 expects 64-bit parameters to be passed in odd/even pairs
482  * of registers, which works out the same as ARM/MIPS because we start with
483  * r3 as arg1 */
484 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
485 #elif defined(TARGET_SH4)
486 /* SH4 doesn't align register pairs, except for p{read,write}64 */
487 static inline int regpairs_aligned(void *cpu_env, int num)
488 {
489     switch (num) {
490     case TARGET_NR_pread64:
491     case TARGET_NR_pwrite64:
492         return 1;
493 
494     default:
495         return 0;
496     }
497 }
498 #elif defined(TARGET_XTENSA)
499 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
500 #else
501 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
502 #endif
503 
504 #define ERRNO_TABLE_SIZE 1200
505 
506 /* target_to_host_errno_table[] is initialized from
507  * host_to_target_errno_table[] in syscall_init(). */
508 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
509 };
510 
511 /*
512  * This list is the union of errno values overridden in asm-<arch>/errno.h
513  * minus the errnos that are not actually generic to all archs.
514  */
515 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
516     [EAGAIN]		= TARGET_EAGAIN,
517     [EIDRM]		= TARGET_EIDRM,
518     [ECHRNG]		= TARGET_ECHRNG,
519     [EL2NSYNC]		= TARGET_EL2NSYNC,
520     [EL3HLT]		= TARGET_EL3HLT,
521     [EL3RST]		= TARGET_EL3RST,
522     [ELNRNG]		= TARGET_ELNRNG,
523     [EUNATCH]		= TARGET_EUNATCH,
524     [ENOCSI]		= TARGET_ENOCSI,
525     [EL2HLT]		= TARGET_EL2HLT,
526     [EDEADLK]		= TARGET_EDEADLK,
527     [ENOLCK]		= TARGET_ENOLCK,
528     [EBADE]		= TARGET_EBADE,
529     [EBADR]		= TARGET_EBADR,
530     [EXFULL]		= TARGET_EXFULL,
531     [ENOANO]		= TARGET_ENOANO,
532     [EBADRQC]		= TARGET_EBADRQC,
533     [EBADSLT]		= TARGET_EBADSLT,
534     [EBFONT]		= TARGET_EBFONT,
535     [ENOSTR]		= TARGET_ENOSTR,
536     [ENODATA]		= TARGET_ENODATA,
537     [ETIME]		= TARGET_ETIME,
538     [ENOSR]		= TARGET_ENOSR,
539     [ENONET]		= TARGET_ENONET,
540     [ENOPKG]		= TARGET_ENOPKG,
541     [EREMOTE]		= TARGET_EREMOTE,
542     [ENOLINK]		= TARGET_ENOLINK,
543     [EADV]		= TARGET_EADV,
544     [ESRMNT]		= TARGET_ESRMNT,
545     [ECOMM]		= TARGET_ECOMM,
546     [EPROTO]		= TARGET_EPROTO,
547     [EDOTDOT]		= TARGET_EDOTDOT,
548     [EMULTIHOP]		= TARGET_EMULTIHOP,
549     [EBADMSG]		= TARGET_EBADMSG,
550     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
551     [EOVERFLOW]		= TARGET_EOVERFLOW,
552     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
553     [EBADFD]		= TARGET_EBADFD,
554     [EREMCHG]		= TARGET_EREMCHG,
555     [ELIBACC]		= TARGET_ELIBACC,
556     [ELIBBAD]		= TARGET_ELIBBAD,
557     [ELIBSCN]		= TARGET_ELIBSCN,
558     [ELIBMAX]		= TARGET_ELIBMAX,
559     [ELIBEXEC]		= TARGET_ELIBEXEC,
560     [EILSEQ]		= TARGET_EILSEQ,
561     [ENOSYS]		= TARGET_ENOSYS,
562     [ELOOP]		= TARGET_ELOOP,
563     [ERESTART]		= TARGET_ERESTART,
564     [ESTRPIPE]		= TARGET_ESTRPIPE,
565     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
566     [EUSERS]		= TARGET_EUSERS,
567     [ENOTSOCK]		= TARGET_ENOTSOCK,
568     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
569     [EMSGSIZE]		= TARGET_EMSGSIZE,
570     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
571     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
572     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
573     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
574     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
575     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
576     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
577     [EADDRINUSE]	= TARGET_EADDRINUSE,
578     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
579     [ENETDOWN]		= TARGET_ENETDOWN,
580     [ENETUNREACH]	= TARGET_ENETUNREACH,
581     [ENETRESET]		= TARGET_ENETRESET,
582     [ECONNABORTED]	= TARGET_ECONNABORTED,
583     [ECONNRESET]	= TARGET_ECONNRESET,
584     [ENOBUFS]		= TARGET_ENOBUFS,
585     [EISCONN]		= TARGET_EISCONN,
586     [ENOTCONN]		= TARGET_ENOTCONN,
587     [EUCLEAN]		= TARGET_EUCLEAN,
588     [ENOTNAM]		= TARGET_ENOTNAM,
589     [ENAVAIL]		= TARGET_ENAVAIL,
590     [EISNAM]		= TARGET_EISNAM,
591     [EREMOTEIO]		= TARGET_EREMOTEIO,
592     [EDQUOT]            = TARGET_EDQUOT,
593     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
594     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
595     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
596     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
597     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
598     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
599     [EALREADY]		= TARGET_EALREADY,
600     [EINPROGRESS]	= TARGET_EINPROGRESS,
601     [ESTALE]		= TARGET_ESTALE,
602     [ECANCELED]		= TARGET_ECANCELED,
603     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
604     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
605 #ifdef ENOKEY
606     [ENOKEY]		= TARGET_ENOKEY,
607 #endif
608 #ifdef EKEYEXPIRED
609     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
610 #endif
611 #ifdef EKEYREVOKED
612     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
613 #endif
614 #ifdef EKEYREJECTED
615     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
616 #endif
617 #ifdef EOWNERDEAD
618     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
619 #endif
620 #ifdef ENOTRECOVERABLE
621     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
622 #endif
623 #ifdef ENOMSG
624     [ENOMSG]            = TARGET_ENOMSG,
625 #endif
626 #ifdef ERFKILL
627     [ERFKILL]           = TARGET_ERFKILL,
628 #endif
629 #ifdef EHWPOISON
630     [EHWPOISON]         = TARGET_EHWPOISON,
631 #endif
632 };
633 
634 static inline int host_to_target_errno(int err)
635 {
636     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
637         host_to_target_errno_table[err]) {
638         return host_to_target_errno_table[err];
639     }
640     return err;
641 }
642 
643 static inline int target_to_host_errno(int err)
644 {
645     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
646         target_to_host_errno_table[err]) {
647         return target_to_host_errno_table[err];
648     }
649     return err;
650 }
651 
652 static inline abi_long get_errno(abi_long ret)
653 {
654     if (ret == -1)
655         return -host_to_target_errno(errno);
656     else
657         return ret;
658 }
659 
660 const char *target_strerror(int err)
661 {
662     if (err == TARGET_ERESTARTSYS) {
663         return "To be restarted";
664     }
665     if (err == TARGET_QEMU_ESIGRETURN) {
666         return "Successful exit from sigreturn";
667     }
668 
669     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
670         return NULL;
671     }
672     return strerror(target_to_host_errno(err));
673 }
674 
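/* The safe_syscall*() macros generate wrappers for blocking host syscalls
 * that go through safe_syscall(), so that a guest signal arriving just
 * before the host syscall is entered can interrupt it and, if necessary,
 * cause it to be restarted (see DEBUG_ERESTARTSYS above).  Failures are
 * reported in the usual -1/errno convention so get_errno() can convert
 * them to target errnos.
 */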
675 #define safe_syscall0(type, name) \
676 static type safe_##name(void) \
677 { \
678     return safe_syscall(__NR_##name); \
679 }
680 
681 #define safe_syscall1(type, name, type1, arg1) \
682 static type safe_##name(type1 arg1) \
683 { \
684     return safe_syscall(__NR_##name, arg1); \
685 }
686 
687 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
688 static type safe_##name(type1 arg1, type2 arg2) \
689 { \
690     return safe_syscall(__NR_##name, arg1, arg2); \
691 }
692 
693 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
694 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
695 { \
696     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
697 }
698 
699 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
700     type4, arg4) \
701 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
702 { \
703     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
704 }
705 
706 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
707     type4, arg4, type5, arg5) \
708 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
709     type5 arg5) \
710 { \
711     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
712 }
713 
714 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
715     type4, arg4, type5, arg5, type6, arg6) \
716 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
717     type5 arg5, type6 arg6) \
718 { \
719     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
720 }
721 
722 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
723 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
724 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
725               int, flags, mode_t, mode)
726 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
727               struct rusage *, rusage)
728 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
729               int, options, struct rusage *, rusage)
730 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
731 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
732               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
733 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
734               struct timespec *, tsp, const sigset_t *, sigmask,
735               size_t, sigsetsize)
736 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
737               int, maxevents, int, timeout, const sigset_t *, sigmask,
738               size_t, sigsetsize)
739 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
740               const struct timespec *,timeout,int *,uaddr2,int,val3)
741 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
742 safe_syscall2(int, kill, pid_t, pid, int, sig)
743 safe_syscall2(int, tkill, int, tid, int, sig)
744 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
745 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
746 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
747 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
748               unsigned long, pos_l, unsigned long, pos_h)
749 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
750               unsigned long, pos_l, unsigned long, pos_h)
751 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
752               socklen_t, addrlen)
753 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
754               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
755 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
756               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
757 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
758 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
759 safe_syscall2(int, flock, int, fd, int, operation)
760 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
761               const struct timespec *, uts, size_t, sigsetsize)
762 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
763               int, flags)
764 safe_syscall2(int, nanosleep, const struct timespec *, req,
765               struct timespec *, rem)
766 #ifdef TARGET_NR_clock_nanosleep
767 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
768               const struct timespec *, req, struct timespec *, rem)
769 #endif
770 #ifdef __NR_msgsnd
771 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
772               int, flags)
773 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
774               long, msgtype, int, flags)
775 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
776               unsigned, nsops, const struct timespec *, timeout)
777 #else
778 /* This host kernel architecture uses a single ipc syscall; fake up
779  * wrappers for the sub-operations to hide this implementation detail.
780  * Annoyingly we can't include linux/ipc.h to get the constant definitions
781  * for the call parameter because some structs in there conflict with the
782  * sys/ipc.h ones. So we just define them here, and rely on them being
783  * the same for all host architectures.
784  */
785 #define Q_SEMTIMEDOP 4
786 #define Q_MSGSND 11
787 #define Q_MSGRCV 12
788 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
789 
790 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
791               void *, ptr, long, fifth)
792 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
793 {
794     return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
795 }
796 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
797 {
798     return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
799 }
800 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
801                            const struct timespec *timeout)
802 {
803     return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
804                     (long)timeout);
805 }
806 #endif
807 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
808 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
809               size_t, len, unsigned, prio, const struct timespec *, timeout)
810 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
811               size_t, len, unsigned *, prio, const struct timespec *, timeout)
812 #endif
813 /* We do ioctl like this rather than via safe_syscall3 to preserve the
814  * "third argument might be integer or pointer or not present" behaviour of
815  * the libc function.
816  */
817 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
818 /* Similarly for fcntl. Note that callers must always:
819  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
820  *  use the flock64 struct rather than unsuffixed flock
821  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
822  */
823 #ifdef __NR_fcntl64
824 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
825 #else
826 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
827 #endif
828 
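/* Convert a host SOCK_* socket type, including the SOCK_CLOEXEC and
 * SOCK_NONBLOCK modifier bits, into the target's encoding.
 */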
829 static inline int host_to_target_sock_type(int host_type)
830 {
831     int target_type;
832 
833     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
834     case SOCK_DGRAM:
835         target_type = TARGET_SOCK_DGRAM;
836         break;
837     case SOCK_STREAM:
838         target_type = TARGET_SOCK_STREAM;
839         break;
840     default:
841         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
842         break;
843     }
844 
845 #if defined(SOCK_CLOEXEC)
846     if (host_type & SOCK_CLOEXEC) {
847         target_type |= TARGET_SOCK_CLOEXEC;
848     }
849 #endif
850 
851 #if defined(SOCK_NONBLOCK)
852     if (host_type & SOCK_NONBLOCK) {
853         target_type |= TARGET_SOCK_NONBLOCK;
854     }
855 #endif
856 
857     return target_type;
858 }
859 
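/* Guest heap state for do_brk(): target_original_brk is the initial break,
 * target_brk is the current guest break, and brk_page is the host-page-aligned
 * top of the memory currently mapped for the heap.
 */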
860 static abi_ulong target_brk;
861 static abi_ulong target_original_brk;
862 static abi_ulong brk_page;
863 
864 void target_set_brk(abi_ulong new_brk)
865 {
866     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
867     brk_page = HOST_PAGE_ALIGN(target_brk);
868 }
869 
870 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
871 #define DEBUGF_BRK(message, args...)
872 
873 /* do_brk() must return target values and target errnos. */
874 abi_long do_brk(abi_ulong new_brk)
875 {
876     abi_long mapped_addr;
877     abi_ulong new_alloc_size;
878 
879     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
880 
881     if (!new_brk) {
882         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
883         return target_brk;
884     }
885     if (new_brk < target_original_brk) {
886         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
887                    target_brk);
888         return target_brk;
889     }
890 
891     /* If the new brk is less than the highest page reserved to the
892      * target heap allocation, set it and we're almost done...  */
893     if (new_brk <= brk_page) {
894         /* Heap contents are initialized to zero, as for anonymous
895          * mapped pages.  */
896         if (new_brk > target_brk) {
897             memset(g2h(target_brk), 0, new_brk - target_brk);
898         }
899         target_brk = new_brk;
900         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
901         return target_brk;
902     }
903 
904     /* We need to allocate more memory after the brk... Note that
905      * we don't use MAP_FIXED because that will map over the top of
906      * any existing mapping (like the one with the host libc or qemu
907      * itself); instead we treat "mapped but at wrong address" as
908      * a failure and unmap again.
909      */
910     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
911     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
912                                         PROT_READ|PROT_WRITE,
913                                         MAP_ANON|MAP_PRIVATE, 0, 0));
914 
915     if (mapped_addr == brk_page) {
916         /* Heap contents are initialized to zero, as for anonymous
917          * mapped pages.  Technically the new pages are already
918          * initialized to zero since they *are* anonymous mapped
919          * pages, however we have to take care with the contents that
920          * come from the remaining part of the previous page: it may
921          * contains garbage data due to a previous heap usage (grown
922          * then shrunken).  */
923         memset(g2h(target_brk), 0, brk_page - target_brk);
924 
925         target_brk = new_brk;
926         brk_page = HOST_PAGE_ALIGN(target_brk);
927         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
928             target_brk);
929         return target_brk;
930     } else if (mapped_addr != -1) {
931         /* Mapped but at wrong address, meaning there wasn't actually
932          * enough space for this brk.
933          */
934         target_munmap(mapped_addr, new_alloc_size);
935         mapped_addr = -1;
936         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
937     }
938     else {
939         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
940     }
941 
942 #if defined(TARGET_ALPHA)
943     /* We (partially) emulate OSF/1 on Alpha, which requires we
944        return a proper errno, not an unchanged brk value.  */
945     return -TARGET_ENOMEM;
946 #endif
947     /* For everything else, return the previous break. */
948     return target_brk;
949 }
950 
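/* Read a guest fd_set (a packed array of abi_ulong bit words) covering n
 * descriptors from guest memory into a host fd_set.
 */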
951 static inline abi_long copy_from_user_fdset(fd_set *fds,
952                                             abi_ulong target_fds_addr,
953                                             int n)
954 {
955     int i, nw, j, k;
956     abi_ulong b, *target_fds;
957 
958     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
959     if (!(target_fds = lock_user(VERIFY_READ,
960                                  target_fds_addr,
961                                  sizeof(abi_ulong) * nw,
962                                  1)))
963         return -TARGET_EFAULT;
964 
965     FD_ZERO(fds);
966     k = 0;
967     for (i = 0; i < nw; i++) {
968         /* grab the abi_ulong */
969         __get_user(b, &target_fds[i]);
970         for (j = 0; j < TARGET_ABI_BITS; j++) {
971             /* check the bit inside the abi_ulong */
972             if ((b >> j) & 1)
973                 FD_SET(k, fds);
974             k++;
975         }
976     }
977 
978     unlock_user(target_fds, target_fds_addr, 0);
979 
980     return 0;
981 }
982 
983 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
984                                                  abi_ulong target_fds_addr,
985                                                  int n)
986 {
987     if (target_fds_addr) {
988         if (copy_from_user_fdset(fds, target_fds_addr, n))
989             return -TARGET_EFAULT;
990         *fds_ptr = fds;
991     } else {
992         *fds_ptr = NULL;
993     }
994     return 0;
995 }
996 
997 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
998                                           const fd_set *fds,
999                                           int n)
1000 {
1001     int i, nw, j, k;
1002     abi_long v;
1003     abi_ulong *target_fds;
1004 
1005     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1006     if (!(target_fds = lock_user(VERIFY_WRITE,
1007                                  target_fds_addr,
1008                                  sizeof(abi_ulong) * nw,
1009                                  0)))
1010         return -TARGET_EFAULT;
1011 
1012     k = 0;
1013     for (i = 0; i < nw; i++) {
1014         v = 0;
1015         for (j = 0; j < TARGET_ABI_BITS; j++) {
1016             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1017             k++;
1018         }
1019         __put_user(v, &target_fds[i]);
1020     }
1021 
1022     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1023 
1024     return 0;
1025 }
1026 
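/* Tick rate assumed for host clock_t values: 1024 on Alpha hosts, 100
 * elsewhere; host_to_target_clock_t() below rescales these to TARGET_HZ.
 */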
1027 #if defined(__alpha__)
1028 #define HOST_HZ 1024
1029 #else
1030 #define HOST_HZ 100
1031 #endif
1032 
1033 static inline abi_long host_to_target_clock_t(long ticks)
1034 {
1035 #if HOST_HZ == TARGET_HZ
1036     return ticks;
1037 #else
1038     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1039 #endif
1040 }
1041 
1042 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1043                                              const struct rusage *rusage)
1044 {
1045     struct target_rusage *target_rusage;
1046 
1047     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1048         return -TARGET_EFAULT;
1049     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1050     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1051     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1052     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1053     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1054     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1055     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1056     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1057     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1058     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1059     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1060     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1061     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1062     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1063     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1064     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1065     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1066     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1067     unlock_user_struct(target_rusage, target_addr, 1);
1068 
1069     return 0;
1070 }
1071 
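/* Convert a guest rlimit value to a host rlim_t, mapping
 * TARGET_RLIM_INFINITY (and any value that does not fit in rlim_t)
 * to RLIM_INFINITY.
 */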
1072 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1073 {
1074     abi_ulong target_rlim_swap;
1075     rlim_t result;
1076 
1077     target_rlim_swap = tswapal(target_rlim);
1078     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1079         return RLIM_INFINITY;
1080 
1081     result = target_rlim_swap;
1082     if (target_rlim_swap != (rlim_t)result)
1083         return RLIM_INFINITY;
1084 
1085     return result;
1086 }
1087 
1088 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1089 {
1090     abi_ulong target_rlim_swap;
1091     abi_ulong result;
1092 
1093     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1094         target_rlim_swap = TARGET_RLIM_INFINITY;
1095     else
1096         target_rlim_swap = rlim;
1097     result = tswapal(target_rlim_swap);
1098 
1099     return result;
1100 }
1101 
1102 static inline int target_to_host_resource(int code)
1103 {
1104     switch (code) {
1105     case TARGET_RLIMIT_AS:
1106         return RLIMIT_AS;
1107     case TARGET_RLIMIT_CORE:
1108         return RLIMIT_CORE;
1109     case TARGET_RLIMIT_CPU:
1110         return RLIMIT_CPU;
1111     case TARGET_RLIMIT_DATA:
1112         return RLIMIT_DATA;
1113     case TARGET_RLIMIT_FSIZE:
1114         return RLIMIT_FSIZE;
1115     case TARGET_RLIMIT_LOCKS:
1116         return RLIMIT_LOCKS;
1117     case TARGET_RLIMIT_MEMLOCK:
1118         return RLIMIT_MEMLOCK;
1119     case TARGET_RLIMIT_MSGQUEUE:
1120         return RLIMIT_MSGQUEUE;
1121     case TARGET_RLIMIT_NICE:
1122         return RLIMIT_NICE;
1123     case TARGET_RLIMIT_NOFILE:
1124         return RLIMIT_NOFILE;
1125     case TARGET_RLIMIT_NPROC:
1126         return RLIMIT_NPROC;
1127     case TARGET_RLIMIT_RSS:
1128         return RLIMIT_RSS;
1129     case TARGET_RLIMIT_RTPRIO:
1130         return RLIMIT_RTPRIO;
1131     case TARGET_RLIMIT_SIGPENDING:
1132         return RLIMIT_SIGPENDING;
1133     case TARGET_RLIMIT_STACK:
1134         return RLIMIT_STACK;
1135     default:
1136         return code;
1137     }
1138 }
1139 
1140 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1141                                               abi_ulong target_tv_addr)
1142 {
1143     struct target_timeval *target_tv;
1144 
1145     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1146         return -TARGET_EFAULT;
1147 
1148     __get_user(tv->tv_sec, &target_tv->tv_sec);
1149     __get_user(tv->tv_usec, &target_tv->tv_usec);
1150 
1151     unlock_user_struct(target_tv, target_tv_addr, 0);
1152 
1153     return 0;
1154 }
1155 
1156 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1157                                             const struct timeval *tv)
1158 {
1159     struct target_timeval *target_tv;
1160 
1161     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1162         return -TARGET_EFAULT;
1163 
1164     __put_user(tv->tv_sec, &target_tv->tv_sec);
1165     __put_user(tv->tv_usec, &target_tv->tv_usec);
1166 
1167     unlock_user_struct(target_tv, target_tv_addr, 1);
1168 
1169     return 0;
1170 }
1171 
1172 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1173                                                abi_ulong target_tz_addr)
1174 {
1175     struct target_timezone *target_tz;
1176 
1177     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1178         return -TARGET_EFAULT;
1179     }
1180 
1181     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1182     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1183 
1184     unlock_user_struct(target_tz, target_tz_addr, 0);
1185 
1186     return 0;
1187 }
1188 
1189 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1190 #include <mqueue.h>
1191 
1192 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1193                                               abi_ulong target_mq_attr_addr)
1194 {
1195     struct target_mq_attr *target_mq_attr;
1196 
1197     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1198                           target_mq_attr_addr, 1))
1199         return -TARGET_EFAULT;
1200 
1201     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1202     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1203     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1204     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1205 
1206     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1207 
1208     return 0;
1209 }
1210 
1211 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1212                                             const struct mq_attr *attr)
1213 {
1214     struct target_mq_attr *target_mq_attr;
1215 
1216     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1217                           target_mq_attr_addr, 0))
1218         return -TARGET_EFAULT;
1219 
1220     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1221     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1222     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1223     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1224 
1225     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1226 
1227     return 0;
1228 }
1229 #endif
1230 
1231 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1232 /* do_select() must return target values and target errnos. */
1233 static abi_long do_select(int n,
1234                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1235                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1236 {
1237     fd_set rfds, wfds, efds;
1238     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1239     struct timeval tv;
1240     struct timespec ts, *ts_ptr;
1241     abi_long ret;
1242 
1243     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1244     if (ret) {
1245         return ret;
1246     }
1247     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1248     if (ret) {
1249         return ret;
1250     }
1251     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1252     if (ret) {
1253         return ret;
1254     }
1255 
1256     if (target_tv_addr) {
1257         if (copy_from_user_timeval(&tv, target_tv_addr))
1258             return -TARGET_EFAULT;
1259         ts.tv_sec = tv.tv_sec;
1260         ts.tv_nsec = tv.tv_usec * 1000;
1261         ts_ptr = &ts;
1262     } else {
1263         ts_ptr = NULL;
1264     }
1265 
1266     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1267                                   ts_ptr, NULL));
1268 
1269     if (!is_error(ret)) {
1270         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1271             return -TARGET_EFAULT;
1272         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1273             return -TARGET_EFAULT;
1274         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1275             return -TARGET_EFAULT;
1276 
1277         if (target_tv_addr) {
1278             tv.tv_sec = ts.tv_sec;
1279             tv.tv_usec = ts.tv_nsec / 1000;
1280             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1281                 return -TARGET_EFAULT;
1282             }
1283         }
1284     }
1285 
1286     return ret;
1287 }
1288 
1289 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1290 static abi_long do_old_select(abi_ulong arg1)
1291 {
1292     struct target_sel_arg_struct *sel;
1293     abi_ulong inp, outp, exp, tvp;
1294     long nsel;
1295 
1296     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1297         return -TARGET_EFAULT;
1298     }
1299 
1300     nsel = tswapal(sel->n);
1301     inp = tswapal(sel->inp);
1302     outp = tswapal(sel->outp);
1303     exp = tswapal(sel->exp);
1304     tvp = tswapal(sel->tvp);
1305 
1306     unlock_user_struct(sel, arg1, 0);
1307 
1308     return do_select(nsel, inp, outp, exp, tvp);
1309 }
1310 #endif
1311 #endif
1312 
1313 static abi_long do_pipe2(int host_pipe[], int flags)
1314 {
1315 #ifdef CONFIG_PIPE2
1316     return pipe2(host_pipe, flags);
1317 #else
1318     return -ENOSYS;
1319 #endif
1320 }
1321 
1322 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1323                         int flags, int is_pipe2)
1324 {
1325     int host_pipe[2];
1326     abi_long ret;
1327     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1328 
1329     if (is_error(ret))
1330         return get_errno(ret);
1331 
1332     /* Several targets have special calling conventions for the original
1333        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1334     if (!is_pipe2) {
1335 #if defined(TARGET_ALPHA)
1336         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1337         return host_pipe[0];
1338 #elif defined(TARGET_MIPS)
1339         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1340         return host_pipe[0];
1341 #elif defined(TARGET_SH4)
1342         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1343         return host_pipe[0];
1344 #elif defined(TARGET_SPARC)
1345         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1346         return host_pipe[0];
1347 #endif
1348     }
1349 
1350     if (put_user_s32(host_pipe[0], pipedes)
1351         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1352         return -TARGET_EFAULT;
1353     return get_errno(ret);
1354 }
1355 
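/* Copy an IP multicast membership request from guest memory; len selects
 * between the short ip_mreq layout and the longer ip_mreqn layout that
 * also carries an interface index.
 */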
1356 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1357                                               abi_ulong target_addr,
1358                                               socklen_t len)
1359 {
1360     struct target_ip_mreqn *target_smreqn;
1361 
1362     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1363     if (!target_smreqn)
1364         return -TARGET_EFAULT;
1365     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1366     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1367     if (len == sizeof(struct target_ip_mreqn))
1368         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1369     unlock_user(target_smreqn, target_addr, 0);
1370 
1371     return 0;
1372 }
1373 
1374 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1375                                                abi_ulong target_addr,
1376                                                socklen_t len)
1377 {
1378     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1379     sa_family_t sa_family;
1380     struct target_sockaddr *target_saddr;
1381 
1382     if (fd_trans_target_to_host_addr(fd)) {
1383         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1384     }
1385 
1386     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1387     if (!target_saddr)
1388         return -TARGET_EFAULT;
1389 
1390     sa_family = tswap16(target_saddr->sa_family);
1391 
1392     /* Oops. The caller might send an incomplete sun_path; sun_path
1393      * must be terminated by \0 (see the manual page), but
1394      * unfortunately it is quite common to specify sockaddr_un
1395      * length as "strlen(x->sun_path)" while it should be
1396      * "strlen(...) + 1". We'll fix that here if needed.
1397      * The Linux kernel applies a similar fixup.
1398      */
1399 
1400     if (sa_family == AF_UNIX) {
1401         if (len < unix_maxlen && len > 0) {
1402             char *cp = (char*)target_saddr;
1403 
1404             if ( cp[len-1] && !cp[len] )
1405                 len++;
1406         }
1407         if (len > unix_maxlen)
1408             len = unix_maxlen;
1409     }
1410 
1411     memcpy(addr, target_saddr, len);
1412     addr->sa_family = sa_family;
1413     if (sa_family == AF_NETLINK) {
1414         struct sockaddr_nl *nladdr;
1415 
1416         nladdr = (struct sockaddr_nl *)addr;
1417         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1418         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1419     } else if (sa_family == AF_PACKET) {
1420         struct target_sockaddr_ll *lladdr;
1421 
1422         lladdr = (struct target_sockaddr_ll *)addr;
1423         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1424         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1425     }
1426     unlock_user(target_saddr, target_addr, 0);
1427 
1428     return 0;
1429 }
1430 
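/* Copy a host sockaddr out to guest memory, byte-swapping the family field
 * and the family-specific fields we know about (netlink, packet, IPv6
 * scope id).
 */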
1431 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1432                                                struct sockaddr *addr,
1433                                                socklen_t len)
1434 {
1435     struct target_sockaddr *target_saddr;
1436 
1437     if (len == 0) {
1438         return 0;
1439     }
1440     assert(addr);
1441 
1442     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1443     if (!target_saddr)
1444         return -TARGET_EFAULT;
1445     memcpy(target_saddr, addr, len);
1446     if (len >= offsetof(struct target_sockaddr, sa_family) +
1447         sizeof(target_saddr->sa_family)) {
1448         target_saddr->sa_family = tswap16(addr->sa_family);
1449     }
1450     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1451         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1452         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1453         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1454     } else if (addr->sa_family == AF_PACKET) {
1455         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1456         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1457         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1458     } else if (addr->sa_family == AF_INET6 &&
1459                len >= sizeof(struct target_sockaddr_in6)) {
1460         struct target_sockaddr_in6 *target_in6 =
1461                (struct target_sockaddr_in6 *)target_saddr;
1462         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1463     }
1464     unlock_user(target_saddr, target_addr, len);
1465 
1466     return 0;
1467 }
1468 
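/* Convert the ancillary data (control messages) of a guest msghdr into the
 * host msghdr: SCM_RIGHTS file descriptors and SCM_CREDENTIALS are converted
 * field by field; anything else is copied through unchanged (with a warning).
 */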
1469 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1470                                            struct target_msghdr *target_msgh)
1471 {
1472     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1473     abi_long msg_controllen;
1474     abi_ulong target_cmsg_addr;
1475     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1476     socklen_t space = 0;
1477 
1478     msg_controllen = tswapal(target_msgh->msg_controllen);
1479     if (msg_controllen < sizeof (struct target_cmsghdr))
1480         goto the_end;
1481     target_cmsg_addr = tswapal(target_msgh->msg_control);
1482     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1483     target_cmsg_start = target_cmsg;
1484     if (!target_cmsg)
1485         return -TARGET_EFAULT;
1486 
1487     while (cmsg && target_cmsg) {
1488         void *data = CMSG_DATA(cmsg);
1489         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1490 
1491         int len = tswapal(target_cmsg->cmsg_len)
1492             - sizeof(struct target_cmsghdr);
1493 
1494         space += CMSG_SPACE(len);
1495         if (space > msgh->msg_controllen) {
1496             space -= CMSG_SPACE(len);
1497             /* This is a QEMU bug, since we allocated the payload
1498              * area ourselves (unlike overflow in host-to-target
1499              * conversion, which is just the guest giving us a buffer
1500              * that's too small). It can't happen for the payload types
1501              * we currently support; if it becomes an issue in future
1502              * we would need to improve our allocation strategy to
1503              * something more intelligent than "twice the size of the
1504              * target buffer we're reading from".
1505              */
1506             gemu_log("Host cmsg overflow\n");
1507             break;
1508         }
1509 
1510         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1511             cmsg->cmsg_level = SOL_SOCKET;
1512         } else {
1513             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1514         }
1515         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1516         cmsg->cmsg_len = CMSG_LEN(len);
1517 
1518         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1519             int *fd = (int *)data;
1520             int *target_fd = (int *)target_data;
1521             int i, numfds = len / sizeof(int);
1522 
1523             for (i = 0; i < numfds; i++) {
1524                 __get_user(fd[i], target_fd + i);
1525             }
1526         } else if (cmsg->cmsg_level == SOL_SOCKET
1527                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1528             struct ucred *cred = (struct ucred *)data;
1529             struct target_ucred *target_cred =
1530                 (struct target_ucred *)target_data;
1531 
1532             __get_user(cred->pid, &target_cred->pid);
1533             __get_user(cred->uid, &target_cred->uid);
1534             __get_user(cred->gid, &target_cred->gid);
1535         } else {
1536             gemu_log("Unsupported ancillary data: %d/%d\n",
1537                                         cmsg->cmsg_level, cmsg->cmsg_type);
1538             memcpy(data, target_data, len);
1539         }
1540 
1541         cmsg = CMSG_NXTHDR(msgh, cmsg);
1542         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1543                                          target_cmsg_start);
1544     }
1545     unlock_user(target_cmsg, target_cmsg_addr, 0);
1546  the_end:
1547     msgh->msg_controllen = space;
1548     return 0;
1549 }
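/*
 * Illustrative guest-side sender for the SCM_RIGHTS path handled by
 * target_to_host_cmsg() above (a hypothetical example, not taken from
 * QEMU itself): each int in the control payload is fetched with
 * __get_user(), which also corrects its byte order for the host.
 *
 *     #include <string.h>
 *     #include <sys/socket.h>
 *
 *     static int send_fd(int sock, int fd)
 *     {
 *         union {
 *             char buf[CMSG_SPACE(sizeof(int))];
 *             struct cmsghdr align;
 *         } u;
 *         char dummy = 'x';
 *         struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
 *         struct msghdr msg = {
 *             .msg_iov = &iov, .msg_iovlen = 1,
 *             .msg_control = u.buf, .msg_controllen = sizeof(u.buf),
 *         };
 *         struct cmsghdr *cmsg;
 *
 *         memset(&u, 0, sizeof(u));
 *         cmsg = CMSG_FIRSTHDR(&msg);
 *         cmsg->cmsg_level = SOL_SOCKET;
 *         cmsg->cmsg_type = SCM_RIGHTS;
 *         cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *         memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
 *         return sendmsg(sock, &msg, 0);
 *     }
 */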
1550 
1551 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1552                                            struct msghdr *msgh)
1553 {
1554     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1555     abi_long msg_controllen;
1556     abi_ulong target_cmsg_addr;
1557     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1558     socklen_t space = 0;
1559 
1560     msg_controllen = tswapal(target_msgh->msg_controllen);
1561     if (msg_controllen < sizeof (struct target_cmsghdr))
1562         goto the_end;
1563     target_cmsg_addr = tswapal(target_msgh->msg_control);
1564     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1565     target_cmsg_start = target_cmsg;
1566     if (!target_cmsg)
1567         return -TARGET_EFAULT;
1568 
1569     while (cmsg && target_cmsg) {
1570         void *data = CMSG_DATA(cmsg);
1571         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1572 
1573         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1574         int tgt_len, tgt_space;
1575 
1576         /* We never copy a half-header but may copy half-data;
1577          * this is Linux's behaviour in put_cmsg(). Note that
1578          * truncation here is a guest problem (which we report
1579          * to the guest via the CTRUNC bit), unlike truncation
1580          * in target_to_host_cmsg, which is a QEMU bug.
1581          */
1582         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1583             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1584             break;
1585         }
1586 
1587         if (cmsg->cmsg_level == SOL_SOCKET) {
1588             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1589         } else {
1590             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1591         }
1592         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1593 
1594         /* Payload types which need a different size of payload on
1595          * the target must adjust tgt_len here.
1596          */
1597         tgt_len = len;
1598         switch (cmsg->cmsg_level) {
1599         case SOL_SOCKET:
1600             switch (cmsg->cmsg_type) {
1601             case SO_TIMESTAMP:
1602                 tgt_len = sizeof(struct target_timeval);
1603                 break;
1604             default:
1605                 break;
1606             }
1607             break;
1608         default:
1609             break;
1610         }
1611 
1612         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1613             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1614             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1615         }
1616 
1617         /* We must now copy-and-convert len bytes of payload
1618          * into tgt_len bytes of destination space. Bear in mind
1619          * that in both source and destination we may be dealing
1620          * with a truncated value!
1621          */
1622         switch (cmsg->cmsg_level) {
1623         case SOL_SOCKET:
1624             switch (cmsg->cmsg_type) {
1625             case SCM_RIGHTS:
1626             {
1627                 int *fd = (int *)data;
1628                 int *target_fd = (int *)target_data;
1629                 int i, numfds = tgt_len / sizeof(int);
1630 
1631                 for (i = 0; i < numfds; i++) {
1632                     __put_user(fd[i], target_fd + i);
1633                 }
1634                 break;
1635             }
1636             case SO_TIMESTAMP:
1637             {
1638                 struct timeval *tv = (struct timeval *)data;
1639                 struct target_timeval *target_tv =
1640                     (struct target_timeval *)target_data;
1641 
1642                 if (len != sizeof(struct timeval) ||
1643                     tgt_len != sizeof(struct target_timeval)) {
1644                     goto unimplemented;
1645                 }
1646 
1647                 /* copy struct timeval to target */
1648                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1649                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1650                 break;
1651             }
1652             case SCM_CREDENTIALS:
1653             {
1654                 struct ucred *cred = (struct ucred *)data;
1655                 struct target_ucred *target_cred =
1656                     (struct target_ucred *)target_data;
1657 
1658                 __put_user(cred->pid, &target_cred->pid);
1659                 __put_user(cred->uid, &target_cred->uid);
1660                 __put_user(cred->gid, &target_cred->gid);
1661                 break;
1662             }
1663             default:
1664                 goto unimplemented;
1665             }
1666             break;
1667 
1668         case SOL_IP:
1669             switch (cmsg->cmsg_type) {
1670             case IP_TTL:
1671             {
1672                 uint32_t *v = (uint32_t *)data;
1673                 uint32_t *t_int = (uint32_t *)target_data;
1674 
1675                 if (len != sizeof(uint32_t) ||
1676                     tgt_len != sizeof(uint32_t)) {
1677                     goto unimplemented;
1678                 }
1679                 __put_user(*v, t_int);
1680                 break;
1681             }
1682             case IP_RECVERR:
1683             {
1684                 struct errhdr_t {
1685                    struct sock_extended_err ee;
1686                    struct sockaddr_in offender;
1687                 };
1688                 struct errhdr_t *errh = (struct errhdr_t *)data;
1689                 struct errhdr_t *target_errh =
1690                     (struct errhdr_t *)target_data;
1691 
1692                 if (len != sizeof(struct errhdr_t) ||
1693                     tgt_len != sizeof(struct errhdr_t)) {
1694                     goto unimplemented;
1695                 }
1696                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1697                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1698                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1699                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1700                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1701                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1702                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1703                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1704                     (void *) &errh->offender, sizeof(errh->offender));
1705                 break;
1706             }
1707             default:
1708                 goto unimplemented;
1709             }
1710             break;
1711 
1712         case SOL_IPV6:
1713             switch (cmsg->cmsg_type) {
1714             case IPV6_HOPLIMIT:
1715             {
1716                 uint32_t *v = (uint32_t *)data;
1717                 uint32_t *t_int = (uint32_t *)target_data;
1718 
1719                 if (len != sizeof(uint32_t) ||
1720                     tgt_len != sizeof(uint32_t)) {
1721                     goto unimplemented;
1722                 }
1723                 __put_user(*v, t_int);
1724                 break;
1725             }
1726             case IPV6_RECVERR:
1727             {
1728                 struct errhdr6_t {
1729                    struct sock_extended_err ee;
1730                    struct sockaddr_in6 offender;
1731                 };
1732                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1733                 struct errhdr6_t *target_errh =
1734                     (struct errhdr6_t *)target_data;
1735 
1736                 if (len != sizeof(struct errhdr6_t) ||
1737                     tgt_len != sizeof(struct errhdr6_t)) {
1738                     goto unimplemented;
1739                 }
1740                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1741                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1742                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1743                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1744                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1745                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1746                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1747                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1748                     (void *) &errh->offender, sizeof(errh->offender));
1749                 break;
1750             }
1751             default:
1752                 goto unimplemented;
1753             }
1754             break;
1755 
1756         default:
1757         unimplemented:
1758             gemu_log("Unsupported ancillary data: %d/%d\n",
1759                                         cmsg->cmsg_level, cmsg->cmsg_type);
1760             memcpy(target_data, data, MIN(len, tgt_len));
1761             if (tgt_len > len) {
1762                 memset(target_data + len, 0, tgt_len - len);
1763             }
1764         }
1765 
1766         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1767         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1768         if (msg_controllen < tgt_space) {
1769             tgt_space = msg_controllen;
1770         }
1771         msg_controllen -= tgt_space;
1772         space += tgt_space;
1773         cmsg = CMSG_NXTHDR(msgh, cmsg);
1774         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1775                                          target_cmsg_start);
1776     }
1777     unlock_user(target_cmsg, target_cmsg_addr, space);
1778  the_end:
1779     target_msgh->msg_controllen = tswapal(space);
1780     return 0;
1781 }
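/*
 * Worked size example for the SO_TIMESTAMP conversion above (illustrative
 * figures assuming a 64-bit host and a 32-bit guest): the host kernel
 * delivers a 16-byte struct timeval, but the guest expects an 8-byte
 * target_timeval, so tgt_len is adjusted from 16 down to 8 and only
 * TARGET_CMSG_SPACE(8) bytes of the guest's control buffer are consumed.
 * If msg_controllen cannot hold even TARGET_CMSG_LEN(8), the payload is
 * truncated and MSG_CTRUNC is reported to the guest, mirroring Linux's
 * put_cmsg() behaviour.
 */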
1782 
1783 /* do_setsockopt() Must return target values and target errnos. */
1784 static abi_long do_setsockopt(int sockfd, int level, int optname,
1785                               abi_ulong optval_addr, socklen_t optlen)
1786 {
1787     abi_long ret;
1788     int val;
1789     struct ip_mreqn *ip_mreq;
1790     struct ip_mreq_source *ip_mreq_source;
1791 
1792     switch(level) {
1793     case SOL_TCP:
1794         /* TCP options all take an 'int' value.  */
1795         if (optlen < sizeof(uint32_t))
1796             return -TARGET_EINVAL;
1797 
1798         if (get_user_u32(val, optval_addr))
1799             return -TARGET_EFAULT;
1800         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1801         break;
1802     case SOL_IP:
1803         switch(optname) {
1804         case IP_TOS:
1805         case IP_TTL:
1806         case IP_HDRINCL:
1807         case IP_ROUTER_ALERT:
1808         case IP_RECVOPTS:
1809         case IP_RETOPTS:
1810         case IP_PKTINFO:
1811         case IP_MTU_DISCOVER:
1812         case IP_RECVERR:
1813         case IP_RECVTTL:
1814         case IP_RECVTOS:
1815 #ifdef IP_FREEBIND
1816         case IP_FREEBIND:
1817 #endif
1818         case IP_MULTICAST_TTL:
1819         case IP_MULTICAST_LOOP:
1820             val = 0;
1821             if (optlen >= sizeof(uint32_t)) {
1822                 if (get_user_u32(val, optval_addr))
1823                     return -TARGET_EFAULT;
1824             } else if (optlen >= 1) {
1825                 if (get_user_u8(val, optval_addr))
1826                     return -TARGET_EFAULT;
1827             }
1828             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1829             break;
1830         case IP_ADD_MEMBERSHIP:
1831         case IP_DROP_MEMBERSHIP:
1832             if (optlen < sizeof (struct target_ip_mreq) ||
1833                 optlen > sizeof (struct target_ip_mreqn))
1834                 return -TARGET_EINVAL;
1835 
1836             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1837             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1838             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1839             break;
1840 
1841         case IP_BLOCK_SOURCE:
1842         case IP_UNBLOCK_SOURCE:
1843         case IP_ADD_SOURCE_MEMBERSHIP:
1844         case IP_DROP_SOURCE_MEMBERSHIP:
1845             if (optlen != sizeof (struct target_ip_mreq_source))
1846                 return -TARGET_EINVAL;
1847 
1848             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1849             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1850             unlock_user (ip_mreq_source, optval_addr, 0);
1851             break;
1852 
1853         default:
1854             goto unimplemented;
1855         }
1856         break;
1857     case SOL_IPV6:
1858         switch (optname) {
1859         case IPV6_MTU_DISCOVER:
1860         case IPV6_MTU:
1861         case IPV6_V6ONLY:
1862         case IPV6_RECVPKTINFO:
1863         case IPV6_UNICAST_HOPS:
1864         case IPV6_MULTICAST_HOPS:
1865         case IPV6_MULTICAST_LOOP:
1866         case IPV6_RECVERR:
1867         case IPV6_RECVHOPLIMIT:
1868         case IPV6_2292HOPLIMIT:
1869         case IPV6_CHECKSUM:
1870             val = 0;
1871             if (optlen < sizeof(uint32_t)) {
1872                 return -TARGET_EINVAL;
1873             }
1874             if (get_user_u32(val, optval_addr)) {
1875                 return -TARGET_EFAULT;
1876             }
1877             ret = get_errno(setsockopt(sockfd, level, optname,
1878                                        &val, sizeof(val)));
1879             break;
1880         case IPV6_PKTINFO:
1881         {
1882             struct in6_pktinfo pki;
1883 
1884             if (optlen < sizeof(pki)) {
1885                 return -TARGET_EINVAL;
1886             }
1887 
1888             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1889                 return -TARGET_EFAULT;
1890             }
1891 
1892             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1893 
1894             ret = get_errno(setsockopt(sockfd, level, optname,
1895                                        &pki, sizeof(pki)));
1896             break;
1897         }
1898         default:
1899             goto unimplemented;
1900         }
1901         break;
1902     case SOL_ICMPV6:
1903         switch (optname) {
1904         case ICMPV6_FILTER:
1905         {
1906             struct icmp6_filter icmp6f;
1907 
1908             if (optlen > sizeof(icmp6f)) {
1909                 optlen = sizeof(icmp6f);
1910             }
1911 
1912             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
1913                 return -TARGET_EFAULT;
1914             }
1915 
1916             for (val = 0; val < 8; val++) {
1917                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
1918             }
1919 
1920             ret = get_errno(setsockopt(sockfd, level, optname,
1921                                        &icmp6f, optlen));
1922             break;
1923         }
1924         default:
1925             goto unimplemented;
1926         }
1927         break;
1928     case SOL_RAW:
1929         switch (optname) {
1930         case ICMP_FILTER:
1931         case IPV6_CHECKSUM:
1932             /* these options take a u32 value */
1933             if (optlen < sizeof(uint32_t)) {
1934                 return -TARGET_EINVAL;
1935             }
1936 
1937             if (get_user_u32(val, optval_addr)) {
1938                 return -TARGET_EFAULT;
1939             }
1940             ret = get_errno(setsockopt(sockfd, level, optname,
1941                                        &val, sizeof(val)));
1942             break;
1943 
1944         default:
1945             goto unimplemented;
1946         }
1947         break;
1948     case TARGET_SOL_SOCKET:
1949         switch (optname) {
1950         case TARGET_SO_RCVTIMEO:
1951         {
1952                 struct timeval tv;
1953 
1954                 optname = SO_RCVTIMEO;
1955 
1956 set_timeout:
1957                 if (optlen != sizeof(struct target_timeval)) {
1958                     return -TARGET_EINVAL;
1959                 }
1960 
1961                 if (copy_from_user_timeval(&tv, optval_addr)) {
1962                     return -TARGET_EFAULT;
1963                 }
1964 
1965                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1966                                 &tv, sizeof(tv)));
1967                 return ret;
1968         }
1969         case TARGET_SO_SNDTIMEO:
1970                 optname = SO_SNDTIMEO;
1971                 goto set_timeout;
1972         case TARGET_SO_ATTACH_FILTER:
1973         {
1974                 struct target_sock_fprog *tfprog;
1975                 struct target_sock_filter *tfilter;
1976                 struct sock_fprog fprog;
1977                 struct sock_filter *filter;
1978                 int i;
1979 
1980                 if (optlen != sizeof(*tfprog)) {
1981                     return -TARGET_EINVAL;
1982                 }
1983                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1984                     return -TARGET_EFAULT;
1985                 }
1986                 if (!lock_user_struct(VERIFY_READ, tfilter,
1987                                       tswapal(tfprog->filter), 0)) {
1988                     unlock_user_struct(tfprog, optval_addr, 1);
1989                     return -TARGET_EFAULT;
1990                 }
1991 
1992                 fprog.len = tswap16(tfprog->len);
1993                 filter = g_try_new(struct sock_filter, fprog.len);
1994                 if (filter == NULL) {
1995                     unlock_user_struct(tfilter, tfprog->filter, 1);
1996                     unlock_user_struct(tfprog, optval_addr, 1);
1997                     return -TARGET_ENOMEM;
1998                 }
1999                 for (i = 0; i < fprog.len; i++) {
2000                     filter[i].code = tswap16(tfilter[i].code);
2001                     filter[i].jt = tfilter[i].jt;
2002                     filter[i].jf = tfilter[i].jf;
2003                     filter[i].k = tswap32(tfilter[i].k);
2004                 }
2005                 fprog.filter = filter;
2006 
2007                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2008                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2009                 g_free(filter);
2010 
2011                 unlock_user_struct(tfilter, tfprog->filter, 1);
2012                 unlock_user_struct(tfprog, optval_addr, 1);
2013                 return ret;
2014         }
2015         case TARGET_SO_BINDTODEVICE:
2016         {
2017                 char *dev_ifname, *addr_ifname;
2018 
2019                 if (optlen > IFNAMSIZ - 1) {
2020                     optlen = IFNAMSIZ - 1;
2021                 }
2022                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2023                 if (!dev_ifname) {
2024                     return -TARGET_EFAULT;
2025                 }
2026                 optname = SO_BINDTODEVICE;
2027                 addr_ifname = alloca(IFNAMSIZ);
2028                 memcpy(addr_ifname, dev_ifname, optlen);
2029                 addr_ifname[optlen] = 0;
2030                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2031                                            addr_ifname, optlen));
2032                 unlock_user(dev_ifname, optval_addr, 0);
2033                 return ret;
2034         }
2035         case TARGET_SO_LINGER:
2036         {
2037                 struct linger lg;
2038                 struct target_linger *tlg;
2039 
2040                 if (optlen != sizeof(struct target_linger)) {
2041                     return -TARGET_EINVAL;
2042                 }
2043                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2044                     return -TARGET_EFAULT;
2045                 }
2046                 __get_user(lg.l_onoff, &tlg->l_onoff);
2047                 __get_user(lg.l_linger, &tlg->l_linger);
2048                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2049                                 &lg, sizeof(lg)));
2050                 unlock_user_struct(tlg, optval_addr, 0);
2051                 return ret;
2052         }
2053             /* Options with 'int' argument.  */
2054         case TARGET_SO_DEBUG:
2055                 optname = SO_DEBUG;
2056                 break;
2057         case TARGET_SO_REUSEADDR:
2058                 optname = SO_REUSEADDR;
2059                 break;
2060         case TARGET_SO_TYPE:
2061                 optname = SO_TYPE;
2062                 break;
2063         case TARGET_SO_ERROR:
2064                 optname = SO_ERROR;
2065                 break;
2066         case TARGET_SO_DONTROUTE:
2067                 optname = SO_DONTROUTE;
2068                 break;
2069         case TARGET_SO_BROADCAST:
2070                 optname = SO_BROADCAST;
2071                 break;
2072         case TARGET_SO_SNDBUF:
2073                 optname = SO_SNDBUF;
2074                 break;
2075         case TARGET_SO_SNDBUFFORCE:
2076                 optname = SO_SNDBUFFORCE;
2077                 break;
2078         case TARGET_SO_RCVBUF:
2079                 optname = SO_RCVBUF;
2080                 break;
2081         case TARGET_SO_RCVBUFFORCE:
2082                 optname = SO_RCVBUFFORCE;
2083                 break;
2084         case TARGET_SO_KEEPALIVE:
2085                 optname = SO_KEEPALIVE;
2086                 break;
2087         case TARGET_SO_OOBINLINE:
2088                 optname = SO_OOBINLINE;
2089                 break;
2090         case TARGET_SO_NO_CHECK:
2091                 optname = SO_NO_CHECK;
2092                 break;
2093         case TARGET_SO_PRIORITY:
2094                 optname = SO_PRIORITY;
2095                 break;
2096 #ifdef SO_BSDCOMPAT
2097         case TARGET_SO_BSDCOMPAT:
2098                 optname = SO_BSDCOMPAT;
2099                 break;
2100 #endif
2101         case TARGET_SO_PASSCRED:
2102                 optname = SO_PASSCRED;
2103                 break;
2104         case TARGET_SO_PASSSEC:
2105                 optname = SO_PASSSEC;
2106                 break;
2107         case TARGET_SO_TIMESTAMP:
2108                 optname = SO_TIMESTAMP;
2109                 break;
2110         case TARGET_SO_RCVLOWAT:
2111                 optname = SO_RCVLOWAT;
2112                 break;
2113         default:
2114             goto unimplemented;
2115         }
2116         if (optlen < sizeof(uint32_t))
2117             return -TARGET_EINVAL;
2118 
2119         if (get_user_u32(val, optval_addr))
2120             return -TARGET_EFAULT;
2121         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2122         break;
2123     default:
2124     unimplemented:
2125         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2126         ret = -TARGET_ENOPROTOOPT;
2127     }
2128     return ret;
2129 }
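/*
 * Illustrative guest-side use of the TARGET_SO_RCVTIMEO path above
 * (a hypothetical example, not part of QEMU): the guest passes a
 * target-format struct timeval, which do_setsockopt() converts with
 * copy_from_user_timeval() before calling the host setsockopt().
 *
 *     #include <sys/socket.h>
 *     #include <sys/time.h>
 *
 *     static int set_rcv_timeout(int sock, time_t seconds)
 *     {
 *         struct timeval tv = { .tv_sec = seconds, .tv_usec = 0 };
 *         return setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
 *                           &tv, sizeof(tv));
 *     }
 */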
2130 
2131 /* do_getsockopt() Must return target values and target errnos. */
2132 static abi_long do_getsockopt(int sockfd, int level, int optname,
2133                               abi_ulong optval_addr, abi_ulong optlen)
2134 {
2135     abi_long ret;
2136     int len, val;
2137     socklen_t lv;
2138 
2139     switch(level) {
2140     case TARGET_SOL_SOCKET:
2141         level = SOL_SOCKET;
2142         switch (optname) {
2143         /* These don't just return a single integer */
2144         case TARGET_SO_RCVTIMEO:
2145         case TARGET_SO_SNDTIMEO:
2146         case TARGET_SO_PEERNAME:
2147             goto unimplemented;
2148         case TARGET_SO_PEERCRED: {
2149             struct ucred cr;
2150             socklen_t crlen;
2151             struct target_ucred *tcr;
2152 
2153             if (get_user_u32(len, optlen)) {
2154                 return -TARGET_EFAULT;
2155             }
2156             if (len < 0) {
2157                 return -TARGET_EINVAL;
2158             }
2159 
2160             crlen = sizeof(cr);
2161             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2162                                        &cr, &crlen));
2163             if (ret < 0) {
2164                 return ret;
2165             }
2166             if (len > crlen) {
2167                 len = crlen;
2168             }
2169             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2170                 return -TARGET_EFAULT;
2171             }
2172             __put_user(cr.pid, &tcr->pid);
2173             __put_user(cr.uid, &tcr->uid);
2174             __put_user(cr.gid, &tcr->gid);
2175             unlock_user_struct(tcr, optval_addr, 1);
2176             if (put_user_u32(len, optlen)) {
2177                 return -TARGET_EFAULT;
2178             }
2179             break;
2180         }
2181         case TARGET_SO_LINGER:
2182         {
2183             struct linger lg;
2184             socklen_t lglen;
2185             struct target_linger *tlg;
2186 
2187             if (get_user_u32(len, optlen)) {
2188                 return -TARGET_EFAULT;
2189             }
2190             if (len < 0) {
2191                 return -TARGET_EINVAL;
2192             }
2193 
2194             lglen = sizeof(lg);
2195             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2196                                        &lg, &lglen));
2197             if (ret < 0) {
2198                 return ret;
2199             }
2200             if (len > lglen) {
2201                 len = lglen;
2202             }
2203             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2204                 return -TARGET_EFAULT;
2205             }
2206             __put_user(lg.l_onoff, &tlg->l_onoff);
2207             __put_user(lg.l_linger, &tlg->l_linger);
2208             unlock_user_struct(tlg, optval_addr, 1);
2209             if (put_user_u32(len, optlen)) {
2210                 return -TARGET_EFAULT;
2211             }
2212             break;
2213         }
2214         /* Options with 'int' argument.  */
2215         case TARGET_SO_DEBUG:
2216             optname = SO_DEBUG;
2217             goto int_case;
2218         case TARGET_SO_REUSEADDR:
2219             optname = SO_REUSEADDR;
2220             goto int_case;
2221         case TARGET_SO_TYPE:
2222             optname = SO_TYPE;
2223             goto int_case;
2224         case TARGET_SO_ERROR:
2225             optname = SO_ERROR;
2226             goto int_case;
2227         case TARGET_SO_DONTROUTE:
2228             optname = SO_DONTROUTE;
2229             goto int_case;
2230         case TARGET_SO_BROADCAST:
2231             optname = SO_BROADCAST;
2232             goto int_case;
2233         case TARGET_SO_SNDBUF:
2234             optname = SO_SNDBUF;
2235             goto int_case;
2236         case TARGET_SO_RCVBUF:
2237             optname = SO_RCVBUF;
2238             goto int_case;
2239         case TARGET_SO_KEEPALIVE:
2240             optname = SO_KEEPALIVE;
2241             goto int_case;
2242         case TARGET_SO_OOBINLINE:
2243             optname = SO_OOBINLINE;
2244             goto int_case;
2245         case TARGET_SO_NO_CHECK:
2246             optname = SO_NO_CHECK;
2247             goto int_case;
2248         case TARGET_SO_PRIORITY:
2249             optname = SO_PRIORITY;
2250             goto int_case;
2251 #ifdef SO_BSDCOMPAT
2252         case TARGET_SO_BSDCOMPAT:
2253             optname = SO_BSDCOMPAT;
2254             goto int_case;
2255 #endif
2256         case TARGET_SO_PASSCRED:
2257             optname = SO_PASSCRED;
2258             goto int_case;
2259         case TARGET_SO_TIMESTAMP:
2260             optname = SO_TIMESTAMP;
2261             goto int_case;
2262         case TARGET_SO_RCVLOWAT:
2263             optname = SO_RCVLOWAT;
2264             goto int_case;
2265         case TARGET_SO_ACCEPTCONN:
2266             optname = SO_ACCEPTCONN;
2267             goto int_case;
2268         default:
2269             goto int_case;
2270         }
2271         break;
2272     case SOL_TCP:
2273         /* TCP options all take an 'int' value.  */
2274     int_case:
2275         if (get_user_u32(len, optlen))
2276             return -TARGET_EFAULT;
2277         if (len < 0)
2278             return -TARGET_EINVAL;
2279         lv = sizeof(lv);
2280         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2281         if (ret < 0)
2282             return ret;
2283         if (optname == SO_TYPE) {
2284             val = host_to_target_sock_type(val);
2285         }
2286         if (len > lv)
2287             len = lv;
2288         if (len == 4) {
2289             if (put_user_u32(val, optval_addr))
2290                 return -TARGET_EFAULT;
2291         } else {
2292             if (put_user_u8(val, optval_addr))
2293                 return -TARGET_EFAULT;
2294         }
2295         if (put_user_u32(len, optlen))
2296             return -TARGET_EFAULT;
2297         break;
2298     case SOL_IP:
2299         switch(optname) {
2300         case IP_TOS:
2301         case IP_TTL:
2302         case IP_HDRINCL:
2303         case IP_ROUTER_ALERT:
2304         case IP_RECVOPTS:
2305         case IP_RETOPTS:
2306         case IP_PKTINFO:
2307         case IP_MTU_DISCOVER:
2308         case IP_RECVERR:
2309         case IP_RECVTOS:
2310 #ifdef IP_FREEBIND
2311         case IP_FREEBIND:
2312 #endif
2313         case IP_MULTICAST_TTL:
2314         case IP_MULTICAST_LOOP:
2315             if (get_user_u32(len, optlen))
2316                 return -TARGET_EFAULT;
2317             if (len < 0)
2318                 return -TARGET_EINVAL;
2319             lv = sizeof(lv);
2320             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2321             if (ret < 0)
2322                 return ret;
2323             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2324                 len = 1;
2325                 if (put_user_u32(len, optlen)
2326                     || put_user_u8(val, optval_addr))
2327                     return -TARGET_EFAULT;
2328             } else {
2329                 if (len > sizeof(int))
2330                     len = sizeof(int);
2331                 if (put_user_u32(len, optlen)
2332                     || put_user_u32(val, optval_addr))
2333                     return -TARGET_EFAULT;
2334             }
2335             break;
2336         default:
2337             ret = -TARGET_ENOPROTOOPT;
2338             break;
2339         }
2340         break;
2341     default:
2342     unimplemented:
2343         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2344                  level, optname);
2345         ret = -TARGET_EOPNOTSUPP;
2346         break;
2347     }
2348     return ret;
2349 }
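/*
 * Illustrative guest-side use of the int_case path above (a hypothetical
 * example; sock is assumed to be a socket with a pending asynchronous
 * error): the option is fetched as a host int and written back as a
 * 4-byte (or, if the guest asked for less, 1-byte) target value, with
 * *optlen updated accordingly.
 *
 *     #include <sys/socket.h>
 *
 *     static int pending_error(int sock)
 *     {
 *         int err = 0;
 *         socklen_t len = sizeof(err);
 *
 *         if (getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &len) < 0) {
 *             return -1;
 *         }
 *         return err;
 *     }
 */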
2350 
2351 /* Convert target low/high pair representing file offset into the host
2352  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2353  * as the kernel doesn't handle them either.
2354  */
2355 static void target_to_host_low_high(abi_ulong tlow,
2356                                     abi_ulong thigh,
2357                                     unsigned long *hlow,
2358                                     unsigned long *hhigh)
2359 {
2360     uint64_t off = tlow |
2361         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2362         TARGET_LONG_BITS / 2;
2363 
2364     *hlow = off;
2365     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2366 }
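/*
 * Worked example for the helper above (illustrative only; used when a
 * 32-bit guest passes a 64-bit file offset split across two registers).
 * For tlow = 0x89abcdef and thigh = 0x01234567:
 *
 *     off   = 0x0123456789abcdef
 *     hlow  = 0x89abcdef,         hhigh = 0x01234567   on a 32-bit host
 *     hlow  = 0x0123456789abcdef, hhigh = 0            on a 64-bit host
 *
 * The shifts are applied in two halves so that no single shift count ever
 * equals the operand width, which would be undefined behaviour when
 * TARGET_LONG_BITS or HOST_LONG_BITS is 64.
 */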
2367 
2368 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2369                                 abi_ulong count, int copy)
2370 {
2371     struct target_iovec *target_vec;
2372     struct iovec *vec;
2373     abi_ulong total_len, max_len;
2374     int i;
2375     int err = 0;
2376     bool bad_address = false;
2377 
2378     if (count == 0) {
2379         errno = 0;
2380         return NULL;
2381     }
2382     if (count > IOV_MAX) {
2383         errno = EINVAL;
2384         return NULL;
2385     }
2386 
2387     vec = g_try_new0(struct iovec, count);
2388     if (vec == NULL) {
2389         errno = ENOMEM;
2390         return NULL;
2391     }
2392 
2393     target_vec = lock_user(VERIFY_READ, target_addr,
2394                            count * sizeof(struct target_iovec), 1);
2395     if (target_vec == NULL) {
2396         err = EFAULT;
2397         goto fail2;
2398     }
2399 
2400     /* ??? If host page size > target page size, this will result in a
2401        value larger than what we can actually support.  */
2402     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2403     total_len = 0;
2404 
2405     for (i = 0; i < count; i++) {
2406         abi_ulong base = tswapal(target_vec[i].iov_base);
2407         abi_long len = tswapal(target_vec[i].iov_len);
2408 
2409         if (len < 0) {
2410             err = EINVAL;
2411             goto fail;
2412         } else if (len == 0) {
2413             /* A zero-length iovec entry is ignored.  */
2414             vec[i].iov_base = 0;
2415         } else {
2416             vec[i].iov_base = lock_user(type, base, len, copy);
2417             /* If the first buffer pointer is bad, this is a fault.  But
2418              * subsequent bad buffers will result in a partial write; this
2419              * is realized by filling the vector with null pointers and
2420              * zero lengths. */
2421             if (!vec[i].iov_base) {
2422                 if (i == 0) {
2423                     err = EFAULT;
2424                     goto fail;
2425                 } else {
2426                     bad_address = true;
2427                 }
2428             }
2429             if (bad_address) {
2430                 len = 0;
2431             }
2432             if (len > max_len - total_len) {
2433                 len = max_len - total_len;
2434             }
2435         }
2436         vec[i].iov_len = len;
2437         total_len += len;
2438     }
2439 
2440     unlock_user(target_vec, target_addr, 0);
2441     return vec;
2442 
2443  fail:
2444     while (--i >= 0) {
2445         if (tswapal(target_vec[i].iov_len) > 0) {
2446             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2447         }
2448     }
2449     unlock_user(target_vec, target_addr, 0);
2450  fail2:
2451     g_free(vec);
2452     errno = err;
2453     return NULL;
2454 }
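/*
 * Illustrative guest-side call exercising the partial-write behaviour
 * described above (a hypothetical example; fd is assumed to be an open,
 * writable file descriptor, and the second base address is assumed to be
 * unmapped in the guest).  lock_iovec() keeps iov[0] intact and records
 * the later bad entry with a zero length, so the host writev() performs a
 * short write rather than the whole call failing with EFAULT.
 *
 *     #include <sys/uio.h>
 *
 *     char good[64] = { 0 };
 *     struct iovec iov[2] = {
 *         { .iov_base = good,               .iov_len = sizeof(good) },
 *         { .iov_base = (void *)0xdead0000, .iov_len = 64 },
 *     };
 *     ssize_t n = writev(fd, iov, 2);   // expect a short count, not EFAULT
 */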
2455 
2456 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2457                          abi_ulong count, int copy)
2458 {
2459     struct target_iovec *target_vec;
2460     int i;
2461 
2462     target_vec = lock_user(VERIFY_READ, target_addr,
2463                            count * sizeof(struct target_iovec), 1);
2464     if (target_vec) {
2465         for (i = 0; i < count; i++) {
2466             abi_ulong base = tswapal(target_vec[i].iov_base);
2467             abi_long len = tswapal(target_vec[i].iov_len);
2468             if (len < 0) {
2469                 break;
2470             }
2471             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2472         }
2473         unlock_user(target_vec, target_addr, 0);
2474     }
2475 
2476     g_free(vec);
2477 }
2478 
2479 static inline int target_to_host_sock_type(int *type)
2480 {
2481     int host_type = 0;
2482     int target_type = *type;
2483 
2484     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2485     case TARGET_SOCK_DGRAM:
2486         host_type = SOCK_DGRAM;
2487         break;
2488     case TARGET_SOCK_STREAM:
2489         host_type = SOCK_STREAM;
2490         break;
2491     default:
2492         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2493         break;
2494     }
2495     if (target_type & TARGET_SOCK_CLOEXEC) {
2496 #if defined(SOCK_CLOEXEC)
2497         host_type |= SOCK_CLOEXEC;
2498 #else
2499         return -TARGET_EINVAL;
2500 #endif
2501     }
2502     if (target_type & TARGET_SOCK_NONBLOCK) {
2503 #if defined(SOCK_NONBLOCK)
2504         host_type |= SOCK_NONBLOCK;
2505 #elif !defined(O_NONBLOCK)
2506         return -TARGET_EINVAL;
2507 #endif
2508     }
2509     *type = host_type;
2510     return 0;
2511 }
2512 
2513 /* Try to emulate socket type flags after socket creation.  */
2514 static int sock_flags_fixup(int fd, int target_type)
2515 {
2516 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2517     if (target_type & TARGET_SOCK_NONBLOCK) {
2518         int flags = fcntl(fd, F_GETFL);
2519         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2520             close(fd);
2521             return -TARGET_EINVAL;
2522         }
2523     }
2524 #endif
2525     return fd;
2526 }
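/*
 * Illustrative mapping performed by target_to_host_sock_type() and
 * sock_flags_fixup() above (a hypothetical guest call, not part of QEMU):
 *
 *     int s = socket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, 0);
 *
 * TARGET_SOCK_DGRAM becomes the host SOCK_DGRAM; TARGET_SOCK_NONBLOCK is
 * passed through as SOCK_NONBLOCK where the host defines it, and otherwise
 * emulated after socket creation with fcntl(F_SETFL, O_NONBLOCK).
 */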
2527 
2528 /* do_socket() Must return target values and target errnos. */
2529 static abi_long do_socket(int domain, int type, int protocol)
2530 {
2531     int target_type = type;
2532     int ret;
2533 
2534     ret = target_to_host_sock_type(&type);
2535     if (ret) {
2536         return ret;
2537     }
2538 
2539     if (domain == PF_NETLINK && !(
2540 #ifdef CONFIG_RTNETLINK
2541          protocol == NETLINK_ROUTE ||
2542 #endif
2543          protocol == NETLINK_KOBJECT_UEVENT ||
2544          protocol == NETLINK_AUDIT)) {
2545         return -EPFNOSUPPORT;
2546     }
2547 
2548     if (domain == AF_PACKET ||
2549         (domain == AF_INET && type == SOCK_PACKET)) {
2550         protocol = tswap16(protocol);
2551     }
2552 
2553     ret = get_errno(socket(domain, type, protocol));
2554     if (ret >= 0) {
2555         ret = sock_flags_fixup(ret, target_type);
2556         if (type == SOCK_PACKET) {
2557             /* Handle the obsolete SOCK_PACKET case:
2558              * such sockets are bound by interface name.
2559              */
2560             fd_trans_register(ret, &target_packet_trans);
2561         } else if (domain == PF_NETLINK) {
2562             switch (protocol) {
2563 #ifdef CONFIG_RTNETLINK
2564             case NETLINK_ROUTE:
2565                 fd_trans_register(ret, &target_netlink_route_trans);
2566                 break;
2567 #endif
2568             case NETLINK_KOBJECT_UEVENT:
2569                 /* nothing to do: messages are strings */
2570                 break;
2571             case NETLINK_AUDIT:
2572                 fd_trans_register(ret, &target_netlink_audit_trans);
2573                 break;
2574             default:
2575                 g_assert_not_reached();
2576             }
2577         }
2578     }
2579     return ret;
2580 }
2581 
2582 /* do_bind() Must return target values and target errnos. */
2583 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2584                         socklen_t addrlen)
2585 {
2586     void *addr;
2587     abi_long ret;
2588 
2589     if ((int)addrlen < 0) {
2590         return -TARGET_EINVAL;
2591     }
2592 
2593     addr = alloca(addrlen+1);
2594 
2595     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2596     if (ret)
2597         return ret;
2598 
2599     return get_errno(bind(sockfd, addr, addrlen));
2600 }
2601 
2602 /* do_connect() Must return target values and target errnos. */
2603 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2604                            socklen_t addrlen)
2605 {
2606     void *addr;
2607     abi_long ret;
2608 
2609     if ((int)addrlen < 0) {
2610         return -TARGET_EINVAL;
2611     }
2612 
2613     addr = alloca(addrlen+1);
2614 
2615     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2616     if (ret)
2617         return ret;
2618 
2619     return get_errno(safe_connect(sockfd, addr, addrlen));
2620 }
2621 
2622 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2623 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2624                                       int flags, int send)
2625 {
2626     abi_long ret, len;
2627     struct msghdr msg;
2628     abi_ulong count;
2629     struct iovec *vec;
2630     abi_ulong target_vec;
2631 
2632     if (msgp->msg_name) {
2633         msg.msg_namelen = tswap32(msgp->msg_namelen);
2634         msg.msg_name = alloca(msg.msg_namelen+1);
2635         ret = target_to_host_sockaddr(fd, msg.msg_name,
2636                                       tswapal(msgp->msg_name),
2637                                       msg.msg_namelen);
2638         if (ret == -TARGET_EFAULT) {
2639             /* For connected sockets msg_name and msg_namelen must
2640              * be ignored, so returning EFAULT immediately is wrong.
2641              * Instead, pass a bad msg_name to the host kernel, and
2642              * let it decide whether to return EFAULT or not.
2643              */
2644             msg.msg_name = (void *)-1;
2645         } else if (ret) {
2646             goto out2;
2647         }
2648     } else {
2649         msg.msg_name = NULL;
2650         msg.msg_namelen = 0;
2651     }
2652     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2653     msg.msg_control = alloca(msg.msg_controllen);
2654     memset(msg.msg_control, 0, msg.msg_controllen);
2655 
2656     msg.msg_flags = tswap32(msgp->msg_flags);
2657 
2658     count = tswapal(msgp->msg_iovlen);
2659     target_vec = tswapal(msgp->msg_iov);
2660 
2661     if (count > IOV_MAX) {
2662         /* sendmsg/recvmsg return a different errno for this condition than
2663          * readv/writev, so we must catch it here before lock_iovec() does.
2664          */
2665         ret = -TARGET_EMSGSIZE;
2666         goto out2;
2667     }
2668 
2669     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2670                      target_vec, count, send);
2671     if (vec == NULL) {
2672         ret = -host_to_target_errno(errno);
2673         goto out2;
2674     }
2675     msg.msg_iovlen = count;
2676     msg.msg_iov = vec;
2677 
2678     if (send) {
2679         if (fd_trans_target_to_host_data(fd)) {
2680             void *host_msg;
2681 
2682             host_msg = g_malloc(msg.msg_iov->iov_len);
2683             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2684             ret = fd_trans_target_to_host_data(fd)(host_msg,
2685                                                    msg.msg_iov->iov_len);
2686             if (ret >= 0) {
2687                 msg.msg_iov->iov_base = host_msg;
2688                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2689             }
2690             g_free(host_msg);
2691         } else {
2692             ret = target_to_host_cmsg(&msg, msgp);
2693             if (ret == 0) {
2694                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2695             }
2696         }
2697     } else {
2698         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2699         if (!is_error(ret)) {
2700             len = ret;
2701             if (fd_trans_host_to_target_data(fd)) {
2702                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2703                                                MIN(msg.msg_iov->iov_len, len));
2704             } else {
2705                 ret = host_to_target_cmsg(msgp, &msg);
2706             }
2707             if (!is_error(ret)) {
2708                 msgp->msg_namelen = tswap32(msg.msg_namelen);
2709                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2710                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2711                                     msg.msg_name, msg.msg_namelen);
2712                     if (ret) {
2713                         goto out;
2714                     }
2715                 }
2716 
2717                 ret = len;
2718             }
2719         }
2720     }
2721 
2722 out:
2723     unlock_iovec(vec, target_vec, count, !send);
2724 out2:
2725     return ret;
2726 }
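/*
 * Illustrative guest-side call exercising the msg_name handling above
 * (a hypothetical example; udp_fd is assumed to be a connect()ed UDP
 * socket).  Because the socket is connected, the kernel ignores
 * msg_name/msg_namelen, so an unreadable msg_name must not make the
 * emulation return EFAULT on its own; the bad pointer is forwarded and
 * the host kernel decides.
 *
 *     #include <sys/socket.h>
 *
 *     char payload[] = "ping";
 *     struct iovec iov = { .iov_base = payload,
 *                          .iov_len = sizeof(payload) - 1 };
 *     struct msghdr msg = {
 *         .msg_name = (void *)0xdead0000,   // bogus address
 *         .msg_namelen = 16,
 *         .msg_iov = &iov, .msg_iovlen = 1,
 *     };
 *     sendmsg(udp_fd, &msg, 0);
 */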
2727 
2728 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2729                                int flags, int send)
2730 {
2731     abi_long ret;
2732     struct target_msghdr *msgp;
2733 
2734     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2735                           msgp,
2736                           target_msg,
2737                           send ? 1 : 0)) {
2738         return -TARGET_EFAULT;
2739     }
2740     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2741     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2742     return ret;
2743 }
2744 
2745 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2746  * so it might not have this *mmsg-specific flag either.
2747  */
2748 #ifndef MSG_WAITFORONE
2749 #define MSG_WAITFORONE 0x10000
2750 #endif
2751 
2752 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2753                                 unsigned int vlen, unsigned int flags,
2754                                 int send)
2755 {
2756     struct target_mmsghdr *mmsgp;
2757     abi_long ret = 0;
2758     int i;
2759 
2760     if (vlen > UIO_MAXIOV) {
2761         vlen = UIO_MAXIOV;
2762     }
2763 
2764     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2765     if (!mmsgp) {
2766         return -TARGET_EFAULT;
2767     }
2768 
2769     for (i = 0; i < vlen; i++) {
2770         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2771         if (is_error(ret)) {
2772             break;
2773         }
2774         mmsgp[i].msg_len = tswap32(ret);
2775         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2776         if (flags & MSG_WAITFORONE) {
2777             flags |= MSG_DONTWAIT;
2778         }
2779     }
2780 
2781     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2782 
2783     /* Return the number of datagrams sent or received if we transferred
2784      * any at all; otherwise return the error.
2785      */
2786     if (i) {
2787         return i;
2788     }
2789     return ret;
2790 }
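/*
 * Illustrative guest-side recvmmsg() exercising the loop above (a
 * hypothetical example; sock is assumed to be a bound UDP socket).  With
 * MSG_WAITFORONE the first receive may block, but after one datagram has
 * arrived the loop adds MSG_DONTWAIT, so later iterations return
 * immediately and the call reports how many messages were received.
 *
 *     #define _GNU_SOURCE
 *     #include <sys/socket.h>
 *
 *     struct mmsghdr msgs[4];
 *     struct iovec iovs[4];
 *     char bufs[4][1500];
 *     for (int i = 0; i < 4; i++) {
 *         iovs[i] = (struct iovec){ .iov_base = bufs[i], .iov_len = 1500 };
 *         msgs[i] = (struct mmsghdr){ .msg_hdr = { .msg_iov = &iovs[i],
 *                                                  .msg_iovlen = 1 } };
 *     }
 *     int n = recvmmsg(sock, msgs, 4, MSG_WAITFORONE, NULL);
 *     // n >= 1 on success; msgs[i].msg_len holds each datagram's length
 */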
2791 
2792 /* do_accept4() Must return target values and target errnos. */
2793 static abi_long do_accept4(int fd, abi_ulong target_addr,
2794                            abi_ulong target_addrlen_addr, int flags)
2795 {
2796     socklen_t addrlen;
2797     void *addr;
2798     abi_long ret;
2799     int host_flags;
2800 
2801     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2802 
2803     if (target_addr == 0) {
2804         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2805     }
2806 
2807     /* Linux returns EINVAL if the addrlen pointer is invalid */
2808     if (get_user_u32(addrlen, target_addrlen_addr))
2809         return -TARGET_EINVAL;
2810 
2811     if ((int)addrlen < 0) {
2812         return -TARGET_EINVAL;
2813     }
2814 
2815     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2816         return -TARGET_EINVAL;
2817 
2818     addr = alloca(addrlen);
2819 
2820     ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
2821     if (!is_error(ret)) {
2822         host_to_target_sockaddr(target_addr, addr, addrlen);
2823         if (put_user_u32(addrlen, target_addrlen_addr))
2824             ret = -TARGET_EFAULT;
2825     }
2826     return ret;
2827 }
2828 
2829 /* do_getpeername() Must return target values and target errnos. */
2830 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2831                                abi_ulong target_addrlen_addr)
2832 {
2833     socklen_t addrlen;
2834     void *addr;
2835     abi_long ret;
2836 
2837     if (get_user_u32(addrlen, target_addrlen_addr))
2838         return -TARGET_EFAULT;
2839 
2840     if ((int)addrlen < 0) {
2841         return -TARGET_EINVAL;
2842     }
2843 
2844     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2845         return -TARGET_EFAULT;
2846 
2847     addr = alloca(addrlen);
2848 
2849     ret = get_errno(getpeername(fd, addr, &addrlen));
2850     if (!is_error(ret)) {
2851         host_to_target_sockaddr(target_addr, addr, addrlen);
2852         if (put_user_u32(addrlen, target_addrlen_addr))
2853             ret = -TARGET_EFAULT;
2854     }
2855     return ret;
2856 }
2857 
2858 /* do_getsockname() Must return target values and target errnos. */
2859 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2860                                abi_ulong target_addrlen_addr)
2861 {
2862     socklen_t addrlen;
2863     void *addr;
2864     abi_long ret;
2865 
2866     if (get_user_u32(addrlen, target_addrlen_addr))
2867         return -TARGET_EFAULT;
2868 
2869     if ((int)addrlen < 0) {
2870         return -TARGET_EINVAL;
2871     }
2872 
2873     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2874         return -TARGET_EFAULT;
2875 
2876     addr = alloca(addrlen);
2877 
2878     ret = get_errno(getsockname(fd, addr, &addrlen));
2879     if (!is_error(ret)) {
2880         host_to_target_sockaddr(target_addr, addr, addrlen);
2881         if (put_user_u32(addrlen, target_addrlen_addr))
2882             ret = -TARGET_EFAULT;
2883     }
2884     return ret;
2885 }
2886 
2887 /* do_socketpair() Must return target values and target errnos. */
2888 static abi_long do_socketpair(int domain, int type, int protocol,
2889                               abi_ulong target_tab_addr)
2890 {
2891     int tab[2];
2892     abi_long ret;
2893 
2894     target_to_host_sock_type(&type);
2895 
2896     ret = get_errno(socketpair(domain, type, protocol, tab));
2897     if (!is_error(ret)) {
2898         if (put_user_s32(tab[0], target_tab_addr)
2899             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2900             ret = -TARGET_EFAULT;
2901     }
2902     return ret;
2903 }
2904 
2905 /* do_sendto() Must return target values and target errnos. */
2906 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2907                           abi_ulong target_addr, socklen_t addrlen)
2908 {
2909     void *addr;
2910     void *host_msg;
2911     void *copy_msg = NULL;
2912     abi_long ret;
2913 
2914     if ((int)addrlen < 0) {
2915         return -TARGET_EINVAL;
2916     }
2917 
2918     host_msg = lock_user(VERIFY_READ, msg, len, 1);
2919     if (!host_msg)
2920         return -TARGET_EFAULT;
2921     if (fd_trans_target_to_host_data(fd)) {
2922         copy_msg = host_msg;
2923         host_msg = g_malloc(len);
2924         memcpy(host_msg, copy_msg, len);
2925         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
2926         if (ret < 0) {
2927             goto fail;
2928         }
2929     }
2930     if (target_addr) {
2931         addr = alloca(addrlen+1);
2932         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
2933         if (ret) {
2934             goto fail;
2935         }
2936         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
2937     } else {
2938         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
2939     }
2940 fail:
2941     if (copy_msg) {
2942         g_free(host_msg);
2943         host_msg = copy_msg;
2944     }
2945     unlock_user(host_msg, msg, 0);
2946     return ret;
2947 }
2948 
2949 /* do_recvfrom() Must return target values and target errnos. */
2950 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2951                             abi_ulong target_addr,
2952                             abi_ulong target_addrlen)
2953 {
2954     socklen_t addrlen;
2955     void *addr;
2956     void *host_msg;
2957     abi_long ret;
2958 
2959     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2960     if (!host_msg)
2961         return -TARGET_EFAULT;
2962     if (target_addr) {
2963         if (get_user_u32(addrlen, target_addrlen)) {
2964             ret = -TARGET_EFAULT;
2965             goto fail;
2966         }
2967         if ((int)addrlen < 0) {
2968             ret = -TARGET_EINVAL;
2969             goto fail;
2970         }
2971         addr = alloca(addrlen);
2972         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
2973                                       addr, &addrlen));
2974     } else {
2975         addr = NULL; /* To keep compiler quiet.  */
2976         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
2977     }
2978     if (!is_error(ret)) {
2979         if (fd_trans_host_to_target_data(fd)) {
2980             abi_long trans;
2981             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
2982             if (is_error(trans)) {
2983                 ret = trans;
2984                 goto fail;
2985             }
2986         }
2987         if (target_addr) {
2988             host_to_target_sockaddr(target_addr, addr, addrlen);
2989             if (put_user_u32(addrlen, target_addrlen)) {
2990                 ret = -TARGET_EFAULT;
2991                 goto fail;
2992             }
2993         }
2994         unlock_user(host_msg, msg, len);
2995     } else {
2996 fail:
2997         unlock_user(host_msg, msg, 0);
2998     }
2999     return ret;
3000 }
3001 
3002 #ifdef TARGET_NR_socketcall
3003 /* do_socketcall() must return target values and target errnos. */
3004 static abi_long do_socketcall(int num, abi_ulong vptr)
3005 {
3006     static const unsigned nargs[] = { /* number of arguments per operation */
3007         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3008         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3009         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3010         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3011         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3012         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3013         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3014         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3015         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3016         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3017         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3018         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3019         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3020         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3021         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3022         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3023         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3024         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3025         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3026         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3027     };
3028     abi_long a[6]; /* max 6 args */
3029     unsigned i;
3030 
3031     /* check the range of the first argument num */
3032     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3033     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3034         return -TARGET_EINVAL;
3035     }
3036     /* ensure we have space for args */
3037     if (nargs[num] > ARRAY_SIZE(a)) {
3038         return -TARGET_EINVAL;
3039     }
3040     /* collect the arguments in a[] according to nargs[] */
3041     for (i = 0; i < nargs[num]; ++i) {
3042         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3043             return -TARGET_EFAULT;
3044         }
3045     }
3046     /* now that we have the args, invoke the appropriate underlying function */
3047     switch (num) {
3048     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3049         return do_socket(a[0], a[1], a[2]);
3050     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3051         return do_bind(a[0], a[1], a[2]);
3052     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3053         return do_connect(a[0], a[1], a[2]);
3054     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3055         return get_errno(listen(a[0], a[1]));
3056     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3057         return do_accept4(a[0], a[1], a[2], 0);
3058     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3059         return do_getsockname(a[0], a[1], a[2]);
3060     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3061         return do_getpeername(a[0], a[1], a[2]);
3062     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3063         return do_socketpair(a[0], a[1], a[2], a[3]);
3064     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3065         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3066     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3067         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3068     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3069         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3070     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3071         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3072     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3073         return get_errno(shutdown(a[0], a[1]));
3074     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3075         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3076     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3077         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3078     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3079         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3080     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3081         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3082     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3083         return do_accept4(a[0], a[1], a[2], a[3]);
3084     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3085         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3086     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3087         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3088     default:
3089         gemu_log("Unsupported socketcall: %d\n", num);
3090         return -TARGET_EINVAL;
3091     }
3092 }
3093 #endif
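/*
 * For reference, a guest normally reaches do_socketcall() via a libc that
 * packs the real socket() arguments into an array and issues the single
 * multiplexed socketcall(2) syscall.  A minimal guest-side sketch (assuming
 * a target ABI that still uses socketcall; purely illustrative, not part of
 * QEMU itself):
 *
 *     long args[3] = { AF_INET, SOCK_STREAM, 0 };
 *     int fd = syscall(__NR_socketcall, SYS_SOCKET, args);
 *
 * The guest's SYS_SOCKET corresponds to TARGET_SYS_SOCKET above, so
 * do_socketcall() reads nargs[TARGET_SYS_SOCKET] == 3 argument slots from
 * the guest pointer and dispatches to do_socket().
 */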
3094 
3095 #define N_SHM_REGIONS	32
3096 
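/*
 * Bookkeeping for guest shmat() mappings: do_shmat() below records each
 * attach here so that do_shmdt() can later find the region's size and clear
 * its page flags again.  N_SHM_REGIONS simply bounds how many attaches are
 * tracked at once.
 */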
3097 static struct shm_region {
3098     abi_ulong start;
3099     abi_ulong size;
3100     bool in_use;
3101 } shm_regions[N_SHM_REGIONS];
3102 
3103 #ifndef TARGET_SEMID64_DS
3104 /* asm-generic version of this struct */
3105 struct target_semid64_ds
3106 {
3107   struct target_ipc_perm sem_perm;
3108   abi_ulong sem_otime;
3109 #if TARGET_ABI_BITS == 32
3110   abi_ulong __unused1;
3111 #endif
3112   abi_ulong sem_ctime;
3113 #if TARGET_ABI_BITS == 32
3114   abi_ulong __unused2;
3115 #endif
3116   abi_ulong sem_nsems;
3117   abi_ulong __unused3;
3118   abi_ulong __unused4;
3119 };
3120 #endif
3121 
3122 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3123                                                abi_ulong target_addr)
3124 {
3125     struct target_ipc_perm *target_ip;
3126     struct target_semid64_ds *target_sd;
3127 
3128     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3129         return -TARGET_EFAULT;
3130     target_ip = &(target_sd->sem_perm);
3131     host_ip->__key = tswap32(target_ip->__key);
3132     host_ip->uid = tswap32(target_ip->uid);
3133     host_ip->gid = tswap32(target_ip->gid);
3134     host_ip->cuid = tswap32(target_ip->cuid);
3135     host_ip->cgid = tswap32(target_ip->cgid);
3136 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3137     host_ip->mode = tswap32(target_ip->mode);
3138 #else
3139     host_ip->mode = tswap16(target_ip->mode);
3140 #endif
3141 #if defined(TARGET_PPC)
3142     host_ip->__seq = tswap32(target_ip->__seq);
3143 #else
3144     host_ip->__seq = tswap16(target_ip->__seq);
3145 #endif
3146     unlock_user_struct(target_sd, target_addr, 0);
3147     return 0;
3148 }
3149 
3150 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3151                                                struct ipc_perm *host_ip)
3152 {
3153     struct target_ipc_perm *target_ip;
3154     struct target_semid64_ds *target_sd;
3155 
3156     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3157         return -TARGET_EFAULT;
3158     target_ip = &(target_sd->sem_perm);
3159     target_ip->__key = tswap32(host_ip->__key);
3160     target_ip->uid = tswap32(host_ip->uid);
3161     target_ip->gid = tswap32(host_ip->gid);
3162     target_ip->cuid = tswap32(host_ip->cuid);
3163     target_ip->cgid = tswap32(host_ip->cgid);
3164 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3165     target_ip->mode = tswap32(host_ip->mode);
3166 #else
3167     target_ip->mode = tswap16(host_ip->mode);
3168 #endif
3169 #if defined(TARGET_PPC)
3170     target_ip->__seq = tswap32(host_ip->__seq);
3171 #else
3172     target_ip->__seq = tswap16(host_ip->__seq);
3173 #endif
3174     unlock_user_struct(target_sd, target_addr, 1);
3175     return 0;
3176 }
3177 
3178 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3179                                                abi_ulong target_addr)
3180 {
3181     struct target_semid64_ds *target_sd;
3182 
3183     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3184         return -TARGET_EFAULT;
3185     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3186         return -TARGET_EFAULT;
3187     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3188     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3189     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3190     unlock_user_struct(target_sd, target_addr, 0);
3191     return 0;
3192 }
3193 
3194 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3195                                                struct semid_ds *host_sd)
3196 {
3197     struct target_semid64_ds *target_sd;
3198 
3199     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3200         return -TARGET_EFAULT;
3201     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3202         return -TARGET_EFAULT;
3203     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3204     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3205     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3206     unlock_user_struct(target_sd, target_addr, 1);
3207     return 0;
3208 }
3209 
3210 struct target_seminfo {
3211     int semmap;
3212     int semmni;
3213     int semmns;
3214     int semmnu;
3215     int semmsl;
3216     int semopm;
3217     int semume;
3218     int semusz;
3219     int semvmx;
3220     int semaem;
3221 };
3222 
3223 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3224                                               struct seminfo *host_seminfo)
3225 {
3226     struct target_seminfo *target_seminfo;
3227     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3228         return -TARGET_EFAULT;
3229     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3230     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3231     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3232     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3233     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3234     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3235     __put_user(host_seminfo->semume, &target_seminfo->semume);
3236     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3237     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3238     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3239     unlock_user_struct(target_seminfo, target_addr, 1);
3240     return 0;
3241 }
3242 
3243 union semun {
3244 	int val;
3245 	struct semid_ds *buf;
3246 	unsigned short *array;
3247 	struct seminfo *__buf;
3248 };
3249 
3250 union target_semun {
3251 	int val;
3252 	abi_ulong buf;
3253 	abi_ulong array;
3254 	abi_ulong __buf;
3255 };
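/*
 * Note that the target_semun members other than val are guest addresses
 * (abi_ulong), not host pointers: do_semctl() below converts them with
 * target_to_host_semarray()/target_to_host_semid_ds() and builds a host
 * union semun whose members really do point at host memory.
 */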
3256 
3257 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3258                                                abi_ulong target_addr)
3259 {
3260     int nsems;
3261     unsigned short *array;
3262     union semun semun;
3263     struct semid_ds semid_ds;
3264     int i, ret;
3265 
3266     semun.buf = &semid_ds;
3267 
3268     ret = semctl(semid, 0, IPC_STAT, semun);
3269     if (ret == -1)
3270         return get_errno(ret);
3271 
3272     nsems = semid_ds.sem_nsems;
3273 
3274     *host_array = g_try_new(unsigned short, nsems);
3275     if (!*host_array) {
3276         return -TARGET_ENOMEM;
3277     }
3278     array = lock_user(VERIFY_READ, target_addr,
3279                       nsems*sizeof(unsigned short), 1);
3280     if (!array) {
3281         g_free(*host_array);
3282         return -TARGET_EFAULT;
3283     }
3284 
3285     for(i=0; i<nsems; i++) {
3286         __get_user((*host_array)[i], &array[i]);
3287     }
3288     unlock_user(array, target_addr, 0);
3289 
3290     return 0;
3291 }
3292 
3293 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3294                                                unsigned short **host_array)
3295 {
3296     int nsems;
3297     unsigned short *array;
3298     union semun semun;
3299     struct semid_ds semid_ds;
3300     int i, ret;
3301 
3302     semun.buf = &semid_ds;
3303 
3304     ret = semctl(semid, 0, IPC_STAT, semun);
3305     if (ret == -1)
3306         return get_errno(ret);
3307 
3308     nsems = semid_ds.sem_nsems;
3309 
3310     array = lock_user(VERIFY_WRITE, target_addr,
3311                       nsems*sizeof(unsigned short), 0);
3312     if (!array)
3313         return -TARGET_EFAULT;
3314 
3315     for(i=0; i<nsems; i++) {
3316         __put_user((*host_array)[i], &array[i]);
3317     }
3318     g_free(*host_array);
3319     unlock_user(array, target_addr, 1);
3320 
3321     return 0;
3322 }
3323 
3324 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3325                                  abi_ulong target_arg)
3326 {
3327     union target_semun target_su = { .buf = target_arg };
3328     union semun arg;
3329     struct semid_ds dsarg;
3330     unsigned short *array = NULL;
3331     struct seminfo seminfo;
3332     abi_long ret = -TARGET_EINVAL;
3333     abi_long err;
3334     cmd &= 0xff;
3335 
3336     switch( cmd ) {
3337 	case GETVAL:
3338 	case SETVAL:
3339             /* In 64-bit cross-endian situations, we will erroneously pick up
3340              * the wrong half of the union for the "val" element.  To rectify
3341              * this, the entire 8-byte structure is byteswapped, followed by
3342              * a swap of the 4-byte val field. In other cases, the data is
3343              * already in proper host byte order. */
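            /* Concretely: with a 64-bit big-endian guest on a little-endian
             * host, a guest val of 0x11223344 arrives here as the 64-bit
             * value 0x11223344xxxxxxxx.  Stored into the host union, the
             * 4-byte val member then aliases the wrong (junk) half, so the
             * whole 8 bytes are swapped back with tswapal() and only then
             * is the 32-bit val field swapped, recovering 0x11223344. */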
3344 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3345 		target_su.buf = tswapal(target_su.buf);
3346 		arg.val = tswap32(target_su.val);
3347 	    } else {
3348 		arg.val = target_su.val;
3349 	    }
3350             ret = get_errno(semctl(semid, semnum, cmd, arg));
3351             break;
3352 	case GETALL:
3353 	case SETALL:
3354             err = target_to_host_semarray(semid, &array, target_su.array);
3355             if (err)
3356                 return err;
3357             arg.array = array;
3358             ret = get_errno(semctl(semid, semnum, cmd, arg));
3359             err = host_to_target_semarray(semid, target_su.array, &array);
3360             if (err)
3361                 return err;
3362             break;
3363 	case IPC_STAT:
3364 	case IPC_SET:
3365 	case SEM_STAT:
3366             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3367             if (err)
3368                 return err;
3369             arg.buf = &dsarg;
3370             ret = get_errno(semctl(semid, semnum, cmd, arg));
3371             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3372             if (err)
3373                 return err;
3374             break;
3375 	case IPC_INFO:
3376 	case SEM_INFO:
3377             arg.__buf = &seminfo;
3378             ret = get_errno(semctl(semid, semnum, cmd, arg));
3379             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3380             if (err)
3381                 return err;
3382             break;
3383 	case IPC_RMID:
3384 	case GETPID:
3385 	case GETNCNT:
3386 	case GETZCNT:
3387             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3388             break;
3389     }
3390 
3391     return ret;
3392 }
3393 
3394 struct target_sembuf {
3395     unsigned short sem_num;
3396     short sem_op;
3397     short sem_flg;
3398 };
3399 
3400 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3401                                              abi_ulong target_addr,
3402                                              unsigned nsops)
3403 {
3404     struct target_sembuf *target_sembuf;
3405     int i;
3406 
3407     target_sembuf = lock_user(VERIFY_READ, target_addr,
3408                               nsops*sizeof(struct target_sembuf), 1);
3409     if (!target_sembuf)
3410         return -TARGET_EFAULT;
3411 
3412     for(i=0; i<nsops; i++) {
3413         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3414         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3415         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3416     }
3417 
3418     unlock_user(target_sembuf, target_addr, 0);
3419 
3420     return 0;
3421 }
3422 
3423 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3424 {
3425     struct sembuf sops[nsops];
3426 
3427     if (target_to_host_sembuf(sops, ptr, nsops))
3428         return -TARGET_EFAULT;
3429 
3430     return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3431 }
3432 
3433 struct target_msqid_ds
3434 {
3435     struct target_ipc_perm msg_perm;
3436     abi_ulong msg_stime;
3437 #if TARGET_ABI_BITS == 32
3438     abi_ulong __unused1;
3439 #endif
3440     abi_ulong msg_rtime;
3441 #if TARGET_ABI_BITS == 32
3442     abi_ulong __unused2;
3443 #endif
3444     abi_ulong msg_ctime;
3445 #if TARGET_ABI_BITS == 32
3446     abi_ulong __unused3;
3447 #endif
3448     abi_ulong __msg_cbytes;
3449     abi_ulong msg_qnum;
3450     abi_ulong msg_qbytes;
3451     abi_ulong msg_lspid;
3452     abi_ulong msg_lrpid;
3453     abi_ulong __unused4;
3454     abi_ulong __unused5;
3455 };
3456 
3457 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3458                                                abi_ulong target_addr)
3459 {
3460     struct target_msqid_ds *target_md;
3461 
3462     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3463         return -TARGET_EFAULT;
3464     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3465         return -TARGET_EFAULT;
3466     host_md->msg_stime = tswapal(target_md->msg_stime);
3467     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3468     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3469     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3470     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3471     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3472     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3473     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3474     unlock_user_struct(target_md, target_addr, 0);
3475     return 0;
3476 }
3477 
3478 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3479                                                struct msqid_ds *host_md)
3480 {
3481     struct target_msqid_ds *target_md;
3482 
3483     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3484         return -TARGET_EFAULT;
3485     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3486         return -TARGET_EFAULT;
3487     target_md->msg_stime = tswapal(host_md->msg_stime);
3488     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3489     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3490     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3491     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3492     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3493     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3494     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3495     unlock_user_struct(target_md, target_addr, 1);
3496     return 0;
3497 }
3498 
3499 struct target_msginfo {
3500     int msgpool;
3501     int msgmap;
3502     int msgmax;
3503     int msgmnb;
3504     int msgmni;
3505     int msgssz;
3506     int msgtql;
3507     unsigned short int msgseg;
3508 };
3509 
3510 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3511                                               struct msginfo *host_msginfo)
3512 {
3513     struct target_msginfo *target_msginfo;
3514     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3515         return -TARGET_EFAULT;
3516     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3517     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3518     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3519     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3520     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3521     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3522     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3523     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3524     unlock_user_struct(target_msginfo, target_addr, 1);
3525     return 0;
3526 }
3527 
3528 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3529 {
3530     struct msqid_ds dsarg;
3531     struct msginfo msginfo;
3532     abi_long ret = -TARGET_EINVAL;
3533 
3534     cmd &= 0xff;
3535 
3536     switch (cmd) {
3537     case IPC_STAT:
3538     case IPC_SET:
3539     case MSG_STAT:
3540         if (target_to_host_msqid_ds(&dsarg,ptr))
3541             return -TARGET_EFAULT;
3542         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3543         if (host_to_target_msqid_ds(ptr,&dsarg))
3544             return -TARGET_EFAULT;
3545         break;
3546     case IPC_RMID:
3547         ret = get_errno(msgctl(msgid, cmd, NULL));
3548         break;
3549     case IPC_INFO:
3550     case MSG_INFO:
3551         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3552         if (host_to_target_msginfo(ptr, &msginfo))
3553             return -TARGET_EFAULT;
3554         break;
3555     }
3556 
3557     return ret;
3558 }
3559 
3560 struct target_msgbuf {
3561     abi_long mtype;
3562     char	mtext[1];
3563 };
3564 
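/*
 * The host counterpart of target_msgbuf is <sys/msg.h>'s struct msgbuf,
 * whose mtype is long-sized and is followed by the message text.  That is
 * why do_msgsnd()/do_msgrcv() below allocate msgsz + sizeof(long) bytes for
 * the host buffer: sizeof(long) covers the host mtype, msgsz covers mtext.
 */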
3565 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3566                                  ssize_t msgsz, int msgflg)
3567 {
3568     struct target_msgbuf *target_mb;
3569     struct msgbuf *host_mb;
3570     abi_long ret = 0;
3571 
3572     if (msgsz < 0) {
3573         return -TARGET_EINVAL;
3574     }
3575 
3576     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3577         return -TARGET_EFAULT;
3578     host_mb = g_try_malloc(msgsz + sizeof(long));
3579     if (!host_mb) {
3580         unlock_user_struct(target_mb, msgp, 0);
3581         return -TARGET_ENOMEM;
3582     }
3583     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3584     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3585     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3586     g_free(host_mb);
3587     unlock_user_struct(target_mb, msgp, 0);
3588 
3589     return ret;
3590 }
3591 
3592 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3593                                  ssize_t msgsz, abi_long msgtyp,
3594                                  int msgflg)
3595 {
3596     struct target_msgbuf *target_mb;
3597     char *target_mtext;
3598     struct msgbuf *host_mb;
3599     abi_long ret = 0;
3600 
3601     if (msgsz < 0) {
3602         return -TARGET_EINVAL;
3603     }
3604 
3605     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3606         return -TARGET_EFAULT;
3607 
3608     host_mb = g_try_malloc(msgsz + sizeof(long));
3609     if (!host_mb) {
3610         ret = -TARGET_ENOMEM;
3611         goto end;
3612     }
3613     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3614 
3615     if (ret > 0) {
3616         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3617         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3618         if (!target_mtext) {
3619             ret = -TARGET_EFAULT;
3620             goto end;
3621         }
3622         memcpy(target_mb->mtext, host_mb->mtext, ret);
3623         unlock_user(target_mtext, target_mtext_addr, ret);
3624     }
3625 
3626     target_mb->mtype = tswapal(host_mb->mtype);
3627 
3628 end:
3629     if (target_mb)
3630         unlock_user_struct(target_mb, msgp, 1);
3631     g_free(host_mb);
3632     return ret;
3633 }
3634 
3635 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3636                                                abi_ulong target_addr)
3637 {
3638     struct target_shmid_ds *target_sd;
3639 
3640     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3641         return -TARGET_EFAULT;
3642     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3643         return -TARGET_EFAULT;
3644     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3645     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3646     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3647     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3648     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3649     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3650     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3651     unlock_user_struct(target_sd, target_addr, 0);
3652     return 0;
3653 }
3654 
3655 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3656                                                struct shmid_ds *host_sd)
3657 {
3658     struct target_shmid_ds *target_sd;
3659 
3660     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3661         return -TARGET_EFAULT;
3662     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3663         return -TARGET_EFAULT;
3664     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3665     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3666     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3667     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3668     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3669     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3670     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3671     unlock_user_struct(target_sd, target_addr, 1);
3672     return 0;
3673 }
3674 
3675 struct  target_shminfo {
3676     abi_ulong shmmax;
3677     abi_ulong shmmin;
3678     abi_ulong shmmni;
3679     abi_ulong shmseg;
3680     abi_ulong shmall;
3681 };
3682 
3683 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3684                                               struct shminfo *host_shminfo)
3685 {
3686     struct target_shminfo *target_shminfo;
3687     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3688         return -TARGET_EFAULT;
3689     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3690     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3691     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3692     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3693     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3694     unlock_user_struct(target_shminfo, target_addr, 1);
3695     return 0;
3696 }
3697 
3698 struct target_shm_info {
3699     int used_ids;
3700     abi_ulong shm_tot;
3701     abi_ulong shm_rss;
3702     abi_ulong shm_swp;
3703     abi_ulong swap_attempts;
3704     abi_ulong swap_successes;
3705 };
3706 
3707 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3708                                                struct shm_info *host_shm_info)
3709 {
3710     struct target_shm_info *target_shm_info;
3711     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3712         return -TARGET_EFAULT;
3713     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3714     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3715     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3716     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3717     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3718     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3719     unlock_user_struct(target_shm_info, target_addr, 1);
3720     return 0;
3721 }
3722 
3723 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3724 {
3725     struct shmid_ds dsarg;
3726     struct shminfo shminfo;
3727     struct shm_info shm_info;
3728     abi_long ret = -TARGET_EINVAL;
3729 
3730     cmd &= 0xff;
3731 
3732     switch(cmd) {
3733     case IPC_STAT:
3734     case IPC_SET:
3735     case SHM_STAT:
3736         if (target_to_host_shmid_ds(&dsarg, buf))
3737             return -TARGET_EFAULT;
3738         ret = get_errno(shmctl(shmid, cmd, &dsarg));
3739         if (host_to_target_shmid_ds(buf, &dsarg))
3740             return -TARGET_EFAULT;
3741         break;
3742     case IPC_INFO:
3743         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3744         if (host_to_target_shminfo(buf, &shminfo))
3745             return -TARGET_EFAULT;
3746         break;
3747     case SHM_INFO:
3748         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3749         if (host_to_target_shm_info(buf, &shm_info))
3750             return -TARGET_EFAULT;
3751         break;
3752     case IPC_RMID:
3753     case SHM_LOCK:
3754     case SHM_UNLOCK:
3755         ret = get_errno(shmctl(shmid, cmd, NULL));
3756         break;
3757     }
3758 
3759     return ret;
3760 }
3761 
3762 #ifndef TARGET_FORCE_SHMLBA
3763 /* For most architectures, SHMLBA is the same as the page size;
3764  * some architectures have larger values, in which case they should
3765  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3766  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3767  * and defining its own value for SHMLBA.
3768  *
3769  * The kernel also permits SHMLBA to be set by the architecture to a
3770  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3771  * this means that addresses are rounded to the large size if
3772  * SHM_RND is set, but addresses not aligned to that size are not rejected
3773  * as long as they are at least page-aligned. Since the only architecture
3774  * which uses this is ia64, this code doesn't provide for that oddity.
3775  */
3776 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
3777 {
3778     return TARGET_PAGE_SIZE;
3779 }
3780 #endif
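/*
 * A target that does need a larger alignment would instead provide something
 * along these lines in its target headers (an illustrative sketch only; the
 * constant and the exact location are arch-specific assumptions):
 *
 *     #define TARGET_FORCE_SHMLBA 1
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 4 * TARGET_PAGE_SIZE;   // e.g. an arch with SHMLBA = 4 pages
 *     }
 */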
3781 
3782 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
3783                                  int shmid, abi_ulong shmaddr, int shmflg)
3784 {
3785     abi_long raddr;
3786     void *host_raddr;
3787     struct shmid_ds shm_info;
3788     int i,ret;
3789     abi_ulong shmlba;
3790 
3791     /* find out the length of the shared memory segment */
3792     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3793     if (is_error(ret)) {
3794         /* can't get length, bail out */
3795         return ret;
3796     }
3797 
3798     shmlba = target_shmlba(cpu_env);
3799 
3800     if (shmaddr & (shmlba - 1)) {
3801         if (shmflg & SHM_RND) {
3802             shmaddr &= ~(shmlba - 1);
3803         } else {
3804             return -TARGET_EINVAL;
3805         }
3806     }
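    /*
     * For example, with shmlba == 0x4000 a request for shmaddr 0x12345678
     * fails with -TARGET_EINVAL unless SHM_RND is set, in which case it is
     * rounded down to 0x12344000.
     */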
3807     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
3808         return -TARGET_EINVAL;
3809     }
3810 
3811     mmap_lock();
3812 
3813     if (shmaddr)
3814         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3815     else {
3816         abi_ulong mmap_start;
3817 
3818         mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3819 
3820         if (mmap_start == -1) {
3821             errno = ENOMEM;
3822             host_raddr = (void *)-1;
3823         } else
3824             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3825     }
3826 
3827     if (host_raddr == (void *)-1) {
3828         mmap_unlock();
3829         return get_errno((long)host_raddr);
3830     }
3831     raddr=h2g((unsigned long)host_raddr);
3832 
3833     page_set_flags(raddr, raddr + shm_info.shm_segsz,
3834                    PAGE_VALID | PAGE_READ |
3835                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3836 
3837     for (i = 0; i < N_SHM_REGIONS; i++) {
3838         if (!shm_regions[i].in_use) {
3839             shm_regions[i].in_use = true;
3840             shm_regions[i].start = raddr;
3841             shm_regions[i].size = shm_info.shm_segsz;
3842             break;
3843         }
3844     }
3845 
3846     mmap_unlock();
3847     return raddr;
3848 
3849 }
3850 
3851 static inline abi_long do_shmdt(abi_ulong shmaddr)
3852 {
3853     int i;
3854     abi_long rv;
3855 
3856     mmap_lock();
3857 
3858     for (i = 0; i < N_SHM_REGIONS; ++i) {
3859         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3860             shm_regions[i].in_use = false;
3861             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3862             break;
3863         }
3864     }
3865     rv = get_errno(shmdt(g2h(shmaddr)));
3866 
3867     mmap_unlock();
3868 
3869     return rv;
3870 }
3871 
3872 #ifdef TARGET_NR_ipc
3873 /* ??? This only works with linear mappings.  */
3874 /* do_ipc() must return target values and target errnos. */
3875 static abi_long do_ipc(CPUArchState *cpu_env,
3876                        unsigned int call, abi_long first,
3877                        abi_long second, abi_long third,
3878                        abi_long ptr, abi_long fifth)
3879 {
3880     int version;
3881     abi_long ret = 0;
3882 
3883     version = call >> 16;
3884     call &= 0xffff;
3885 
3886     switch (call) {
3887     case IPCOP_semop:
3888         ret = do_semop(first, ptr, second);
3889         break;
3890 
3891     case IPCOP_semget:
3892         ret = get_errno(semget(first, second, third));
3893         break;
3894 
3895     case IPCOP_semctl: {
3896         /* The semun argument to semctl is passed by value, so dereference the
3897          * ptr argument. */
3898         abi_ulong atptr;
3899         get_user_ual(atptr, ptr);
3900         ret = do_semctl(first, second, third, atptr);
3901         break;
3902     }
3903 
3904     case IPCOP_msgget:
3905         ret = get_errno(msgget(first, second));
3906         break;
3907 
3908     case IPCOP_msgsnd:
3909         ret = do_msgsnd(first, ptr, second, third);
3910         break;
3911 
3912     case IPCOP_msgctl:
3913         ret = do_msgctl(first, second, ptr);
3914         break;
3915 
3916     case IPCOP_msgrcv:
3917         switch (version) {
3918         case 0:
3919             {
3920                 struct target_ipc_kludge {
3921                     abi_long msgp;
3922                     abi_long msgtyp;
3923                 } *tmp;
3924 
3925                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3926                     ret = -TARGET_EFAULT;
3927                     break;
3928                 }
3929 
3930                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3931 
3932                 unlock_user_struct(tmp, ptr, 0);
3933                 break;
3934             }
3935         default:
3936             ret = do_msgrcv(first, ptr, second, fifth, third);
3937         }
3938         break;
3939 
3940     case IPCOP_shmat:
3941         switch (version) {
3942         default:
3943         {
3944             abi_ulong raddr;
3945             raddr = do_shmat(cpu_env, first, ptr, second);
3946             if (is_error(raddr))
3947                 return get_errno(raddr);
3948             if (put_user_ual(raddr, third))
3949                 return -TARGET_EFAULT;
3950             break;
3951         }
3952         case 1:
3953             ret = -TARGET_EINVAL;
3954             break;
3955         }
3956 	break;
3957     case IPCOP_shmdt:
3958         ret = do_shmdt(ptr);
3959 	break;
3960 
3961     case IPCOP_shmget:
3962 	/* IPC_* flag values are the same on all linux platforms */
3963 	ret = get_errno(shmget(first, second, third));
3964 	break;
3965 
3966 	/* IPC_* and SHM_* command values are the same on all linux platforms */
3967     case IPCOP_shmctl:
3968         ret = do_shmctl(first, second, ptr);
3969         break;
3970     default:
3971 	gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3972 	ret = -TARGET_ENOSYS;
3973 	break;
3974     }
3975     return ret;
3976 }
3977 #endif
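/*
 * As with socketcall above, a guest usually ends up here because its libc
 * funnels System V IPC through the single multiplexed ipc(2) syscall.  A
 * guest-side sketch (assuming such an ABI; illustrative only):
 *
 *     int id = syscall(__NR_ipc, SEMGET, key, nsems, semflg);
 *
 * where the guest's SEMGET corresponds to IPCOP_semget above.  The version
 * field in the top 16 bits of the call number selects between the old and
 * new calling conventions, which is why IPCOP_msgrcv and IPCOP_shmat switch
 * on it.
 */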
3978 
3979 /* kernel structure types definitions */
3980 
3981 #define STRUCT(name, ...) STRUCT_ ## name,
3982 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3983 enum {
3984 #include "syscall_types.h"
3985 STRUCT_MAX
3986 };
3987 #undef STRUCT
3988 #undef STRUCT_SPECIAL
3989 
3990 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
3991 #define STRUCT_SPECIAL(name)
3992 #include "syscall_types.h"
3993 #undef STRUCT
3994 #undef STRUCT_SPECIAL
3995 
3996 typedef struct IOCTLEntry IOCTLEntry;
3997 
3998 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3999                              int fd, int cmd, abi_long arg);
4000 
4001 struct IOCTLEntry {
4002     int target_cmd;
4003     unsigned int host_cmd;
4004     const char *name;
4005     int access;
4006     do_ioctl_fn *do_ioctl;
4007     const argtype arg_type[5];
4008 };
4009 
4010 #define IOC_R 0x0001
4011 #define IOC_W 0x0002
4012 #define IOC_RW (IOC_R | IOC_W)
4013 
4014 #define MAX_STRUCT_SIZE 4096
4015 
4016 #ifdef CONFIG_FIEMAP
4017 /* So fiemap access checks don't overflow on 32 bit systems.
4018  * This is very slightly smaller than the limit imposed by
4019  * the underlying kernel.
4020  */
4021 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4022                             / sizeof(struct fiemap_extent))
4023 
4024 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4025                                        int fd, int cmd, abi_long arg)
4026 {
4027     /* The parameter for this ioctl is a struct fiemap followed
4028      * by an array of struct fiemap_extent whose element count is
4029      * given by fiemap->fm_extent_count. The array is filled in by the
4030      * ioctl.
4031      */
4032     int target_size_in, target_size_out;
4033     struct fiemap *fm;
4034     const argtype *arg_type = ie->arg_type;
4035     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4036     void *argptr, *p;
4037     abi_long ret;
4038     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4039     uint32_t outbufsz;
4040     int free_fm = 0;
4041 
4042     assert(arg_type[0] == TYPE_PTR);
4043     assert(ie->access == IOC_RW);
4044     arg_type++;
4045     target_size_in = thunk_type_size(arg_type, 0);
4046     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4047     if (!argptr) {
4048         return -TARGET_EFAULT;
4049     }
4050     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4051     unlock_user(argptr, arg, 0);
4052     fm = (struct fiemap *)buf_temp;
4053     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4054         return -TARGET_EINVAL;
4055     }
4056 
4057     outbufsz = sizeof (*fm) +
4058         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4059 
4060     if (outbufsz > MAX_STRUCT_SIZE) {
4061         /* We can't fit all the extents into the fixed size buffer.
4062          * Allocate one that is large enough and use it instead.
4063          */
4064         fm = g_try_malloc(outbufsz);
4065         if (!fm) {
4066             return -TARGET_ENOMEM;
4067         }
4068         memcpy(fm, buf_temp, sizeof(struct fiemap));
4069         free_fm = 1;
4070     }
4071     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4072     if (!is_error(ret)) {
4073         target_size_out = target_size_in;
4074         /* An extent_count of 0 means we were only counting the extents
4075          * so there are no structs to copy
4076          */
4077         if (fm->fm_extent_count != 0) {
4078             target_size_out += fm->fm_mapped_extents * extent_size;
4079         }
4080         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4081         if (!argptr) {
4082             ret = -TARGET_EFAULT;
4083         } else {
4084             /* Convert the struct fiemap */
4085             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4086             if (fm->fm_extent_count != 0) {
4087                 p = argptr + target_size_in;
4088                 /* ...and then all the struct fiemap_extents */
4089                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4090                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4091                                   THUNK_TARGET);
4092                     p += extent_size;
4093                 }
4094             }
4095             unlock_user(argptr, arg, target_size_out);
4096         }
4097     }
4098     if (free_fm) {
4099         g_free(fm);
4100     }
4101     return ret;
4102 }
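/*
 * The guest-visible contract converted above is the usual two-step fiemap
 * pattern (guest-side sketch, illustrative only):
 *
 *     struct fiemap probe = { 0 };
 *     probe.fm_length = ~0ULL;
 *     ioctl(fd, FS_IOC_FIEMAP, &probe);   // fm_extent_count == 0: count only
 *
 *     size_t sz = sizeof(struct fiemap) +
 *                 probe.fm_mapped_extents * sizeof(struct fiemap_extent);
 *     struct fiemap *fm = calloc(1, sz);
 *     fm->fm_length = ~0ULL;
 *     fm->fm_extent_count = probe.fm_mapped_extents;
 *     ioctl(fd, FS_IOC_FIEMAP, fm);       // now fills fm_extents[]
 */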
4103 #endif
4104 
4105 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4106                                 int fd, int cmd, abi_long arg)
4107 {
4108     const argtype *arg_type = ie->arg_type;
4109     int target_size;
4110     void *argptr;
4111     int ret;
4112     struct ifconf *host_ifconf;
4113     uint32_t outbufsz;
4114     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4115     int target_ifreq_size;
4116     int nb_ifreq;
4117     int free_buf = 0;
4118     int i;
4119     int target_ifc_len;
4120     abi_long target_ifc_buf;
4121     int host_ifc_len;
4122     char *host_ifc_buf;
4123 
4124     assert(arg_type[0] == TYPE_PTR);
4125     assert(ie->access == IOC_RW);
4126 
4127     arg_type++;
4128     target_size = thunk_type_size(arg_type, 0);
4129 
4130     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4131     if (!argptr)
4132         return -TARGET_EFAULT;
4133     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4134     unlock_user(argptr, arg, 0);
4135 
4136     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4137     target_ifc_len = host_ifconf->ifc_len;
4138     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4139 
4140     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4141     nb_ifreq = target_ifc_len / target_ifreq_size;
4142     host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4143 
4144     outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4145     if (outbufsz > MAX_STRUCT_SIZE) {
4146         /* We can't fit all the ifreq entries into the fixed size buffer.
4147          * Allocate one that is large enough and use it instead.
4148          */
4149         host_ifconf = malloc(outbufsz);
4150         if (!host_ifconf) {
4151             return -TARGET_ENOMEM;
4152         }
4153         memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4154         free_buf = 1;
4155     }
4156     host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
4157 
4158     host_ifconf->ifc_len = host_ifc_len;
4159     host_ifconf->ifc_buf = host_ifc_buf;
4160 
4161     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4162     if (!is_error(ret)) {
4163 	/* convert host ifc_len to target ifc_len */
4164 
4165         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4166         target_ifc_len = nb_ifreq * target_ifreq_size;
4167         host_ifconf->ifc_len = target_ifc_len;
4168 
4169 	/* restore target ifc_buf */
4170 
4171         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4172 
4173 	/* copy struct ifconf to target user */
4174 
4175         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4176         if (!argptr)
4177             return -TARGET_EFAULT;
4178         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4179         unlock_user(argptr, arg, target_size);
4180 
4181 	/* copy ifreq[] to target user */
4182 
4183         argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4184         for (i = 0; i < nb_ifreq ; i++) {
4185             thunk_convert(argptr + i * target_ifreq_size,
4186                           host_ifc_buf + i * sizeof(struct ifreq),
4187                           ifreq_arg_type, THUNK_TARGET);
4188         }
4189         unlock_user(argptr, target_ifc_buf, target_ifc_len);
4190     }
4191 
4192     if (free_buf) {
4193         free(host_ifconf);
4194     }
4195 
4196     return ret;
4197 }
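/*
 * The corresponding guest-side SIOCGIFCONF usage that this converts is the
 * classic pattern (illustrative sketch):
 *
 *     char buf[4096];
 *     struct ifconf ifc;
 *     ifc.ifc_len = sizeof(buf);
 *     ifc.ifc_buf = buf;
 *     ioctl(sock, SIOCGIFCONF, &ifc);
 *     int n = ifc.ifc_len / sizeof(struct ifreq);
 *
 * Because the guest's struct ifreq may differ in size from the host's,
 * ifc_len is scaled between target_ifreq_size and sizeof(struct ifreq) in
 * both directions above.
 */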
4198 
4199 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4200                             int cmd, abi_long arg)
4201 {
4202     void *argptr;
4203     struct dm_ioctl *host_dm;
4204     abi_long guest_data;
4205     uint32_t guest_data_size;
4206     int target_size;
4207     const argtype *arg_type = ie->arg_type;
4208     abi_long ret;
4209     void *big_buf = NULL;
4210     char *host_data;
4211 
4212     arg_type++;
4213     target_size = thunk_type_size(arg_type, 0);
4214     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4215     if (!argptr) {
4216         ret = -TARGET_EFAULT;
4217         goto out;
4218     }
4219     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4220     unlock_user(argptr, arg, 0);
4221 
4222     /* buf_temp is too small, so fetch things into a bigger buffer */
4223     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4224     memcpy(big_buf, buf_temp, target_size);
4225     buf_temp = big_buf;
4226     host_dm = big_buf;
4227 
4228     guest_data = arg + host_dm->data_start;
4229     if ((guest_data - arg) < 0) {
4230         ret = -TARGET_EINVAL;
4231         goto out;
4232     }
4233     guest_data_size = host_dm->data_size - host_dm->data_start;
4234     host_data = (char*)host_dm + host_dm->data_start;
4235 
4236     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4237     if (!argptr) {
4238         ret = -TARGET_EFAULT;
4239         goto out;
4240     }
4241 
4242     switch (ie->host_cmd) {
4243     case DM_REMOVE_ALL:
4244     case DM_LIST_DEVICES:
4245     case DM_DEV_CREATE:
4246     case DM_DEV_REMOVE:
4247     case DM_DEV_SUSPEND:
4248     case DM_DEV_STATUS:
4249     case DM_DEV_WAIT:
4250     case DM_TABLE_STATUS:
4251     case DM_TABLE_CLEAR:
4252     case DM_TABLE_DEPS:
4253     case DM_LIST_VERSIONS:
4254         /* no input data */
4255         break;
4256     case DM_DEV_RENAME:
4257     case DM_DEV_SET_GEOMETRY:
4258         /* data contains only strings */
4259         memcpy(host_data, argptr, guest_data_size);
4260         break;
4261     case DM_TARGET_MSG:
4262         memcpy(host_data, argptr, guest_data_size);
4263         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4264         break;
4265     case DM_TABLE_LOAD:
4266     {
4267         void *gspec = argptr;
4268         void *cur_data = host_data;
4269         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4270         int spec_size = thunk_type_size(arg_type, 0);
4271         int i;
4272 
4273         for (i = 0; i < host_dm->target_count; i++) {
4274             struct dm_target_spec *spec = cur_data;
4275             uint32_t next;
4276             int slen;
4277 
4278             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4279             slen = strlen((char*)gspec + spec_size) + 1;
4280             next = spec->next;
4281             spec->next = sizeof(*spec) + slen;
4282             strcpy((char*)&spec[1], gspec + spec_size);
4283             gspec += next;
4284             cur_data += spec->next;
4285         }
4286         break;
4287     }
4288     default:
4289         ret = -TARGET_EINVAL;
4290         unlock_user(argptr, guest_data, 0);
4291         goto out;
4292     }
4293     unlock_user(argptr, guest_data, 0);
4294 
4295     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4296     if (!is_error(ret)) {
4297         guest_data = arg + host_dm->data_start;
4298         guest_data_size = host_dm->data_size - host_dm->data_start;
4299         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4300         switch (ie->host_cmd) {
4301         case DM_REMOVE_ALL:
4302         case DM_DEV_CREATE:
4303         case DM_DEV_REMOVE:
4304         case DM_DEV_RENAME:
4305         case DM_DEV_SUSPEND:
4306         case DM_DEV_STATUS:
4307         case DM_TABLE_LOAD:
4308         case DM_TABLE_CLEAR:
4309         case DM_TARGET_MSG:
4310         case DM_DEV_SET_GEOMETRY:
4311             /* no return data */
4312             break;
4313         case DM_LIST_DEVICES:
4314         {
4315             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4316             uint32_t remaining_data = guest_data_size;
4317             void *cur_data = argptr;
4318             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4319             int nl_size = 12; /* can't use thunk_size due to alignment */
4320 
4321             while (1) {
4322                 uint32_t next = nl->next;
4323                 if (next) {
4324                     nl->next = nl_size + (strlen(nl->name) + 1);
4325                 }
4326                 if (remaining_data < nl->next) {
4327                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4328                     break;
4329                 }
4330                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4331                 strcpy(cur_data + nl_size, nl->name);
4332                 cur_data += nl->next;
4333                 remaining_data -= nl->next;
4334                 if (!next) {
4335                     break;
4336                 }
4337                 nl = (void*)nl + next;
4338             }
4339             break;
4340         }
4341         case DM_DEV_WAIT:
4342         case DM_TABLE_STATUS:
4343         {
4344             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4345             void *cur_data = argptr;
4346             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4347             int spec_size = thunk_type_size(arg_type, 0);
4348             int i;
4349 
4350             for (i = 0; i < host_dm->target_count; i++) {
4351                 uint32_t next = spec->next;
4352                 int slen = strlen((char*)&spec[1]) + 1;
4353                 spec->next = (cur_data - argptr) + spec_size + slen;
4354                 if (guest_data_size < spec->next) {
4355                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4356                     break;
4357                 }
4358                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4359                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4360                 cur_data = argptr + spec->next;
4361                 spec = (void*)host_dm + host_dm->data_start + next;
4362             }
4363             break;
4364         }
4365         case DM_TABLE_DEPS:
4366         {
4367             void *hdata = (void*)host_dm + host_dm->data_start;
4368             int count = *(uint32_t*)hdata;
4369             uint64_t *hdev = hdata + 8;
4370             uint64_t *gdev = argptr + 8;
4371             int i;
4372 
4373             *(uint32_t*)argptr = tswap32(count);
4374             for (i = 0; i < count; i++) {
4375                 *gdev = tswap64(*hdev);
4376                 gdev++;
4377                 hdev++;
4378             }
4379             break;
4380         }
4381         case DM_LIST_VERSIONS:
4382         {
4383             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4384             uint32_t remaining_data = guest_data_size;
4385             void *cur_data = argptr;
4386             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4387             int vers_size = thunk_type_size(arg_type, 0);
4388 
4389             while (1) {
4390                 uint32_t next = vers->next;
4391                 if (next) {
4392                     vers->next = vers_size + (strlen(vers->name) + 1);
4393                 }
4394                 if (remaining_data < vers->next) {
4395                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4396                     break;
4397                 }
4398                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4399                 strcpy(cur_data + vers_size, vers->name);
4400                 cur_data += vers->next;
4401                 remaining_data -= vers->next;
4402                 if (!next) {
4403                     break;
4404                 }
4405                 vers = (void*)vers + next;
4406             }
4407             break;
4408         }
4409         default:
4410             unlock_user(argptr, guest_data, 0);
4411             ret = -TARGET_EINVAL;
4412             goto out;
4413         }
4414         unlock_user(argptr, guest_data, guest_data_size);
4415 
4416         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4417         if (!argptr) {
4418             ret = -TARGET_EFAULT;
4419             goto out;
4420         }
4421         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4422         unlock_user(argptr, arg, target_size);
4423     }
4424 out:
4425     g_free(big_buf);
4426     return ret;
4427 }
4428 
4429 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4430                                int cmd, abi_long arg)
4431 {
4432     void *argptr;
4433     int target_size;
4434     const argtype *arg_type = ie->arg_type;
4435     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4436     abi_long ret;
4437 
4438     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4439     struct blkpg_partition host_part;
4440 
4441     /* Read and convert blkpg */
4442     arg_type++;
4443     target_size = thunk_type_size(arg_type, 0);
4444     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4445     if (!argptr) {
4446         ret = -TARGET_EFAULT;
4447         goto out;
4448     }
4449     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4450     unlock_user(argptr, arg, 0);
4451 
4452     switch (host_blkpg->op) {
4453     case BLKPG_ADD_PARTITION:
4454     case BLKPG_DEL_PARTITION:
4455         /* payload is struct blkpg_partition */
4456         break;
4457     default:
4458         /* Unknown opcode */
4459         ret = -TARGET_EINVAL;
4460         goto out;
4461     }
4462 
4463     /* Read and convert blkpg->data */
4464     arg = (abi_long)(uintptr_t)host_blkpg->data;
4465     target_size = thunk_type_size(part_arg_type, 0);
4466     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4467     if (!argptr) {
4468         ret = -TARGET_EFAULT;
4469         goto out;
4470     }
4471     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4472     unlock_user(argptr, arg, 0);
4473 
4474     /* Swizzle the data pointer to our local copy and call! */
4475     host_blkpg->data = &host_part;
4476     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4477 
4478 out:
4479     return ret;
4480 }
4481 
4482 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4483                                 int fd, int cmd, abi_long arg)
4484 {
4485     const argtype *arg_type = ie->arg_type;
4486     const StructEntry *se;
4487     const argtype *field_types;
4488     const int *dst_offsets, *src_offsets;
4489     int target_size;
4490     void *argptr;
4491     abi_ulong *target_rt_dev_ptr;
4492     unsigned long *host_rt_dev_ptr;
4493     abi_long ret;
4494     int i;
4495 
4496     assert(ie->access == IOC_W);
4497     assert(*arg_type == TYPE_PTR);
4498     arg_type++;
4499     assert(*arg_type == TYPE_STRUCT);
4500     target_size = thunk_type_size(arg_type, 0);
4501     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4502     if (!argptr) {
4503         return -TARGET_EFAULT;
4504     }
4505     arg_type++;
4506     assert(*arg_type == (int)STRUCT_rtentry);
4507     se = struct_entries + *arg_type++;
4508     assert(se->convert[0] == NULL);
4509     /* convert struct here to be able to catch rt_dev string */
4510     field_types = se->field_types;
4511     dst_offsets = se->field_offsets[THUNK_HOST];
4512     src_offsets = se->field_offsets[THUNK_TARGET];
4513     for (i = 0; i < se->nb_fields; i++) {
4514         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4515             assert(*field_types == TYPE_PTRVOID);
4516             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4517             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4518             if (*target_rt_dev_ptr != 0) {
4519                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4520                                                   tswapal(*target_rt_dev_ptr));
4521                 if (!*host_rt_dev_ptr) {
4522                     unlock_user(argptr, arg, 0);
4523                     return -TARGET_EFAULT;
4524                 }
4525             } else {
4526                 *host_rt_dev_ptr = 0;
4527             }
4528             field_types++;
4529             continue;
4530         }
4531         field_types = thunk_convert(buf_temp + dst_offsets[i],
4532                                     argptr + src_offsets[i],
4533                                     field_types, THUNK_HOST);
4534     }
4535     unlock_user(argptr, arg, 0);
4536 
4537     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4538     if (*host_rt_dev_ptr != 0) {
4539         unlock_user((void *)*host_rt_dev_ptr,
4540                     *target_rt_dev_ptr, 0);
4541     }
4542     return ret;
4543 }
4544 
4545 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4546                                      int fd, int cmd, abi_long arg)
4547 {
4548     int sig = target_to_host_signal(arg);
4549     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4550 }
4551 
4552 #ifdef TIOCGPTPEER
4553 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
4554                                      int fd, int cmd, abi_long arg)
4555 {
4556     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
4557     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
4558 }
4559 #endif
4560 
4561 static IOCTLEntry ioctl_entries[] = {
4562 #define IOCTL(cmd, access, ...) \
4563     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
4564 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
4565     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
4566 #define IOCTL_IGNORE(cmd) \
4567     { TARGET_ ## cmd, 0, #cmd },
4568 #include "ioctls.h"
4569     { 0, 0, },
4570 };
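/*
 * As an illustration of the table layout, an ioctls.h entry of the form
 *
 *     IOCTL(BLKGETSIZE64, IOC_R, MK_PTR(TYPE_ULONGLONG))
 *
 * (a representative example; see ioctls.h for the real list) expands via the
 * IOCTL() macro above to
 *
 *     { TARGET_BLKGETSIZE64, BLKGETSIZE64, "BLKGETSIZE64", IOC_R, 0,
 *       { MK_PTR(TYPE_ULONGLONG) } },
 *
 * i.e. target command, host command, a name for logging, the access
 * direction, no special handler, and the thunk description of the argument.
 */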
4571 
4572 /* ??? Implement proper locking for ioctls.  */
4573 /* do_ioctl() must return target values and target errnos. */
4574 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4575 {
4576     const IOCTLEntry *ie;
4577     const argtype *arg_type;
4578     abi_long ret;
4579     uint8_t buf_temp[MAX_STRUCT_SIZE];
4580     int target_size;
4581     void *argptr;
4582 
4583     ie = ioctl_entries;
4584     for(;;) {
4585         if (ie->target_cmd == 0) {
4586             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4587             return -TARGET_ENOSYS;
4588         }
4589         if (ie->target_cmd == cmd)
4590             break;
4591         ie++;
4592     }
4593     arg_type = ie->arg_type;
4594     if (ie->do_ioctl) {
4595         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4596     } else if (!ie->host_cmd) {
4597         /* Some architectures define BSD ioctls in their headers
4598            that are not implemented in Linux.  */
4599         return -TARGET_ENOSYS;
4600     }
4601 
4602     switch(arg_type[0]) {
4603     case TYPE_NULL:
4604         /* no argument */
4605         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
4606         break;
4607     case TYPE_PTRVOID:
4608     case TYPE_INT:
4609         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
4610         break;
4611     case TYPE_PTR:
4612         arg_type++;
4613         target_size = thunk_type_size(arg_type, 0);
4614         switch(ie->access) {
4615         case IOC_R:
4616             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4617             if (!is_error(ret)) {
4618                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4619                 if (!argptr)
4620                     return -TARGET_EFAULT;
4621                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4622                 unlock_user(argptr, arg, target_size);
4623             }
4624             break;
4625         case IOC_W:
4626             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4627             if (!argptr)
4628                 return -TARGET_EFAULT;
4629             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4630             unlock_user(argptr, arg, 0);
4631             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4632             break;
4633         default:
4634         case IOC_RW:
4635             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4636             if (!argptr)
4637                 return -TARGET_EFAULT;
4638             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4639             unlock_user(argptr, arg, 0);
4640             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4641             if (!is_error(ret)) {
4642                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4643                 if (!argptr)
4644                     return -TARGET_EFAULT;
4645                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4646                 unlock_user(argptr, arg, target_size);
4647             }
4648             break;
4649         }
4650         break;
4651     default:
4652         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4653                  (long)cmd, arg_type[0]);
4654         ret = -TARGET_ENOSYS;
4655         break;
4656     }
4657     return ret;
4658 }
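/* For the TYPE_PTR cases above the flow is: find the command in
 * ioctl_entries[], convert the guest structure into host layout in buf_temp
 * with thunk_convert() for IOC_W/IOC_RW, issue the host ioctl on buf_temp,
 * and for IOC_R/IOC_RW convert the result back into guest layout.  Purely as
 * an illustration of an IOC_R command, a caller might do:
 *
 *     ret = do_ioctl(fd, TARGET_TIOCGWINSZ, winsz_guest_addr);
 *     // on success the guest buffer now holds a target-layout winsize
 */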
4659 
4660 static const bitmask_transtbl iflag_tbl[] = {
4661         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4662         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4663         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4664         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4665         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4666         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4667         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4668         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4669         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4670         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4671         { TARGET_IXON, TARGET_IXON, IXON, IXON },
4672         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4673         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4674         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4675         { 0, 0, 0, 0 }
4676 };
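/* bitmask_transtbl rows are { target_mask, target_bits, host_mask, host_bits }.
 * Conversion works roughly as: for each row, if (value & x_mask) == x_bits
 * then OR y_bits into the result.  Single-bit flags such as TARGET_IGNBRK use
 * the same value for mask and bits; multi-bit fields (NLDLY, CRDLY, CBAUD and
 * CSIZE below) list one row per possible field value. */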
4677 
4678 static const bitmask_transtbl oflag_tbl[] = {
4679 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4680 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4681 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4682 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4683 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4684 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4685 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4686 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4687 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4688 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4689 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4690 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4691 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4692 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4693 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4694 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4695 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4696 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4697 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4698 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4699 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4700 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4701 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4702 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4703 	{ 0, 0, 0, 0 }
4704 };
4705 
4706 static const bitmask_transtbl cflag_tbl[] = {
4707 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4708 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4709 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4710 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4711 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4712 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4713 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4714 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4715 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4716 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4717 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
4718 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
4719 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
4720 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4721 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4722 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4723 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4724 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4725 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4726 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4727 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4728 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4729 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4730 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4731 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4732 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4733 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4734 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4735 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4736 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4737 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
4738 	{ 0, 0, 0, 0 }
4739 };
4740 
4741 static const bitmask_transtbl lflag_tbl[] = {
4742 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4743 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4744 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4745 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4746 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4747 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4748 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4749 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4750 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4751 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4752 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4753 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4754 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
4755 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
4756 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
4757 	{ 0, 0, 0, 0 }
4758 };
4759 
4760 static void target_to_host_termios (void *dst, const void *src)
4761 {
4762     struct host_termios *host = dst;
4763     const struct target_termios *target = src;
4764 
4765     host->c_iflag =
4766         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
4767     host->c_oflag =
4768         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
4769     host->c_cflag =
4770         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
4771     host->c_lflag =
4772         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
4773     host->c_line = target->c_line;
4774 
4775     memset(host->c_cc, 0, sizeof(host->c_cc));
4776     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
4777     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
4778     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
4779     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
4780     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
4781     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
4782     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
4783     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
4784     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
4785     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
4786     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
4787     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
4788     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
4789     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
4790     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
4791     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
4792     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
4793 }
4794 
4795 static void host_to_target_termios (void *dst, const void *src)
4796 {
4797     struct target_termios *target = dst;
4798     const struct host_termios *host = src;
4799 
4800     target->c_iflag =
4801         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
4802     target->c_oflag =
4803         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
4804     target->c_cflag =
4805         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
4806     target->c_lflag =
4807         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
4808     target->c_line = host->c_line;
4809 
4810     memset(target->c_cc, 0, sizeof(target->c_cc));
4811     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
4812     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
4813     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
4814     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
4815     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
4816     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
4817     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
4818     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
4819     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
4820     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
4821     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
4822     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
4823     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
4824     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
4825     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
4826     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
4827     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
4828 }
4829 
4830 static const StructEntry struct_termios_def = {
4831     .convert = { host_to_target_termios, target_to_host_termios },
4832     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
4833     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
4834 };
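/* Registering this StructEntry (via the STRUCT_SPECIAL() expansion in
 * syscall_init() below) means that an ioctl argument described as
 * MK_STRUCT(STRUCT_termios) is converted by the two functions above rather
 * than by the generic field-by-field thunk, which is what allows the c_cc[]
 * indices to be remapped between target and host. */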
4835 
4836 static bitmask_transtbl mmap_flags_tbl[] = {
4837     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
4838     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
4839     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
4840     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
4841       MAP_ANONYMOUS, MAP_ANONYMOUS },
4842     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
4843       MAP_GROWSDOWN, MAP_GROWSDOWN },
4844     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
4845       MAP_DENYWRITE, MAP_DENYWRITE },
4846     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
4847       MAP_EXECUTABLE, MAP_EXECUTABLE },
4848     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
4849     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
4850       MAP_NORESERVE, MAP_NORESERVE },
4851     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
4852     /* MAP_STACK has been ignored by the kernel for quite some time.
4853        Recognize it for the target, but do not pass it through to
4854        the host.  */
4855     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
4856     { 0, 0, 0, 0 }
4857 };
4858 
4859 #if defined(TARGET_I386)
4860 
4861 /* NOTE: there is really only one LDT shared by all the threads */
4862 static uint8_t *ldt_table;
4863 
4864 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
4865 {
4866     int size;
4867     void *p;
4868 
4869     if (!ldt_table)
4870         return 0;
4871     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
4872     if (size > bytecount)
4873         size = bytecount;
4874     p = lock_user(VERIFY_WRITE, ptr, size, 0);
4875     if (!p)
4876         return -TARGET_EFAULT;
4877     /* ??? Should this be byteswapped?  */
4878     memcpy(p, ldt_table, size);
4879     unlock_user(p, ptr, size);
4880     return size;
4881 }
4882 
4883 /* XXX: add locking support */
4884 static abi_long write_ldt(CPUX86State *env,
4885                           abi_ulong ptr, unsigned long bytecount, int oldmode)
4886 {
4887     struct target_modify_ldt_ldt_s ldt_info;
4888     struct target_modify_ldt_ldt_s *target_ldt_info;
4889     int seg_32bit, contents, read_exec_only, limit_in_pages;
4890     int seg_not_present, useable, lm;
4891     uint32_t *lp, entry_1, entry_2;
4892 
4893     if (bytecount != sizeof(ldt_info))
4894         return -TARGET_EINVAL;
4895     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
4896         return -TARGET_EFAULT;
4897     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4898     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4899     ldt_info.limit = tswap32(target_ldt_info->limit);
4900     ldt_info.flags = tswap32(target_ldt_info->flags);
4901     unlock_user_struct(target_ldt_info, ptr, 0);
4902 
4903     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4904         return -TARGET_EINVAL;
4905     seg_32bit = ldt_info.flags & 1;
4906     contents = (ldt_info.flags >> 1) & 3;
4907     read_exec_only = (ldt_info.flags >> 3) & 1;
4908     limit_in_pages = (ldt_info.flags >> 4) & 1;
4909     seg_not_present = (ldt_info.flags >> 5) & 1;
4910     useable = (ldt_info.flags >> 6) & 1;
4911 #ifdef TARGET_ABI32
4912     lm = 0;
4913 #else
4914     lm = (ldt_info.flags >> 7) & 1;
4915 #endif
4916     if (contents == 3) {
4917         if (oldmode)
4918             return -TARGET_EINVAL;
4919         if (seg_not_present == 0)
4920             return -TARGET_EINVAL;
4921     }
4922     /* allocate the LDT */
4923     if (!ldt_table) {
4924         env->ldt.base = target_mmap(0,
4925                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4926                                     PROT_READ|PROT_WRITE,
4927                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4928         if (env->ldt.base == -1)
4929             return -TARGET_ENOMEM;
4930         memset(g2h(env->ldt.base), 0,
4931                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4932         env->ldt.limit = 0xffff;
4933         ldt_table = g2h(env->ldt.base);
4934     }
4935 
4936     /* NOTE: same code as Linux kernel */
4937     /* Allow LDTs to be cleared by the user. */
4938     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4939         if (oldmode ||
4940             (contents == 0		&&
4941              read_exec_only == 1	&&
4942              seg_32bit == 0		&&
4943              limit_in_pages == 0	&&
4944              seg_not_present == 1	&&
4945              useable == 0 )) {
4946             entry_1 = 0;
4947             entry_2 = 0;
4948             goto install;
4949         }
4950     }
4951 
4952     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4953         (ldt_info.limit & 0x0ffff);
4954     entry_2 = (ldt_info.base_addr & 0xff000000) |
4955         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4956         (ldt_info.limit & 0xf0000) |
4957         ((read_exec_only ^ 1) << 9) |
4958         (contents << 10) |
4959         ((seg_not_present ^ 1) << 15) |
4960         (seg_32bit << 22) |
4961         (limit_in_pages << 23) |
4962         (lm << 21) |
4963         0x7000;
4964     if (!oldmode)
4965         entry_2 |= (useable << 20);
4966 
4967     /* Install the new entry ...  */
4968 install:
4969     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4970     lp[0] = tswap32(entry_1);
4971     lp[1] = tswap32(entry_2);
4972     return 0;
4973 }
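/* entry_1/entry_2 above follow the usual x86 descriptor word layout: entry_1
 * holds base[15:0] in its upper half and limit[15:0] in its lower half, while
 * entry_2 packs base[31:24], G (limit_in_pages), D/B (seg_32bit), L (lm),
 * AVL (useable), limit[19:16], P, DPL=3 plus S (the 0x7000 constant), the
 * type bits derived from contents/read_exec_only, and base[23:16].  For
 * example, a flat 32-bit writable data segment (base 0, limit 0xfffff in
 * pages) works out to roughly entry_1 = 0x0000ffff, entry_2 = 0x00cff200. */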
4974 
4975 /* i386-specific (and somewhat odd) syscalls */
4976 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4977                               unsigned long bytecount)
4978 {
4979     abi_long ret;
4980 
4981     switch (func) {
4982     case 0:
4983         ret = read_ldt(ptr, bytecount);
4984         break;
4985     case 1:
4986         ret = write_ldt(env, ptr, bytecount, 1);
4987         break;
4988     case 0x11:
4989         ret = write_ldt(env, ptr, bytecount, 0);
4990         break;
4991     default:
4992         ret = -TARGET_ENOSYS;
4993         break;
4994     }
4995     return ret;
4996 }
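/* func follows the kernel's modify_ldt() convention: 0 reads the table,
 * 1 writes an entry using the legacy ("oldmode") rules, 0x11 writes using
 * the current rules, and anything else is rejected. */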
4997 
4998 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4999 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5000 {
5001     uint64_t *gdt_table = g2h(env->gdt.base);
5002     struct target_modify_ldt_ldt_s ldt_info;
5003     struct target_modify_ldt_ldt_s *target_ldt_info;
5004     int seg_32bit, contents, read_exec_only, limit_in_pages;
5005     int seg_not_present, useable, lm;
5006     uint32_t *lp, entry_1, entry_2;
5007     int i;
5008 
5009     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5010     if (!target_ldt_info)
5011         return -TARGET_EFAULT;
5012     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5013     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5014     ldt_info.limit = tswap32(target_ldt_info->limit);
5015     ldt_info.flags = tswap32(target_ldt_info->flags);
5016     if (ldt_info.entry_number == -1) {
5017         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5018             if (gdt_table[i] == 0) {
5019                 ldt_info.entry_number = i;
5020                 target_ldt_info->entry_number = tswap32(i);
5021                 break;
5022             }
5023         }
5024     }
5025     unlock_user_struct(target_ldt_info, ptr, 1);
5026 
5027     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5028         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5029            return -TARGET_EINVAL;
5030     seg_32bit = ldt_info.flags & 1;
5031     contents = (ldt_info.flags >> 1) & 3;
5032     read_exec_only = (ldt_info.flags >> 3) & 1;
5033     limit_in_pages = (ldt_info.flags >> 4) & 1;
5034     seg_not_present = (ldt_info.flags >> 5) & 1;
5035     useable = (ldt_info.flags >> 6) & 1;
5036 #ifdef TARGET_ABI32
5037     lm = 0;
5038 #else
5039     lm = (ldt_info.flags >> 7) & 1;
5040 #endif
5041 
5042     if (contents == 3) {
5043         if (seg_not_present == 0)
5044             return -TARGET_EINVAL;
5045     }
5046 
5047     /* NOTE: same code as Linux kernel */
5048     /* Allow LDTs to be cleared by the user. */
5049     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5050         if ((contents == 0             &&
5051              read_exec_only == 1       &&
5052              seg_32bit == 0            &&
5053              limit_in_pages == 0       &&
5054              seg_not_present == 1      &&
5055              useable == 0 )) {
5056             entry_1 = 0;
5057             entry_2 = 0;
5058             goto install;
5059         }
5060     }
5061 
5062     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5063         (ldt_info.limit & 0x0ffff);
5064     entry_2 = (ldt_info.base_addr & 0xff000000) |
5065         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5066         (ldt_info.limit & 0xf0000) |
5067         ((read_exec_only ^ 1) << 9) |
5068         (contents << 10) |
5069         ((seg_not_present ^ 1) << 15) |
5070         (seg_32bit << 22) |
5071         (limit_in_pages << 23) |
5072         (useable << 20) |
5073         (lm << 21) |
5074         0x7000;
5075 
5076     /* Install the new entry ...  */
5077 install:
5078     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5079     lp[0] = tswap32(entry_1);
5080     lp[1] = tswap32(entry_2);
5081     return 0;
5082 }
5083 
5084 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5085 {
5086     struct target_modify_ldt_ldt_s *target_ldt_info;
5087     uint64_t *gdt_table = g2h(env->gdt.base);
5088     uint32_t base_addr, limit, flags;
5089     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5090     int seg_not_present, useable, lm;
5091     uint32_t *lp, entry_1, entry_2;
5092 
5093     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5094     if (!target_ldt_info)
5095         return -TARGET_EFAULT;
5096     idx = tswap32(target_ldt_info->entry_number);
5097     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5098         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5099         unlock_user_struct(target_ldt_info, ptr, 1);
5100         return -TARGET_EINVAL;
5101     }
5102     lp = (uint32_t *)(gdt_table + idx);
5103     entry_1 = tswap32(lp[0]);
5104     entry_2 = tswap32(lp[1]);
5105 
5106     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5107     contents = (entry_2 >> 10) & 3;
5108     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5109     seg_32bit = (entry_2 >> 22) & 1;
5110     limit_in_pages = (entry_2 >> 23) & 1;
5111     useable = (entry_2 >> 20) & 1;
5112 #ifdef TARGET_ABI32
5113     lm = 0;
5114 #else
5115     lm = (entry_2 >> 21) & 1;
5116 #endif
5117     flags = (seg_32bit << 0) | (contents << 1) |
5118         (read_exec_only << 3) | (limit_in_pages << 4) |
5119         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5120     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5121     base_addr = (entry_1 >> 16) |
5122         (entry_2 & 0xff000000) |
5123         ((entry_2 & 0xff) << 16);
5124     target_ldt_info->base_addr = tswapal(base_addr);
5125     target_ldt_info->limit = tswap32(limit);
5126     target_ldt_info->flags = tswap32(flags);
5127     unlock_user_struct(target_ldt_info, ptr, 1);
5128     return 0;
5129 }
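/* The flags word used by do_set_thread_area()/do_get_thread_area() (and by
 * write_ldt() above) mirrors the Linux user_desc bit layout: bit 0 seg_32bit,
 * bits 1-2 contents, bit 3 read_exec_only, bit 4 limit_in_pages, bit 5
 * seg_not_present, bit 6 useable and, outside TARGET_ABI32, bit 7 lm. */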
5130 #endif /* TARGET_I386 && TARGET_ABI32 */
5131 
5132 #ifndef TARGET_ABI32
5133 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5134 {
5135     abi_long ret = 0;
5136     abi_ulong val;
5137     int idx;
5138 
5139     switch(code) {
5140     case TARGET_ARCH_SET_GS:
5141     case TARGET_ARCH_SET_FS:
5142         if (code == TARGET_ARCH_SET_GS)
5143             idx = R_GS;
5144         else
5145             idx = R_FS;
5146         cpu_x86_load_seg(env, idx, 0);
5147         env->segs[idx].base = addr;
5148         break;
5149     case TARGET_ARCH_GET_GS:
5150     case TARGET_ARCH_GET_FS:
5151         if (code == TARGET_ARCH_GET_GS)
5152             idx = R_GS;
5153         else
5154             idx = R_FS;
5155         val = env->segs[idx].base;
5156         if (put_user(val, addr, abi_ulong))
5157             ret = -TARGET_EFAULT;
5158         break;
5159     default:
5160         ret = -TARGET_EINVAL;
5161         break;
5162     }
5163     return ret;
5164 }
5165 #endif
5166 
5167 #endif /* defined(TARGET_I386) */
5168 
5169 #define NEW_STACK_SIZE 0x40000
5170 
5171 
5172 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5173 typedef struct {
5174     CPUArchState *env;
5175     pthread_mutex_t mutex;
5176     pthread_cond_t cond;
5177     pthread_t thread;
5178     uint32_t tid;
5179     abi_ulong child_tidptr;
5180     abi_ulong parent_tidptr;
5181     sigset_t sigmask;
5182 } new_thread_info;
5183 
5184 static void *clone_func(void *arg)
5185 {
5186     new_thread_info *info = arg;
5187     CPUArchState *env;
5188     CPUState *cpu;
5189     TaskState *ts;
5190 
5191     rcu_register_thread();
5192     tcg_register_thread();
5193     env = info->env;
5194     cpu = ENV_GET_CPU(env);
5195     thread_cpu = cpu;
5196     ts = (TaskState *)cpu->opaque;
5197     info->tid = gettid();
5198     task_settid(ts);
5199     if (info->child_tidptr)
5200         put_user_u32(info->tid, info->child_tidptr);
5201     if (info->parent_tidptr)
5202         put_user_u32(info->tid, info->parent_tidptr);
5203     /* Enable signals.  */
5204     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5205     /* Signal to the parent that we're ready.  */
5206     pthread_mutex_lock(&info->mutex);
5207     pthread_cond_broadcast(&info->cond);
5208     pthread_mutex_unlock(&info->mutex);
5209     /* Wait until the parent has finished initializing the tls state.  */
5210     pthread_mutex_lock(&clone_lock);
5211     pthread_mutex_unlock(&clone_lock);
5212     cpu_loop(env);
5213     /* never exits */
5214     return NULL;
5215 }
5216 
5217 /* do_fork() must return host values and target errnos (unlike most
5218    do_*() functions). */
5219 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5220                    abi_ulong parent_tidptr, target_ulong newtls,
5221                    abi_ulong child_tidptr)
5222 {
5223     CPUState *cpu = ENV_GET_CPU(env);
5224     int ret;
5225     TaskState *ts;
5226     CPUState *new_cpu;
5227     CPUArchState *new_env;
5228     sigset_t sigmask;
5229 
5230     flags &= ~CLONE_IGNORED_FLAGS;
5231 
5232     /* Emulate vfork() with fork() */
5233     if (flags & CLONE_VFORK)
5234         flags &= ~(CLONE_VFORK | CLONE_VM);
5235 
5236     if (flags & CLONE_VM) {
5237         TaskState *parent_ts = (TaskState *)cpu->opaque;
5238         new_thread_info info;
5239         pthread_attr_t attr;
5240 
5241         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5242             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5243             return -TARGET_EINVAL;
5244         }
5245 
5246         ts = g_new0(TaskState, 1);
5247         init_task_state(ts);
5248 
5249         /* Grab a mutex so that thread setup appears atomic.  */
5250         pthread_mutex_lock(&clone_lock);
5251 
5252         /* we create a new CPU instance. */
5253         new_env = cpu_copy(env);
5254         /* Init regs that differ from the parent.  */
5255         cpu_clone_regs(new_env, newsp);
5256         new_cpu = ENV_GET_CPU(new_env);
5257         new_cpu->opaque = ts;
5258         ts->bprm = parent_ts->bprm;
5259         ts->info = parent_ts->info;
5260         ts->signal_mask = parent_ts->signal_mask;
5261 
5262         if (flags & CLONE_CHILD_CLEARTID) {
5263             ts->child_tidptr = child_tidptr;
5264         }
5265 
5266         if (flags & CLONE_SETTLS) {
5267             cpu_set_tls (new_env, newtls);
5268         }
5269 
5270         memset(&info, 0, sizeof(info));
5271         pthread_mutex_init(&info.mutex, NULL);
5272         pthread_mutex_lock(&info.mutex);
5273         pthread_cond_init(&info.cond, NULL);
5274         info.env = new_env;
5275         if (flags & CLONE_CHILD_SETTID) {
5276             info.child_tidptr = child_tidptr;
5277         }
5278         if (flags & CLONE_PARENT_SETTID) {
5279             info.parent_tidptr = parent_tidptr;
5280         }
5281 
5282         ret = pthread_attr_init(&attr);
5283         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5284         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5285         /* It is not safe to deliver signals until the child has finished
5286            initializing, so temporarily block all signals.  */
5287         sigfillset(&sigmask);
5288         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5289 
5290         /* If this is our first additional thread, we need to ensure we
5291          * generate code for parallel execution and flush old translations.
5292          */
5293         if (!parallel_cpus) {
5294             parallel_cpus = true;
5295             tb_flush(cpu);
5296         }
5297 
5298         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5299         /* TODO: Free new CPU state if thread creation failed.  */
5300 
5301         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5302         pthread_attr_destroy(&attr);
5303         if (ret == 0) {
5304             /* Wait for the child to initialize.  */
5305             pthread_cond_wait(&info.cond, &info.mutex);
5306             ret = info.tid;
5307         } else {
5308             ret = -1;
5309         }
5310         pthread_mutex_unlock(&info.mutex);
5311         pthread_cond_destroy(&info.cond);
5312         pthread_mutex_destroy(&info.mutex);
5313         pthread_mutex_unlock(&clone_lock);
5314     } else {
5315         /* if CLONE_VM is not set, we consider it a fork */
5316         if (flags & CLONE_INVALID_FORK_FLAGS) {
5317             return -TARGET_EINVAL;
5318         }
5319 
5320         /* We can't support custom termination signals */
5321         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5322             return -TARGET_EINVAL;
5323         }
5324 
5325         if (block_signals()) {
5326             return -TARGET_ERESTARTSYS;
5327         }
5328 
5329         fork_start();
5330         ret = fork();
5331         if (ret == 0) {
5332             /* Child Process.  */
5333             cpu_clone_regs(env, newsp);
5334             fork_end(1);
5335             /* There is a race condition here.  The parent process could
5336                theoretically read the TID in the child process before the child
5337                tid is set.  This would require using either ptrace
5338                (not implemented) or having *_tidptr to point at a shared memory
5339                mapping.  We can't repeat the spinlock hack used above because
5340                the child process gets its own copy of the lock.  */
5341             if (flags & CLONE_CHILD_SETTID)
5342                 put_user_u32(gettid(), child_tidptr);
5343             if (flags & CLONE_PARENT_SETTID)
5344                 put_user_u32(gettid(), parent_tidptr);
5345             ts = (TaskState *)cpu->opaque;
5346             if (flags & CLONE_SETTLS)
5347                 cpu_set_tls (env, newtls);
5348             if (flags & CLONE_CHILD_CLEARTID)
5349                 ts->child_tidptr = child_tidptr;
5350         } else {
5351             fork_end(0);
5352         }
5353     }
5354     return ret;
5355 }
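/* The CLONE_VM path above relies on a two-stage handshake: the parent holds
 * clone_lock across CPU and TLS setup, while the child (clone_func) publishes
 * its tid, signals info.cond so the parent's pthread_cond_wait() can return,
 * and then briefly acquires clone_lock itself so it cannot enter cpu_loop()
 * before the parent has finished initializing the thread state. */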
5356 
5357 /* warning: doesn't handle Linux-specific flags... */
5358 static int target_to_host_fcntl_cmd(int cmd)
5359 {
5360     int ret;
5361 
5362     switch(cmd) {
5363     case TARGET_F_DUPFD:
5364     case TARGET_F_GETFD:
5365     case TARGET_F_SETFD:
5366     case TARGET_F_GETFL:
5367     case TARGET_F_SETFL:
5368         ret = cmd;
5369         break;
5370     case TARGET_F_GETLK:
5371         ret = F_GETLK64;
5372         break;
5373     case TARGET_F_SETLK:
5374         ret = F_SETLK64;
5375         break;
5376     case TARGET_F_SETLKW:
5377         ret = F_SETLKW64;
5378         break;
5379     case TARGET_F_GETOWN:
5380         ret = F_GETOWN;
5381         break;
5382     case TARGET_F_SETOWN:
5383         ret = F_SETOWN;
5384         break;
5385     case TARGET_F_GETSIG:
5386         ret = F_GETSIG;
5387         break;
5388     case TARGET_F_SETSIG:
5389         ret = F_SETSIG;
5390         break;
5391 #if TARGET_ABI_BITS == 32
5392     case TARGET_F_GETLK64:
5393         ret = F_GETLK64;
5394         break;
5395     case TARGET_F_SETLK64:
5396         ret = F_SETLK64;
5397         break;
5398     case TARGET_F_SETLKW64:
5399         ret = F_SETLKW64;
5400         break;
5401 #endif
5402     case TARGET_F_SETLEASE:
5403         ret = F_SETLEASE;
5404         break;
5405     case TARGET_F_GETLEASE:
5406         ret = F_GETLEASE;
5407         break;
5408 #ifdef F_DUPFD_CLOEXEC
5409     case TARGET_F_DUPFD_CLOEXEC:
5410         ret = F_DUPFD_CLOEXEC;
5411         break;
5412 #endif
5413     case TARGET_F_NOTIFY:
5414         ret = F_NOTIFY;
5415         break;
5416 #ifdef F_GETOWN_EX
5417     case TARGET_F_GETOWN_EX:
5418         ret = F_GETOWN_EX;
5419         break;
5420 #endif
5421 #ifdef F_SETOWN_EX
5422     case TARGET_F_SETOWN_EX:
5423         ret = F_SETOWN_EX;
5424         break;
5425 #endif
5426 #ifdef F_SETPIPE_SZ
5427     case TARGET_F_SETPIPE_SZ:
5428         ret = F_SETPIPE_SZ;
5429         break;
5430     case TARGET_F_GETPIPE_SZ:
5431         ret = F_GETPIPE_SZ;
5432         break;
5433 #endif
5434     default:
5435         ret = -TARGET_EINVAL;
5436         break;
5437     }
5438 
5439 #if defined(__powerpc64__)
5440     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, values
5441      * that are not supported by the kernel. The glibc fcntl call actually
5442      * adjusts them to 5, 6 and 7 before making the syscall. Since we make
5443      * the syscall directly, adjust to what is supported by the kernel.
5444      */
5445     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5446         ret -= F_GETLK64 - 5;
5447     }
5448 #endif
5449 
5450     return ret;
5451 }
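/* Note that plain TARGET_F_GETLK/F_SETLK/F_SETLKW map to the host *LK64
 * commands: do_fcntl() below always works with a host struct flock64, so
 * using the 64-bit variants keeps large file offsets intact even when the
 * guest passed a 32-bit struct flock. */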
5452 
5453 #define FLOCK_TRANSTBL \
5454     switch (type) { \
5455     TRANSTBL_CONVERT(F_RDLCK); \
5456     TRANSTBL_CONVERT(F_WRLCK); \
5457     TRANSTBL_CONVERT(F_UNLCK); \
5458     TRANSTBL_CONVERT(F_EXLCK); \
5459     TRANSTBL_CONVERT(F_SHLCK); \
5460     }
5461 
5462 static int target_to_host_flock(int type)
5463 {
5464 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5465     FLOCK_TRANSTBL
5466 #undef  TRANSTBL_CONVERT
5467     return -TARGET_EINVAL;
5468 }
5469 
5470 static int host_to_target_flock(int type)
5471 {
5472 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5473     FLOCK_TRANSTBL
5474 #undef  TRANSTBL_CONVERT
5475     /* if we don't know how to convert the value coming
5476      * from the host, we copy it to the target field as-is
5477      */
5478     return type;
5479 }
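/* FLOCK_TRANSTBL is expanded twice with different definitions of
 * TRANSTBL_CONVERT, so a single list of lock types generates both switch
 * bodies.  In target_to_host_flock(), for instance, TRANSTBL_CONVERT(F_RDLCK)
 * expands to "case TARGET_F_RDLCK: return F_RDLCK;". */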
5480 
5481 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5482                                             abi_ulong target_flock_addr)
5483 {
5484     struct target_flock *target_fl;
5485     int l_type;
5486 
5487     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5488         return -TARGET_EFAULT;
5489     }
5490 
5491     __get_user(l_type, &target_fl->l_type);
5492     l_type = target_to_host_flock(l_type);
5493     if (l_type < 0) {
5494         return l_type;
5495     }
5496     fl->l_type = l_type;
5497     __get_user(fl->l_whence, &target_fl->l_whence);
5498     __get_user(fl->l_start, &target_fl->l_start);
5499     __get_user(fl->l_len, &target_fl->l_len);
5500     __get_user(fl->l_pid, &target_fl->l_pid);
5501     unlock_user_struct(target_fl, target_flock_addr, 0);
5502     return 0;
5503 }
5504 
5505 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5506                                           const struct flock64 *fl)
5507 {
5508     struct target_flock *target_fl;
5509     short l_type;
5510 
5511     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5512         return -TARGET_EFAULT;
5513     }
5514 
5515     l_type = host_to_target_flock(fl->l_type);
5516     __put_user(l_type, &target_fl->l_type);
5517     __put_user(fl->l_whence, &target_fl->l_whence);
5518     __put_user(fl->l_start, &target_fl->l_start);
5519     __put_user(fl->l_len, &target_fl->l_len);
5520     __put_user(fl->l_pid, &target_fl->l_pid);
5521     unlock_user_struct(target_fl, target_flock_addr, 1);
5522     return 0;
5523 }
5524 
5525 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5526 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5527 
5528 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5529 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5530                                                    abi_ulong target_flock_addr)
5531 {
5532     struct target_oabi_flock64 *target_fl;
5533     int l_type;
5534 
5535     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5536         return -TARGET_EFAULT;
5537     }
5538 
5539     __get_user(l_type, &target_fl->l_type);
5540     l_type = target_to_host_flock(l_type);
5541     if (l_type < 0) {
5542         return l_type;
5543     }
5544     fl->l_type = l_type;
5545     __get_user(fl->l_whence, &target_fl->l_whence);
5546     __get_user(fl->l_start, &target_fl->l_start);
5547     __get_user(fl->l_len, &target_fl->l_len);
5548     __get_user(fl->l_pid, &target_fl->l_pid);
5549     unlock_user_struct(target_fl, target_flock_addr, 0);
5550     return 0;
5551 }
5552 
5553 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
5554                                                  const struct flock64 *fl)
5555 {
5556     struct target_oabi_flock64 *target_fl;
5557     short l_type;
5558 
5559     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5560         return -TARGET_EFAULT;
5561     }
5562 
5563     l_type = host_to_target_flock(fl->l_type);
5564     __put_user(l_type, &target_fl->l_type);
5565     __put_user(fl->l_whence, &target_fl->l_whence);
5566     __put_user(fl->l_start, &target_fl->l_start);
5567     __put_user(fl->l_len, &target_fl->l_len);
5568     __put_user(fl->l_pid, &target_fl->l_pid);
5569     unlock_user_struct(target_fl, target_flock_addr, 1);
5570     return 0;
5571 }
5572 #endif
5573 
5574 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
5575                                               abi_ulong target_flock_addr)
5576 {
5577     struct target_flock64 *target_fl;
5578     int l_type;
5579 
5580     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5581         return -TARGET_EFAULT;
5582     }
5583 
5584     __get_user(l_type, &target_fl->l_type);
5585     l_type = target_to_host_flock(l_type);
5586     if (l_type < 0) {
5587         return l_type;
5588     }
5589     fl->l_type = l_type;
5590     __get_user(fl->l_whence, &target_fl->l_whence);
5591     __get_user(fl->l_start, &target_fl->l_start);
5592     __get_user(fl->l_len, &target_fl->l_len);
5593     __get_user(fl->l_pid, &target_fl->l_pid);
5594     unlock_user_struct(target_fl, target_flock_addr, 0);
5595     return 0;
5596 }
5597 
5598 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
5599                                             const struct flock64 *fl)
5600 {
5601     struct target_flock64 *target_fl;
5602     short l_type;
5603 
5604     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5605         return -TARGET_EFAULT;
5606     }
5607 
5608     l_type = host_to_target_flock(fl->l_type);
5609     __put_user(l_type, &target_fl->l_type);
5610     __put_user(fl->l_whence, &target_fl->l_whence);
5611     __put_user(fl->l_start, &target_fl->l_start);
5612     __put_user(fl->l_len, &target_fl->l_len);
5613     __put_user(fl->l_pid, &target_fl->l_pid);
5614     unlock_user_struct(target_fl, target_flock_addr, 1);
5615     return 0;
5616 }
5617 
5618 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
5619 {
5620     struct flock64 fl64;
5621 #ifdef F_GETOWN_EX
5622     struct f_owner_ex fox;
5623     struct target_f_owner_ex *target_fox;
5624 #endif
5625     abi_long ret;
5626     int host_cmd = target_to_host_fcntl_cmd(cmd);
5627 
5628     if (host_cmd == -TARGET_EINVAL)
5629 	    return host_cmd;
5630 
5631     switch(cmd) {
5632     case TARGET_F_GETLK:
5633         ret = copy_from_user_flock(&fl64, arg);
5634         if (ret) {
5635             return ret;
5636         }
5637         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5638         if (ret == 0) {
5639             ret = copy_to_user_flock(arg, &fl64);
5640         }
5641         break;
5642 
5643     case TARGET_F_SETLK:
5644     case TARGET_F_SETLKW:
5645         ret = copy_from_user_flock(&fl64, arg);
5646         if (ret) {
5647             return ret;
5648         }
5649         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5650         break;
5651 
5652     case TARGET_F_GETLK64:
5653         ret = copy_from_user_flock64(&fl64, arg);
5654         if (ret) {
5655             return ret;
5656         }
5657         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5658         if (ret == 0) {
5659             ret = copy_to_user_flock64(arg, &fl64);
5660         }
5661         break;
5662     case TARGET_F_SETLK64:
5663     case TARGET_F_SETLKW64:
5664         ret = copy_from_user_flock64(&fl64, arg);
5665         if (ret) {
5666             return ret;
5667         }
5668         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5669         break;
5670 
5671     case TARGET_F_GETFL:
5672         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5673         if (ret >= 0) {
5674             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
5675         }
5676         break;
5677 
5678     case TARGET_F_SETFL:
5679         ret = get_errno(safe_fcntl(fd, host_cmd,
5680                                    target_to_host_bitmask(arg,
5681                                                           fcntl_flags_tbl)));
5682         break;
5683 
5684 #ifdef F_GETOWN_EX
5685     case TARGET_F_GETOWN_EX:
5686         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5687         if (ret >= 0) {
5688             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
5689                 return -TARGET_EFAULT;
5690             target_fox->type = tswap32(fox.type);
5691             target_fox->pid = tswap32(fox.pid);
5692             unlock_user_struct(target_fox, arg, 1);
5693         }
5694         break;
5695 #endif
5696 
5697 #ifdef F_SETOWN_EX
5698     case TARGET_F_SETOWN_EX:
5699         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
5700             return -TARGET_EFAULT;
5701         fox.type = tswap32(target_fox->type);
5702         fox.pid = tswap32(target_fox->pid);
5703         unlock_user_struct(target_fox, arg, 0);
5704         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5705         break;
5706 #endif
5707 
5708     case TARGET_F_SETOWN:
5709     case TARGET_F_GETOWN:
5710     case TARGET_F_SETSIG:
5711     case TARGET_F_GETSIG:
5712     case TARGET_F_SETLEASE:
5713     case TARGET_F_GETLEASE:
5714     case TARGET_F_SETPIPE_SZ:
5715     case TARGET_F_GETPIPE_SZ:
5716         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5717         break;
5718 
5719     default:
5720         ret = get_errno(safe_fcntl(fd, cmd, arg));
5721         break;
5722     }
5723     return ret;
5724 }
5725 
5726 #ifdef USE_UID16
5727 
5728 static inline int high2lowuid(int uid)
5729 {
5730     if (uid > 65535)
5731         return 65534;
5732     else
5733         return uid;
5734 }
5735 
5736 static inline int high2lowgid(int gid)
5737 {
5738     if (gid > 65535)
5739         return 65534;
5740     else
5741         return gid;
5742 }
5743 
5744 static inline int low2highuid(int uid)
5745 {
5746     if ((int16_t)uid == -1)
5747         return -1;
5748     else
5749         return uid;
5750 }
5751 
5752 static inline int low2highgid(int gid)
5753 {
5754     if ((int16_t)gid == -1)
5755         return -1;
5756     else
5757         return gid;
5758 }
5759 static inline int tswapid(int id)
5760 {
5761     return tswap16(id);
5762 }
5763 
5764 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5765 
5766 #else /* !USE_UID16 */
5767 static inline int high2lowuid(int uid)
5768 {
5769     return uid;
5770 }
5771 static inline int high2lowgid(int gid)
5772 {
5773     return gid;
5774 }
5775 static inline int low2highuid(int uid)
5776 {
5777     return uid;
5778 }
5779 static inline int low2highgid(int gid)
5780 {
5781     return gid;
5782 }
5783 static inline int tswapid(int id)
5784 {
5785     return tswap32(id);
5786 }
5787 
5788 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5789 
5790 #endif /* USE_UID16 */
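/* With USE_UID16, high2lowuid()/high2lowgid() clamp IDs above 65535 to 65534
 * (the kernel's traditional overflowuid/overflowgid), while low2highuid() and
 * low2highgid() keep a 16-bit -1 as -1 so that "don't change this ID"
 * arguments to chown() and friends survive the widening. */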
5791 
5792 /* We must do direct syscalls for setting UID/GID, because we want to
5793  * implement the Linux system call semantics of "change only for this thread",
5794  * not the libc/POSIX semantics of "change for all threads in process".
5795  * (See http://ewontfix.com/17/ for more details.)
5796  * We use the 32-bit version of the syscalls if present; if it is not
5797  * then either the host architecture supports 32-bit UIDs natively with
5798  * the standard syscall, or the 16-bit UID is the best we can do.
5799  */
5800 #ifdef __NR_setuid32
5801 #define __NR_sys_setuid __NR_setuid32
5802 #else
5803 #define __NR_sys_setuid __NR_setuid
5804 #endif
5805 #ifdef __NR_setgid32
5806 #define __NR_sys_setgid __NR_setgid32
5807 #else
5808 #define __NR_sys_setgid __NR_setgid
5809 #endif
5810 #ifdef __NR_setresuid32
5811 #define __NR_sys_setresuid __NR_setresuid32
5812 #else
5813 #define __NR_sys_setresuid __NR_setresuid
5814 #endif
5815 #ifdef __NR_setresgid32
5816 #define __NR_sys_setresgid __NR_setresgid32
5817 #else
5818 #define __NR_sys_setresgid __NR_setresgid
5819 #endif
5820 
5821 _syscall1(int, sys_setuid, uid_t, uid)
5822 _syscall1(int, sys_setgid, gid_t, gid)
5823 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
5824 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
5825 
5826 void syscall_init(void)
5827 {
5828     IOCTLEntry *ie;
5829     const argtype *arg_type;
5830     int size;
5831     int i;
5832 
5833     thunk_init(STRUCT_MAX);
5834 
5835 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
5836 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
5837 #include "syscall_types.h"
5838 #undef STRUCT
5839 #undef STRUCT_SPECIAL
5840 
5841     /* Build the target_to_host_errno_table[] from
5842      * host_to_target_errno_table[]. */
5843     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
5844         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
5845     }
5846 
5847     /* we patch the ioctl size if necessary. We rely on the fact that
5848        no ioctl has every bit of its size field set to '1' */
5849     ie = ioctl_entries;
5850     while (ie->target_cmd != 0) {
5851         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
5852             TARGET_IOC_SIZEMASK) {
5853             arg_type = ie->arg_type;
5854             if (arg_type[0] != TYPE_PTR) {
5855                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
5856                         ie->target_cmd);
5857                 exit(1);
5858             }
5859             arg_type++;
5860             size = thunk_type_size(arg_type, 0);
5861             ie->target_cmd = (ie->target_cmd &
5862                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
5863                 (size << TARGET_IOC_SIZESHIFT);
5864         }
5865 
5866         /* automatic consistency check when host and target are the same arch */
5867 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
5868     (defined(__x86_64__) && defined(TARGET_X86_64))
5869         if (unlikely(ie->target_cmd != ie->host_cmd)) {
5870             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
5871                     ie->name, ie->target_cmd, ie->host_cmd);
5872         }
5873 #endif
5874         ie++;
5875     }
5876 }
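/* The size-patching loop above replaces a target command whose size field is
 * all ones (TARGET_IOC_SIZEMASK) with the target-side thunk_type_size() of
 * the pointed-to argument, so the encoded command number that do_ioctl()
 * matches against carries the real structure size. */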
5877 
5878 #if TARGET_ABI_BITS == 32
5879 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
5880 {
5881 #ifdef TARGET_WORDS_BIGENDIAN
5882     return ((uint64_t)word0 << 32) | word1;
5883 #else
5884     return ((uint64_t)word1 << 32) | word0;
5885 #endif
5886 }
5887 #else /* TARGET_ABI_BITS == 32 */
5888 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
5889 {
5890     return word0;
5891 }
5892 #endif /* TARGET_ABI_BITS != 32 */
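/* On 32-bit ABIs a 64-bit file offset arrives as two register-sized halves,
 * and which half is the high word follows the target's endianness: an offset
 * of 0x100000000, for example, shows up as word0 = 1, word1 = 0 on a
 * big-endian target and as word0 = 0, word1 = 1 on a little-endian one. */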
5893 
5894 #ifdef TARGET_NR_truncate64
5895 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
5896                                          abi_long arg2,
5897                                          abi_long arg3,
5898                                          abi_long arg4)
5899 {
5900     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
5901         arg2 = arg3;
5902         arg3 = arg4;
5903     }
5904     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
5905 }
5906 #endif
5907 
5908 #ifdef TARGET_NR_ftruncate64
5909 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
5910                                           abi_long arg2,
5911                                           abi_long arg3,
5912                                           abi_long arg4)
5913 {
5914     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
5915         arg2 = arg3;
5916         arg3 = arg4;
5917     }
5918     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
5919 }
5920 #endif
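/* regpairs_aligned() covers ABIs (32-bit ARM EABI, for instance) that want a
 * 64-bit value to start in an even-numbered register: when it returns true
 * the register after the path/fd is an unused pad, so the offset pair really
 * lives in arg3/arg4 rather than arg2/arg3, hence the shuffling above. */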
5921 
5922 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
5923                                                abi_ulong target_addr)
5924 {
5925     struct target_timespec *target_ts;
5926 
5927     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
5928         return -TARGET_EFAULT;
5929     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
5930     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
5931     unlock_user_struct(target_ts, target_addr, 0);
5932     return 0;
5933 }
5934 
5935 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
5936                                                struct timespec *host_ts)
5937 {
5938     struct target_timespec *target_ts;
5939 
5940     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
5941         return -TARGET_EFAULT;
5942     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
5943     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
5944     unlock_user_struct(target_ts, target_addr, 1);
5945     return 0;
5946 }
5947 
5948 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
5949                                                  abi_ulong target_addr)
5950 {
5951     struct target_itimerspec *target_itspec;
5952 
5953     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
5954         return -TARGET_EFAULT;
5955     }
5956 
5957     host_itspec->it_interval.tv_sec =
5958                             tswapal(target_itspec->it_interval.tv_sec);
5959     host_itspec->it_interval.tv_nsec =
5960                             tswapal(target_itspec->it_interval.tv_nsec);
5961     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
5962     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
5963 
5964     unlock_user_struct(target_itspec, target_addr, 1);
5965     return 0;
5966 }
5967 
5968 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
5969                                                struct itimerspec *host_its)
5970 {
5971     struct target_itimerspec *target_itspec;
5972 
5973     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
5974         return -TARGET_EFAULT;
5975     }
5976 
5977     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
5978     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
5979 
5980     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
5981     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
5982 
5983     unlock_user_struct(target_itspec, target_addr, 0);
5984     return 0;
5985 }
5986 
5987 static inline abi_long target_to_host_timex(struct timex *host_tx,
5988                                             abi_long target_addr)
5989 {
5990     struct target_timex *target_tx;
5991 
5992     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
5993         return -TARGET_EFAULT;
5994     }
5995 
5996     __get_user(host_tx->modes, &target_tx->modes);
5997     __get_user(host_tx->offset, &target_tx->offset);
5998     __get_user(host_tx->freq, &target_tx->freq);
5999     __get_user(host_tx->maxerror, &target_tx->maxerror);
6000     __get_user(host_tx->esterror, &target_tx->esterror);
6001     __get_user(host_tx->status, &target_tx->status);
6002     __get_user(host_tx->constant, &target_tx->constant);
6003     __get_user(host_tx->precision, &target_tx->precision);
6004     __get_user(host_tx->tolerance, &target_tx->tolerance);
6005     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6006     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6007     __get_user(host_tx->tick, &target_tx->tick);
6008     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6009     __get_user(host_tx->jitter, &target_tx->jitter);
6010     __get_user(host_tx->shift, &target_tx->shift);
6011     __get_user(host_tx->stabil, &target_tx->stabil);
6012     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6013     __get_user(host_tx->calcnt, &target_tx->calcnt);
6014     __get_user(host_tx->errcnt, &target_tx->errcnt);
6015     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6016     __get_user(host_tx->tai, &target_tx->tai);
6017 
6018     unlock_user_struct(target_tx, target_addr, 0);
6019     return 0;
6020 }
6021 
6022 static inline abi_long host_to_target_timex(abi_long target_addr,
6023                                             struct timex *host_tx)
6024 {
6025     struct target_timex *target_tx;
6026 
6027     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6028         return -TARGET_EFAULT;
6029     }
6030 
6031     __put_user(host_tx->modes, &target_tx->modes);
6032     __put_user(host_tx->offset, &target_tx->offset);
6033     __put_user(host_tx->freq, &target_tx->freq);
6034     __put_user(host_tx->maxerror, &target_tx->maxerror);
6035     __put_user(host_tx->esterror, &target_tx->esterror);
6036     __put_user(host_tx->status, &target_tx->status);
6037     __put_user(host_tx->constant, &target_tx->constant);
6038     __put_user(host_tx->precision, &target_tx->precision);
6039     __put_user(host_tx->tolerance, &target_tx->tolerance);
6040     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6041     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6042     __put_user(host_tx->tick, &target_tx->tick);
6043     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6044     __put_user(host_tx->jitter, &target_tx->jitter);
6045     __put_user(host_tx->shift, &target_tx->shift);
6046     __put_user(host_tx->stabil, &target_tx->stabil);
6047     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6048     __put_user(host_tx->calcnt, &target_tx->calcnt);
6049     __put_user(host_tx->errcnt, &target_tx->errcnt);
6050     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6051     __put_user(host_tx->tai, &target_tx->tai);
6052 
6053     unlock_user_struct(target_tx, target_addr, 1);
6054     return 0;
6055 }
6056 
6057 
6058 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6059                                                abi_ulong target_addr)
6060 {
6061     struct target_sigevent *target_sevp;
6062 
6063     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6064         return -TARGET_EFAULT;
6065     }
6066 
6067     /* This union is awkward on 64 bit systems because it has a 32 bit
6068      * integer and a pointer in it; we follow the conversion approach
6069      * used for handling sigval types in signal.c so the guest should get
6070      * the correct value back even if we did a 64 bit byteswap and it's
6071      * using the 32 bit integer.
6072      */
6073     host_sevp->sigev_value.sival_ptr =
6074         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6075     host_sevp->sigev_signo =
6076         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6077     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6078     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6079 
6080     unlock_user_struct(target_sevp, target_addr, 1);
6081     return 0;
6082 }
6083 
6084 #if defined(TARGET_NR_mlockall)
6085 static inline int target_to_host_mlockall_arg(int arg)
6086 {
6087     int result = 0;
6088 
6089     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6090         result |= MCL_CURRENT;
6091     }
6092     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6093         result |= MCL_FUTURE;
6094     }
6095     return result;
6096 }
6097 #endif
6098 
6099 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6100      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6101      defined(TARGET_NR_newfstatat))
6102 static inline abi_long host_to_target_stat64(void *cpu_env,
6103                                              abi_ulong target_addr,
6104                                              struct stat *host_st)
6105 {
6106 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6107     if (((CPUARMState *)cpu_env)->eabi) {
6108         struct target_eabi_stat64 *target_st;
6109 
6110         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6111             return -TARGET_EFAULT;
6112         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6113         __put_user(host_st->st_dev, &target_st->st_dev);
6114         __put_user(host_st->st_ino, &target_st->st_ino);
6115 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6116         __put_user(host_st->st_ino, &target_st->__st_ino);
6117 #endif
6118         __put_user(host_st->st_mode, &target_st->st_mode);
6119         __put_user(host_st->st_nlink, &target_st->st_nlink);
6120         __put_user(host_st->st_uid, &target_st->st_uid);
6121         __put_user(host_st->st_gid, &target_st->st_gid);
6122         __put_user(host_st->st_rdev, &target_st->st_rdev);
6123         __put_user(host_st->st_size, &target_st->st_size);
6124         __put_user(host_st->st_blksize, &target_st->st_blksize);
6125         __put_user(host_st->st_blocks, &target_st->st_blocks);
6126         __put_user(host_st->st_atime, &target_st->target_st_atime);
6127         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6128         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6129         unlock_user_struct(target_st, target_addr, 1);
6130     } else
6131 #endif
6132     {
6133 #if defined(TARGET_HAS_STRUCT_STAT64)
6134         struct target_stat64 *target_st;
6135 #else
6136         struct target_stat *target_st;
6137 #endif
6138 
6139         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6140             return -TARGET_EFAULT;
6141         memset(target_st, 0, sizeof(*target_st));
6142         __put_user(host_st->st_dev, &target_st->st_dev);
6143         __put_user(host_st->st_ino, &target_st->st_ino);
6144 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6145         __put_user(host_st->st_ino, &target_st->__st_ino);
6146 #endif
6147         __put_user(host_st->st_mode, &target_st->st_mode);
6148         __put_user(host_st->st_nlink, &target_st->st_nlink);
6149         __put_user(host_st->st_uid, &target_st->st_uid);
6150         __put_user(host_st->st_gid, &target_st->st_gid);
6151         __put_user(host_st->st_rdev, &target_st->st_rdev);
6152         /* XXX: better use of kernel struct */
6153         __put_user(host_st->st_size, &target_st->st_size);
6154         __put_user(host_st->st_blksize, &target_st->st_blksize);
6155         __put_user(host_st->st_blocks, &target_st->st_blocks);
6156         __put_user(host_st->st_atime, &target_st->target_st_atime);
6157         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6158         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6159         unlock_user_struct(target_st, target_addr, 1);
6160     }
6161 
6162     return 0;
6163 }
6164 #endif
6165 
6166 /* ??? Using host futex calls even when target atomic operations
6167    are not really atomic probably breaks things.  However, implementing
6168    futexes locally would make futexes shared between multiple processes
6169    tricky, and such shared futexes are probably useless anyway because
6170    guest atomic operations won't work between processes either.  */
6171 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6172                     target_ulong uaddr2, int val3)
6173 {
6174     struct timespec ts, *pts;
6175     int base_op;
6176 
6177     /* ??? We assume FUTEX_* constants are the same on both host
6178        and target.  */
6179 #ifdef FUTEX_CMD_MASK
6180     base_op = op & FUTEX_CMD_MASK;
6181 #else
6182     base_op = op;
6183 #endif
6184     switch (base_op) {
6185     case FUTEX_WAIT:
6186     case FUTEX_WAIT_BITSET:
6187         if (timeout) {
6188             pts = &ts;
6189             if (target_to_host_timespec(pts, timeout)) {
                      return -TARGET_EFAULT;
                  }
6190         } else {
6191             pts = NULL;
6192         }
6193         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6194                          pts, NULL, val3));
6195     case FUTEX_WAKE:
6196     case FUTEX_FD:
6197         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6199     case FUTEX_REQUEUE:
6200     case FUTEX_CMP_REQUEUE:
6201     case FUTEX_WAKE_OP:
6202         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6203            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6204            But the prototype takes a `struct timespec *'; insert casts
6205            to satisfy the compiler.  We do not need to tswap TIMEOUT
6206            since it's not compared to guest memory.  */
6207         pts = (struct timespec *)(uintptr_t) timeout;
6208         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6209                                     g2h(uaddr2),
6210                                     (base_op == FUTEX_CMP_REQUEUE
6211                                      ? tswap32(val3)
6212                                      : val3)));
6213     default:
6214         return -TARGET_ENOSYS;
6215     }
6216 }
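/*
 * Illustrative sketch (not part of the original file): the raw host futex(2)
 * wait/wake pair that do_futex() forwards to via safe_futex().  Assumes a
 * Linux host; there is no glibc wrapper, so the call goes through
 * syscall(SYS_futex, ...) with the same six-argument shape used above.
 */
#if 0
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>

static long futex_wait(uint32_t *addr, uint32_t expected)
{
    /* Blocks while *addr == expected; fails with EAGAIN if it already differs. */
    return syscall(SYS_futex, addr, FUTEX_WAIT, expected, NULL, NULL, 0);
}

static long futex_wake(uint32_t *addr, int nwaiters)
{
    /* Wakes up to nwaiters threads blocked in futex_wait() on addr. */
    return syscall(SYS_futex, addr, FUTEX_WAKE, nwaiters, NULL, NULL, 0);
}
#endif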
6217 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6218 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6219                                      abi_long handle, abi_long mount_id,
6220                                      abi_long flags)
6221 {
6222     struct file_handle *target_fh;
6223     struct file_handle *fh;
6224     int mid = 0;
6225     abi_long ret;
6226     char *name;
6227     unsigned int size, total_size;
6228 
6229     if (get_user_s32(size, handle)) {
6230         return -TARGET_EFAULT;
6231     }
6232 
6233     name = lock_user_string(pathname);
6234     if (!name) {
6235         return -TARGET_EFAULT;
6236     }
6237 
6238     total_size = sizeof(struct file_handle) + size;
6239     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6240     if (!target_fh) {
6241         unlock_user(name, pathname, 0);
6242         return -TARGET_EFAULT;
6243     }
6244 
6245     fh = g_malloc0(total_size);
6246     fh->handle_bytes = size;
6247 
6248     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6249     unlock_user(name, pathname, 0);
6250 
6251     /* man name_to_handle_at(2):
6252      * Other than the use of the handle_bytes field, the caller should treat
6253      * the file_handle structure as an opaque data type
6254      */
6255 
6256     memcpy(target_fh, fh, total_size);
6257     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6258     target_fh->handle_type = tswap32(fh->handle_type);
6259     g_free(fh);
6260     unlock_user(target_fh, handle, total_size);
6261 
6262     if (put_user_s32(mid, mount_id)) {
6263         return -TARGET_EFAULT;
6264     }
6265 
6266     return ret;
6267 
6268 }
6269 #endif
6270 
6271 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6272 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6273                                      abi_long flags)
6274 {
6275     struct file_handle *target_fh;
6276     struct file_handle *fh;
6277     unsigned int size, total_size;
6278     abi_long ret;
6279 
6280     if (get_user_s32(size, handle)) {
6281         return -TARGET_EFAULT;
6282     }
6283 
6284     total_size = sizeof(struct file_handle) + size;
6285     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6286     if (!target_fh) {
6287         return -TARGET_EFAULT;
6288     }
6289 
6290     fh = g_memdup(target_fh, total_size);
6291     fh->handle_bytes = size;
6292     fh->handle_type = tswap32(target_fh->handle_type);
6293 
6294     ret = get_errno(open_by_handle_at(mount_fd, fh,
6295                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6296 
6297     g_free(fh);
6298 
6299     unlock_user(target_fh, handle, total_size);
6300 
6301     return ret;
6302 }
6303 #endif
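/*
 * Illustrative sketch (not part of the original file): the host-side protocol
 * that the two wrappers above emulate for the guest.  The caller sizes the
 * file_handle via handle_bytes and otherwise treats it as opaque; reopening by
 * handle normally requires CAP_DAC_READ_SEARCH.  Error handling is omitted.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>

static int reopen_by_handle(const char *pathname, int mount_fd)
{
    struct file_handle *fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
    int mount_id, fd;

    fh->handle_bytes = MAX_HANDLE_SZ;   /* tell the kernel how much room we allotted */
    name_to_handle_at(AT_FDCWD, pathname, fh, &mount_id, 0);
    fd = open_by_handle_at(mount_fd, fh, O_RDONLY);
    free(fh);
    return fd;
}
#endif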
6304 
6305 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6306 
6307 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6308 {
6309     int host_flags;
6310     target_sigset_t *target_mask;
6311     sigset_t host_mask;
6312     abi_long ret;
6313 
6314     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6315         return -TARGET_EINVAL;
6316     }
6317     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6318         return -TARGET_EFAULT;
6319     }
6320 
6321     target_to_host_sigset(&host_mask, target_mask);
6322 
6323     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6324 
6325     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6326     if (ret >= 0) {
6327         fd_trans_register(ret, &target_signalfd_trans);
6328     }
6329 
6330     unlock_user_struct(target_mask, mask, 0);
6331 
6332     return ret;
6333 }
6334 #endif
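/*
 * Illustrative sketch (not part of the original file): the host signalfd
 * pattern that do_signalfd4() wraps.  The signal must be blocked before it
 * can be consumed through the descriptor as a struct signalfd_siginfo, which
 * is presumably what the registered target_signalfd_trans translator adjusts
 * for the guest.
 */
#if 0
#include <sys/signalfd.h>
#include <signal.h>
#include <unistd.h>

static int read_one_sigusr1(void)
{
    sigset_t mask;
    struct signalfd_siginfo si;
    int fd;

    sigemptyset(&mask);
    sigaddset(&mask, SIGUSR1);
    sigprocmask(SIG_BLOCK, &mask, NULL);     /* stop normal asynchronous delivery */
    fd = signalfd(-1, &mask, SFD_CLOEXEC);   /* -1: create a new descriptor */
    read(fd, &si, sizeof(si));               /* blocks until SIGUSR1 arrives */
    close(fd);
    return si.ssi_signo;
}
#endif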
6335 
6336 /* Map host to target signal numbers for the wait family of syscalls.
6337    Assume all other status bits are the same.  */
6338 int host_to_target_waitstatus(int status)
6339 {
6340     if (WIFSIGNALED(status)) {
6341         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6342     }
6343     if (WIFSTOPPED(status)) {
6344         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6345                | (status & 0xff);
6346     }
6347     return status;
6348 }
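/*
 * Illustrative sketch (not part of the original file): the Linux wait-status
 * layout that the conversion above relies on.  The terminating signal sits in
 * the low 7 bits (bit 7 is the core-dump flag) and a stop signal sits in bits
 * 8-15, so only those two fields need host-to-target signal translation.
 */
#if 0
#include <sys/wait.h>
#include <stdio.h>

static void describe_status(int status)
{
    if (WIFEXITED(status)) {
        printf("exited with code %d\n", WEXITSTATUS(status));    /* bits 8-15 */
    } else if (WIFSIGNALED(status)) {
        printf("killed by signal %d\n", WTERMSIG(status));       /* bits 0-6 */
    } else if (WIFSTOPPED(status)) {
        printf("stopped by signal %d\n", WSTOPSIG(status));      /* bits 8-15 */
    }
}
#endif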
6349 
6350 static int open_self_cmdline(void *cpu_env, int fd)
6351 {
6352     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6353     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6354     int i;
6355 
6356     for (i = 0; i < bprm->argc; i++) {
6357         size_t len = strlen(bprm->argv[i]) + 1;
6358 
6359         if (write(fd, bprm->argv[i], len) != len) {
6360             return -1;
6361         }
6362     }
6363 
6364     return 0;
6365 }
6366 
6367 static int open_self_maps(void *cpu_env, int fd)
6368 {
6369     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6370     TaskState *ts = cpu->opaque;
6371     FILE *fp;
6372     char *line = NULL;
6373     size_t len = 0;
6374     ssize_t read;
6375 
6376     fp = fopen("/proc/self/maps", "r");
6377     if (fp == NULL) {
6378         return -1;
6379     }
6380 
6381     while ((read = getline(&line, &len, fp)) != -1) {
6382         int fields, dev_maj, dev_min, inode;
6383         uint64_t min, max, offset;
6384         char flag_r, flag_w, flag_x, flag_p;
6385         char path[513] = "";
6386         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6387                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6388                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6389 
6390         if ((fields < 10) || (fields > 11)) {
6391             continue;
6392         }
6393         if (h2g_valid(min)) {
6394             int flags = page_get_flags(h2g(min));
6395             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6396             if (page_check_range(h2g(min), max - min, flags) == -1) {
6397                 continue;
6398             }
6399             if (h2g(min) == ts->info->stack_limit) {
6400                 pstrcpy(path, sizeof(path), "      [stack]");
6401             }
6402             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6403                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6404                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6405                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6406                     path[0] ? "         " : "", path);
6407         }
6408     }
6409 
6410     free(line);
6411     fclose(fp);
6412 
6413     return 0;
6414 }
6415 
6416 static int open_self_stat(void *cpu_env, int fd)
6417 {
6418     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6419     TaskState *ts = cpu->opaque;
6420     abi_ulong start_stack = ts->info->start_stack;
6421     int i;
6422 
6423     for (i = 0; i < 44; i++) {
6424       char buf[128];
6425       int len;
6426       uint64_t val = 0;
6427 
6428       if (i == 0) {
6429         /* pid */
6430         val = getpid();
6431         snprintf(buf, sizeof(buf), "%"PRIu64 " ", val);
6432       } else if (i == 1) {
6433         /* app name */
6434         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6435       } else if (i == 27) {
6436         /* stack bottom */
6437         val = start_stack;
6438         snprintf(buf, sizeof(buf), "%"PRIu64 " ", val);
6439       } else {
6440         /* for the rest, there is MasterCard */
6441         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6442       }
6443 
6444       len = strlen(buf);
6445       if (write(fd, buf, len) != len) {
6446           return -1;
6447       }
6448     }
6449 
6450     return 0;
6451 }
6452 
6453 static int open_self_auxv(void *cpu_env, int fd)
6454 {
6455     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6456     TaskState *ts = cpu->opaque;
6457     abi_ulong auxv = ts->info->saved_auxv;
6458     abi_ulong len = ts->info->auxv_len;
6459     char *ptr;
6460 
6461     /*
6462      * The auxiliary vector is stored on the target process stack;
6463      * read the whole vector and copy it to the file.
6464      */
6465     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6466     if (ptr != NULL) {
6467         while (len > 0) {
6468             ssize_t r;
6469             r = write(fd, ptr, len);
6470             if (r <= 0) {
6471                 break;
6472             }
6473             len -= r;
6474             ptr += r;
6475         }
6476         lseek(fd, 0, SEEK_SET);
6477         unlock_user(ptr, auxv, len);
6478     }
6479 
6480     return 0;
6481 }
6482 
6483 static int is_proc_myself(const char *filename, const char *entry)
6484 {
6485     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6486         filename += strlen("/proc/");
6487         if (!strncmp(filename, "self/", strlen("self/"))) {
6488             filename += strlen("self/");
6489         } else if (*filename >= '1' && *filename <= '9') {
6490             char myself[80];
6491             snprintf(myself, sizeof(myself), "%d/", getpid());
6492             if (!strncmp(filename, myself, strlen(myself))) {
6493                 filename += strlen(myself);
6494             } else {
6495                 return 0;
6496             }
6497         } else {
6498             return 0;
6499         }
6500         if (!strcmp(filename, entry)) {
6501             return 1;
6502         }
6503     }
6504     return 0;
6505 }
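/*
 * Illustrative sketch (not part of the original file): the paths that
 * is_proc_myself() treats as referring to the emulated process itself.
 * "/proc/self/..." always matches; "/proc/<pid>/..." matches only when <pid>
 * is QEMU's own pid, so other processes' entries are opened normally.
 */
#if 0
static void proc_self_example(void)
{
    int a = is_proc_myself("/proc/self/maps", "maps");  /* 1 */
    int b = is_proc_myself("/proc/1234/maps", "maps");  /* 1 only if getpid() == 1234 */
    int c = is_proc_myself("/proc/meminfo", "maps");    /* 0 */
    (void)a; (void)b; (void)c;
}
#endif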
6506 
6507 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6508 static int is_proc(const char *filename, const char *entry)
6509 {
6510     return strcmp(filename, entry) == 0;
6511 }
6512 
6513 static int open_net_route(void *cpu_env, int fd)
6514 {
6515     FILE *fp;
6516     char *line = NULL;
6517     size_t len = 0;
6518     ssize_t read;
6519 
6520     fp = fopen("/proc/net/route", "r");
6521     if (fp == NULL) {
6522         return -1;
6523     }
6524 
6525     /* read header */
6526 
6527     read = getline(&line, &len, fp);
6528     dprintf(fd, "%s", line);
6529 
6530     /* read routes */
6531 
6532     while ((read = getline(&line, &len, fp)) != -1) {
6533         char iface[16];
6534         uint32_t dest, gw, mask;
6535         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6536         sscanf(line, "%15s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6537                      iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6538                      &mask, &mtu, &window, &irtt);
6539         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6540                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6541                 metric, tswap32(mask), mtu, window, irtt);
6542     }
6543 
6544     free(line);
6545     fclose(fp);
6546 
6547     return 0;
6548 }
6549 #endif
6550 
6551 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
6552 {
6553     struct fake_open {
6554         const char *filename;
6555         int (*fill)(void *cpu_env, int fd);
6556         int (*cmp)(const char *s1, const char *s2);
6557     };
6558     const struct fake_open *fake_open;
6559     static const struct fake_open fakes[] = {
6560         { "maps", open_self_maps, is_proc_myself },
6561         { "stat", open_self_stat, is_proc_myself },
6562         { "auxv", open_self_auxv, is_proc_myself },
6563         { "cmdline", open_self_cmdline, is_proc_myself },
6564 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6565         { "/proc/net/route", open_net_route, is_proc },
6566 #endif
6567         { NULL, NULL, NULL }
6568     };
6569 
6570     if (is_proc_myself(pathname, "exe")) {
6571         int execfd = qemu_getauxval(AT_EXECFD);
6572         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
6573     }
6574 
6575     for (fake_open = fakes; fake_open->filename; fake_open++) {
6576         if (fake_open->cmp(pathname, fake_open->filename)) {
6577             break;
6578         }
6579     }
6580 
6581     if (fake_open->filename) {
6582         const char *tmpdir;
6583         char filename[PATH_MAX];
6584         int fd, r;
6585 
6586         /* create a temporary file to hold the synthesized contents */
6587         tmpdir = getenv("TMPDIR");
6588         if (!tmpdir)
6589             tmpdir = "/tmp";
6590         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
6591         fd = mkstemp(filename);
6592         if (fd < 0) {
6593             return fd;
6594         }
6595         unlink(filename);
6596 
6597         if ((r = fake_open->fill(cpu_env, fd))) {
6598             int e = errno;
6599             close(fd);
6600             errno = e;
6601             return r;
6602         }
6603         lseek(fd, 0, SEEK_SET);
6604 
6605         return fd;
6606     }
6607 
6608     return safe_openat(dirfd, path(pathname), flags, mode);
6609 }
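/*
 * Illustrative sketch (not part of the original file): the anonymous temp-file
 * trick do_openat() uses for the faked /proc entries.  The file is unlinked
 * immediately after creation, so only the returned descriptor keeps the
 * synthesized contents alive.  Error checks are trimmed for brevity.
 */
#if 0
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static int open_synthetic(const char *contents)
{
    char name[] = "/tmp/qemu-open.XXXXXX";
    int fd = mkstemp(name);                  /* create a uniquely named file */
    if (fd < 0) {
        return fd;
    }
    unlink(name);                            /* keep it reachable only via fd */
    write(fd, contents, strlen(contents));   /* fill in the faked contents */
    lseek(fd, 0, SEEK_SET);                  /* rewind so the caller reads from 0 */
    return fd;
}
#endif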
6610 
6611 #define TIMER_MAGIC 0x0caf0000
6612 #define TIMER_MAGIC_MASK 0xffff0000
6613 
6614 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
6615 static target_timer_t get_timer_id(abi_long arg)
6616 {
6617     target_timer_t timerid = arg;
6618 
6619     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6620         return -TARGET_EINVAL;
6621     }
6622 
6623     timerid &= 0xffff;
6624 
6625     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6626         return -TARGET_EINVAL;
6627     }
6628 
6629     return timerid;
6630 }
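/*
 * Illustrative sketch (not part of the original file): how a guest-visible
 * timer ID relates to the internal index.  timer_create() hands out
 * (TIMER_MAGIC | index); get_timer_id() strips the magic and bounds-checks
 * the index, rejecting anything with the wrong upper bits.
 */
#if 0
static void timer_id_example(void)
{
    abi_long guest_id = TIMER_MAGIC | 5;             /* as handed to the guest */
    target_timer_t idx = get_timer_id(guest_id);     /* 5 */
    target_timer_t bad = get_timer_id(0x12340005);   /* -TARGET_EINVAL: wrong magic */
    (void)idx; (void)bad;
}
#endif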
6631 
6632 static int target_to_host_cpu_mask(unsigned long *host_mask,
6633                                    size_t host_size,
6634                                    abi_ulong target_addr,
6635                                    size_t target_size)
6636 {
6637     unsigned target_bits = sizeof(abi_ulong) * 8;
6638     unsigned host_bits = sizeof(*host_mask) * 8;
6639     abi_ulong *target_mask;
6640     unsigned i, j;
6641 
6642     assert(host_size >= target_size);
6643 
6644     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
6645     if (!target_mask) {
6646         return -TARGET_EFAULT;
6647     }
6648     memset(host_mask, 0, host_size);
6649 
6650     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6651         unsigned bit = i * target_bits;
6652         abi_ulong val;
6653 
6654         __get_user(val, &target_mask[i]);
6655         for (j = 0; j < target_bits; j++, bit++) {
6656             if (val & (1UL << j)) {
6657                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
6658             }
6659         }
6660     }
6661 
6662     unlock_user(target_mask, target_addr, 0);
6663     return 0;
6664 }
6665 
6666 static int host_to_target_cpu_mask(const unsigned long *host_mask,
6667                                    size_t host_size,
6668                                    abi_ulong target_addr,
6669                                    size_t target_size)
6670 {
6671     unsigned target_bits = sizeof(abi_ulong) * 8;
6672     unsigned host_bits = sizeof(*host_mask) * 8;
6673     abi_ulong *target_mask;
6674     unsigned i, j;
6675 
6676     assert(host_size >= target_size);
6677 
6678     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
6679     if (!target_mask) {
6680         return -TARGET_EFAULT;
6681     }
6682 
6683     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6684         unsigned bit = i * target_bits;
6685         abi_ulong val = 0;
6686 
6687         for (j = 0; j < target_bits; j++, bit++) {
6688             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
6689                 val |= 1UL << j;
6690             }
6691         }
6692         __put_user(val, &target_mask[i]);
6693     }
6694 
6695     unlock_user(target_mask, target_addr, target_size);
6696     return 0;
6697 }
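/*
 * Illustrative sketch (not part of the original file): the bit repacking done
 * by the two helpers above, shown for a 32-bit target on a 64-bit host.  Bit n
 * of the affinity set keeps logical position n; only the word it lives in (and
 * that word's byte order, handled by __get_user/__put_user) changes.
 */
#if 0
#include <stdint.h>

/* dst must be zeroed by the caller and large enough for nsrc * 32 bits. */
static void repack_32_to_64(const uint32_t *src, unsigned nsrc, uint64_t *dst)
{
    unsigned i, j;

    for (i = 0; i < nsrc; i++) {
        for (j = 0; j < 32; j++) {
            unsigned bit = i * 32 + j;
            if (src[i] & (UINT32_C(1) << j)) {
                dst[bit / 64] |= UINT64_C(1) << (bit % 64);
            }
        }
    }
}
#endif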
6698 
6699 /* This is an internal helper for do_syscall so that it is easier
6700  * to have a single return point, so that actions, such as logging
6701  * of syscall results, can be performed.
6702  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
6703  */
6704 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
6705                             abi_long arg2, abi_long arg3, abi_long arg4,
6706                             abi_long arg5, abi_long arg6, abi_long arg7,
6707                             abi_long arg8)
6708 {
6709     CPUState *cpu = ENV_GET_CPU(cpu_env);
6710     abi_long ret;
6711 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
6712     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
6713     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
6714     struct stat st;
6715 #endif
6716 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
6717     || defined(TARGET_NR_fstatfs)
6718     struct statfs stfs;
6719 #endif
6720     void *p;
6721 
6722     switch(num) {
6723     case TARGET_NR_exit:
6724         /* In old applications this may be used to implement _exit(2).
6725            However, in threaded applications it is used for thread termination,
6726            and _exit_group is used for application termination.
6727            Do thread termination if we have more than one thread.  */
6728 
6729         if (block_signals()) {
6730             return -TARGET_ERESTARTSYS;
6731         }
6732 
6733         cpu_list_lock();
6734 
6735         if (CPU_NEXT(first_cpu)) {
6736             TaskState *ts;
6737 
6738             /* Remove the CPU from the list.  */
6739             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
6740 
6741             cpu_list_unlock();
6742 
6743             ts = cpu->opaque;
6744             if (ts->child_tidptr) {
6745                 put_user_u32(0, ts->child_tidptr);
6746                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
6747                           NULL, NULL, 0);
6748             }
6749             thread_cpu = NULL;
6750             object_unref(OBJECT(cpu));
6751             g_free(ts);
6752             rcu_unregister_thread();
6753             pthread_exit(NULL);
6754         }
6755 
6756         cpu_list_unlock();
6757         preexit_cleanup(cpu_env, arg1);
6758         _exit(arg1);
6759         return 0; /* avoid warning */
6760     case TARGET_NR_read:
6761         if (arg3 == 0) {
6762             return 0;
6763         } else {
6764             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6765                 return -TARGET_EFAULT;
6766             ret = get_errno(safe_read(arg1, p, arg3));
6767             if (ret >= 0 &&
6768                 fd_trans_host_to_target_data(arg1)) {
6769                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
6770             }
6771             unlock_user(p, arg2, ret);
6772         }
6773         return ret;
6774     case TARGET_NR_write:
6775         if (arg2 == 0 && arg3 == 0) {
6776             return get_errno(safe_write(arg1, 0, 0));
6777         }
6778         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6779             return -TARGET_EFAULT;
6780         if (fd_trans_target_to_host_data(arg1)) {
6781             void *copy = g_malloc(arg3);
6782             memcpy(copy, p, arg3);
6783             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
6784             if (ret >= 0) {
6785                 ret = get_errno(safe_write(arg1, copy, ret));
6786             }
6787             g_free(copy);
6788         } else {
6789             ret = get_errno(safe_write(arg1, p, arg3));
6790         }
6791         unlock_user(p, arg2, 0);
6792         return ret;
6793 
6794 #ifdef TARGET_NR_open
6795     case TARGET_NR_open:
6796         if (!(p = lock_user_string(arg1)))
6797             return -TARGET_EFAULT;
6798         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
6799                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
6800                                   arg3));
6801         fd_trans_unregister(ret);
6802         unlock_user(p, arg1, 0);
6803         return ret;
6804 #endif
6805     case TARGET_NR_openat:
6806         if (!(p = lock_user_string(arg2)))
6807             return -TARGET_EFAULT;
6808         ret = get_errno(do_openat(cpu_env, arg1, p,
6809                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
6810                                   arg4));
6811         fd_trans_unregister(ret);
6812         unlock_user(p, arg2, 0);
6813         return ret;
6814 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6815     case TARGET_NR_name_to_handle_at:
6816         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
6817         return ret;
6818 #endif
6819 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6820     case TARGET_NR_open_by_handle_at:
6821         ret = do_open_by_handle_at(arg1, arg2, arg3);
6822         fd_trans_unregister(ret);
6823         return ret;
6824 #endif
6825     case TARGET_NR_close:
6826         fd_trans_unregister(arg1);
6827         return get_errno(close(arg1));
6828 
6829     case TARGET_NR_brk:
6830         return do_brk(arg1);
6831 #ifdef TARGET_NR_fork
6832     case TARGET_NR_fork:
6833         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
6834 #endif
6835 #ifdef TARGET_NR_waitpid
6836     case TARGET_NR_waitpid:
6837         {
6838             int status;
6839             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
6840             if (!is_error(ret) && arg2 && ret
6841                 && put_user_s32(host_to_target_waitstatus(status), arg2))
6842                 return -TARGET_EFAULT;
6843         }
6844         return ret;
6845 #endif
6846 #ifdef TARGET_NR_waitid
6847     case TARGET_NR_waitid:
6848         {
6849             siginfo_t info;
6850             info.si_pid = 0;
6851             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
6852             if (!is_error(ret) && arg3 && info.si_pid != 0) {
6853                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
6854                     return -TARGET_EFAULT;
6855                 host_to_target_siginfo(p, &info);
6856                 unlock_user(p, arg3, sizeof(target_siginfo_t));
6857             }
6858         }
6859         return ret;
6860 #endif
6861 #ifdef TARGET_NR_creat /* not on alpha */
6862     case TARGET_NR_creat:
6863         if (!(p = lock_user_string(arg1)))
6864             return -TARGET_EFAULT;
6865         ret = get_errno(creat(p, arg2));
6866         fd_trans_unregister(ret);
6867         unlock_user(p, arg1, 0);
6868         return ret;
6869 #endif
6870 #ifdef TARGET_NR_link
6871     case TARGET_NR_link:
6872         {
6873             void * p2;
6874             p = lock_user_string(arg1);
6875             p2 = lock_user_string(arg2);
6876             if (!p || !p2)
6877                 ret = -TARGET_EFAULT;
6878             else
6879                 ret = get_errno(link(p, p2));
6880             unlock_user(p2, arg2, 0);
6881             unlock_user(p, arg1, 0);
6882         }
6883         return ret;
6884 #endif
6885 #if defined(TARGET_NR_linkat)
6886     case TARGET_NR_linkat:
6887         {
6888             void * p2 = NULL;
6889             if (!arg2 || !arg4)
6890                 return -TARGET_EFAULT;
6891             p  = lock_user_string(arg2);
6892             p2 = lock_user_string(arg4);
6893             if (!p || !p2)
6894                 ret = -TARGET_EFAULT;
6895             else
6896                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
6897             unlock_user(p, arg2, 0);
6898             unlock_user(p2, arg4, 0);
6899         }
6900         return ret;
6901 #endif
6902 #ifdef TARGET_NR_unlink
6903     case TARGET_NR_unlink:
6904         if (!(p = lock_user_string(arg1)))
6905             return -TARGET_EFAULT;
6906         ret = get_errno(unlink(p));
6907         unlock_user(p, arg1, 0);
6908         return ret;
6909 #endif
6910 #if defined(TARGET_NR_unlinkat)
6911     case TARGET_NR_unlinkat:
6912         if (!(p = lock_user_string(arg2)))
6913             return -TARGET_EFAULT;
6914         ret = get_errno(unlinkat(arg1, p, arg3));
6915         unlock_user(p, arg2, 0);
6916         return ret;
6917 #endif
6918     case TARGET_NR_execve:
6919         {
6920             char **argp, **envp;
6921             int argc, envc;
6922             abi_ulong gp;
6923             abi_ulong guest_argp;
6924             abi_ulong guest_envp;
6925             abi_ulong addr;
6926             char **q;
6927             int total_size = 0;
6928 
6929             argc = 0;
6930             guest_argp = arg2;
6931             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
6932                 if (get_user_ual(addr, gp))
6933                     return -TARGET_EFAULT;
6934                 if (!addr)
6935                     break;
6936                 argc++;
6937             }
6938             envc = 0;
6939             guest_envp = arg3;
6940             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
6941                 if (get_user_ual(addr, gp))
6942                     return -TARGET_EFAULT;
6943                 if (!addr)
6944                     break;
6945                 envc++;
6946             }
6947 
6948             argp = g_new0(char *, argc + 1);
6949             envp = g_new0(char *, envc + 1);
6950 
6951             for (gp = guest_argp, q = argp; gp;
6952                   gp += sizeof(abi_ulong), q++) {
6953                 if (get_user_ual(addr, gp))
6954                     goto execve_efault;
6955                 if (!addr)
6956                     break;
6957                 if (!(*q = lock_user_string(addr)))
6958                     goto execve_efault;
6959                 total_size += strlen(*q) + 1;
6960             }
6961             *q = NULL;
6962 
6963             for (gp = guest_envp, q = envp; gp;
6964                   gp += sizeof(abi_ulong), q++) {
6965                 if (get_user_ual(addr, gp))
6966                     goto execve_efault;
6967                 if (!addr)
6968                     break;
6969                 if (!(*q = lock_user_string(addr)))
6970                     goto execve_efault;
6971                 total_size += strlen(*q) + 1;
6972             }
6973             *q = NULL;
6974 
6975             if (!(p = lock_user_string(arg1)))
6976                 goto execve_efault;
6977             /* Although execve() is not an interruptible syscall it is
6978              * a special case where we must use the safe_syscall wrapper:
6979              * if we allow a signal to happen before we make the host
6980              * syscall then we will 'lose' it, because at the point of
6981              * execve the process leaves QEMU's control. So we use the
6982              * safe syscall wrapper to ensure that we either take the
6983              * signal as a guest signal, or else it does not happen
6984              * before the execve completes and makes it the other
6985              * program's problem.
6986              */
6987             ret = get_errno(safe_execve(p, argp, envp));
6988             unlock_user(p, arg1, 0);
6989 
6990             goto execve_end;
6991 
6992         execve_efault:
6993             ret = -TARGET_EFAULT;
6994 
6995         execve_end:
6996             for (gp = guest_argp, q = argp; *q;
6997                   gp += sizeof(abi_ulong), q++) {
6998                 if (get_user_ual(addr, gp)
6999                     || !addr)
7000                     break;
7001                 unlock_user(*q, addr, 0);
7002             }
7003             for (gp = guest_envp, q = envp; *q;
7004                   gp += sizeof(abi_ulong), q++) {
7005                 if (get_user_ual(addr, gp)
7006                     || !addr)
7007                     break;
7008                 unlock_user(*q, addr, 0);
7009             }
7010 
7011             g_free(argp);
7012             g_free(envp);
7013         }
7014         return ret;
7015     case TARGET_NR_chdir:
7016         if (!(p = lock_user_string(arg1)))
7017             return -TARGET_EFAULT;
7018         ret = get_errno(chdir(p));
7019         unlock_user(p, arg1, 0);
7020         return ret;
7021 #ifdef TARGET_NR_time
7022     case TARGET_NR_time:
7023         {
7024             time_t host_time;
7025             ret = get_errno(time(&host_time));
7026             if (!is_error(ret)
7027                 && arg1
7028                 && put_user_sal(host_time, arg1))
7029                 return -TARGET_EFAULT;
7030         }
7031         return ret;
7032 #endif
7033 #ifdef TARGET_NR_mknod
7034     case TARGET_NR_mknod:
7035         if (!(p = lock_user_string(arg1)))
7036             return -TARGET_EFAULT;
7037         ret = get_errno(mknod(p, arg2, arg3));
7038         unlock_user(p, arg1, 0);
7039         return ret;
7040 #endif
7041 #if defined(TARGET_NR_mknodat)
7042     case TARGET_NR_mknodat:
7043         if (!(p = lock_user_string(arg2)))
7044             return -TARGET_EFAULT;
7045         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7046         unlock_user(p, arg2, 0);
7047         return ret;
7048 #endif
7049 #ifdef TARGET_NR_chmod
7050     case TARGET_NR_chmod:
7051         if (!(p = lock_user_string(arg1)))
7052             return -TARGET_EFAULT;
7053         ret = get_errno(chmod(p, arg2));
7054         unlock_user(p, arg1, 0);
7055         return ret;
7056 #endif
7057 #ifdef TARGET_NR_lseek
7058     case TARGET_NR_lseek:
7059         return get_errno(lseek(arg1, arg2, arg3));
7060 #endif
7061 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7062     /* Alpha specific */
7063     case TARGET_NR_getxpid:
7064         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7065         return get_errno(getpid());
7066 #endif
7067 #ifdef TARGET_NR_getpid
7068     case TARGET_NR_getpid:
7069         return get_errno(getpid());
7070 #endif
7071     case TARGET_NR_mount:
7072         {
7073             /* need to look at the data field */
7074             void *p2, *p3;
7075 
7076             if (arg1) {
7077                 p = lock_user_string(arg1);
7078                 if (!p) {
7079                     return -TARGET_EFAULT;
7080                 }
7081             } else {
7082                 p = NULL;
7083             }
7084 
7085             p2 = lock_user_string(arg2);
7086             if (!p2) {
7087                 if (arg1) {
7088                     unlock_user(p, arg1, 0);
7089                 }
7090                 return -TARGET_EFAULT;
7091             }
7092 
7093             if (arg3) {
7094                 p3 = lock_user_string(arg3);
7095                 if (!p3) {
7096                     if (arg1) {
7097                         unlock_user(p, arg1, 0);
7098                     }
7099                     unlock_user(p2, arg2, 0);
7100                     return -TARGET_EFAULT;
7101                 }
7102             } else {
7103                 p3 = NULL;
7104             }
7105 
7106             /* FIXME - arg5 should be locked, but it isn't clear how to
7107              * do that since it's not guaranteed to be a NULL-terminated
7108              * string.
7109              */
7110             if (!arg5) {
7111                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7112             } else {
7113                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7114             }
7115             ret = get_errno(ret);
7116 
7117             if (arg1) {
7118                 unlock_user(p, arg1, 0);
7119             }
7120             unlock_user(p2, arg2, 0);
7121             if (arg3) {
7122                 unlock_user(p3, arg3, 0);
7123             }
7124         }
7125         return ret;
7126 #ifdef TARGET_NR_umount
7127     case TARGET_NR_umount:
7128         if (!(p = lock_user_string(arg1)))
7129             return -TARGET_EFAULT;
7130         ret = get_errno(umount(p));
7131         unlock_user(p, arg1, 0);
7132         return ret;
7133 #endif
7134 #ifdef TARGET_NR_stime /* not on alpha */
7135     case TARGET_NR_stime:
7136         {
7137             time_t host_time;
7138             if (get_user_sal(host_time, arg1))
7139                 return -TARGET_EFAULT;
7140             return get_errno(stime(&host_time));
7141         }
7142 #endif
7143 #ifdef TARGET_NR_alarm /* not on alpha */
7144     case TARGET_NR_alarm:
7145         return alarm(arg1);
7146 #endif
7147 #ifdef TARGET_NR_pause /* not on alpha */
7148     case TARGET_NR_pause:
7149         if (!block_signals()) {
7150             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7151         }
7152         return -TARGET_EINTR;
7153 #endif
7154 #ifdef TARGET_NR_utime
7155     case TARGET_NR_utime:
7156         {
7157             struct utimbuf tbuf, *host_tbuf;
7158             struct target_utimbuf *target_tbuf;
7159             if (arg2) {
7160                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7161                     return -TARGET_EFAULT;
7162                 tbuf.actime = tswapal(target_tbuf->actime);
7163                 tbuf.modtime = tswapal(target_tbuf->modtime);
7164                 unlock_user_struct(target_tbuf, arg2, 0);
7165                 host_tbuf = &tbuf;
7166             } else {
7167                 host_tbuf = NULL;
7168             }
7169             if (!(p = lock_user_string(arg1)))
7170                 return -TARGET_EFAULT;
7171             ret = get_errno(utime(p, host_tbuf));
7172             unlock_user(p, arg1, 0);
7173         }
7174         return ret;
7175 #endif
7176 #ifdef TARGET_NR_utimes
7177     case TARGET_NR_utimes:
7178         {
7179             struct timeval *tvp, tv[2];
7180             if (arg2) {
7181                 if (copy_from_user_timeval(&tv[0], arg2)
7182                     || copy_from_user_timeval(&tv[1],
7183                                               arg2 + sizeof(struct target_timeval)))
7184                     return -TARGET_EFAULT;
7185                 tvp = tv;
7186             } else {
7187                 tvp = NULL;
7188             }
7189             if (!(p = lock_user_string(arg1)))
7190                 return -TARGET_EFAULT;
7191             ret = get_errno(utimes(p, tvp));
7192             unlock_user(p, arg1, 0);
7193         }
7194         return ret;
7195 #endif
7196 #if defined(TARGET_NR_futimesat)
7197     case TARGET_NR_futimesat:
7198         {
7199             struct timeval *tvp, tv[2];
7200             if (arg3) {
7201                 if (copy_from_user_timeval(&tv[0], arg3)
7202                     || copy_from_user_timeval(&tv[1],
7203                                               arg3 + sizeof(struct target_timeval)))
7204                     return -TARGET_EFAULT;
7205                 tvp = tv;
7206             } else {
7207                 tvp = NULL;
7208             }
7209             if (!(p = lock_user_string(arg2))) {
7210                 return -TARGET_EFAULT;
7211             }
7212             ret = get_errno(futimesat(arg1, path(p), tvp));
7213             unlock_user(p, arg2, 0);
7214         }
7215         return ret;
7216 #endif
7217 #ifdef TARGET_NR_access
7218     case TARGET_NR_access:
7219         if (!(p = lock_user_string(arg1))) {
7220             return -TARGET_EFAULT;
7221         }
7222         ret = get_errno(access(path(p), arg2));
7223         unlock_user(p, arg1, 0);
7224         return ret;
7225 #endif
7226 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7227     case TARGET_NR_faccessat:
7228         if (!(p = lock_user_string(arg2))) {
7229             return -TARGET_EFAULT;
7230         }
7231         ret = get_errno(faccessat(arg1, p, arg3, 0));
7232         unlock_user(p, arg2, 0);
7233         return ret;
7234 #endif
7235 #ifdef TARGET_NR_nice /* not on alpha */
7236     case TARGET_NR_nice:
7237         return get_errno(nice(arg1));
7238 #endif
7239     case TARGET_NR_sync:
7240         sync();
7241         return 0;
7242 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7243     case TARGET_NR_syncfs:
7244         return get_errno(syncfs(arg1));
7245 #endif
7246     case TARGET_NR_kill:
7247         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7248 #ifdef TARGET_NR_rename
7249     case TARGET_NR_rename:
7250         {
7251             void *p2;
7252             p = lock_user_string(arg1);
7253             p2 = lock_user_string(arg2);
7254             if (!p || !p2)
7255                 ret = -TARGET_EFAULT;
7256             else
7257                 ret = get_errno(rename(p, p2));
7258             unlock_user(p2, arg2, 0);
7259             unlock_user(p, arg1, 0);
7260         }
7261         return ret;
7262 #endif
7263 #if defined(TARGET_NR_renameat)
7264     case TARGET_NR_renameat:
7265         {
7266             void *p2;
7267             p  = lock_user_string(arg2);
7268             p2 = lock_user_string(arg4);
7269             if (!p || !p2)
7270                 ret = -TARGET_EFAULT;
7271             else
7272                 ret = get_errno(renameat(arg1, p, arg3, p2));
7273             unlock_user(p2, arg4, 0);
7274             unlock_user(p, arg2, 0);
7275         }
7276         return ret;
7277 #endif
7278 #if defined(TARGET_NR_renameat2)
7279     case TARGET_NR_renameat2:
7280         {
7281             void *p2;
7282             p  = lock_user_string(arg2);
7283             p2 = lock_user_string(arg4);
7284             if (!p || !p2) {
7285                 ret = -TARGET_EFAULT;
7286             } else {
7287                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7288             }
7289             unlock_user(p2, arg4, 0);
7290             unlock_user(p, arg2, 0);
7291         }
7292         return ret;
7293 #endif
7294 #ifdef TARGET_NR_mkdir
7295     case TARGET_NR_mkdir:
7296         if (!(p = lock_user_string(arg1)))
7297             return -TARGET_EFAULT;
7298         ret = get_errno(mkdir(p, arg2));
7299         unlock_user(p, arg1, 0);
7300         return ret;
7301 #endif
7302 #if defined(TARGET_NR_mkdirat)
7303     case TARGET_NR_mkdirat:
7304         if (!(p = lock_user_string(arg2)))
7305             return -TARGET_EFAULT;
7306         ret = get_errno(mkdirat(arg1, p, arg3));
7307         unlock_user(p, arg2, 0);
7308         return ret;
7309 #endif
7310 #ifdef TARGET_NR_rmdir
7311     case TARGET_NR_rmdir:
7312         if (!(p = lock_user_string(arg1)))
7313             return -TARGET_EFAULT;
7314         ret = get_errno(rmdir(p));
7315         unlock_user(p, arg1, 0);
7316         return ret;
7317 #endif
7318     case TARGET_NR_dup:
7319         ret = get_errno(dup(arg1));
7320         if (ret >= 0) {
7321             fd_trans_dup(arg1, ret);
7322         }
7323         return ret;
7324 #ifdef TARGET_NR_pipe
7325     case TARGET_NR_pipe:
7326         return do_pipe(cpu_env, arg1, 0, 0);
7327 #endif
7328 #ifdef TARGET_NR_pipe2
7329     case TARGET_NR_pipe2:
7330         return do_pipe(cpu_env, arg1,
7331                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7332 #endif
7333     case TARGET_NR_times:
7334         {
7335             struct target_tms *tmsp;
7336             struct tms tms;
7337             ret = get_errno(times(&tms));
7338             if (arg1) {
7339                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7340                 if (!tmsp)
7341                     return -TARGET_EFAULT;
7342                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7343                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7344                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7345                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7346             }
7347             if (!is_error(ret))
7348                 ret = host_to_target_clock_t(ret);
7349         }
7350         return ret;
7351     case TARGET_NR_acct:
7352         if (arg1 == 0) {
7353             ret = get_errno(acct(NULL));
7354         } else {
7355             if (!(p = lock_user_string(arg1))) {
7356                 return -TARGET_EFAULT;
7357             }
7358             ret = get_errno(acct(path(p)));
7359             unlock_user(p, arg1, 0);
7360         }
7361         return ret;
7362 #ifdef TARGET_NR_umount2
7363     case TARGET_NR_umount2:
7364         if (!(p = lock_user_string(arg1)))
7365             return -TARGET_EFAULT;
7366         ret = get_errno(umount2(p, arg2));
7367         unlock_user(p, arg1, 0);
7368         return ret;
7369 #endif
7370     case TARGET_NR_ioctl:
7371         return do_ioctl(arg1, arg2, arg3);
7372 #ifdef TARGET_NR_fcntl
7373     case TARGET_NR_fcntl:
7374         return do_fcntl(arg1, arg2, arg3);
7375 #endif
7376     case TARGET_NR_setpgid:
7377         return get_errno(setpgid(arg1, arg2));
7378     case TARGET_NR_umask:
7379         return get_errno(umask(arg1));
7380     case TARGET_NR_chroot:
7381         if (!(p = lock_user_string(arg1)))
7382             return -TARGET_EFAULT;
7383         ret = get_errno(chroot(p));
7384         unlock_user(p, arg1, 0);
7385         return ret;
7386 #ifdef TARGET_NR_dup2
7387     case TARGET_NR_dup2:
7388         ret = get_errno(dup2(arg1, arg2));
7389         if (ret >= 0) {
7390             fd_trans_dup(arg1, arg2);
7391         }
7392         return ret;
7393 #endif
7394 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7395     case TARGET_NR_dup3:
7396     {
7397         int host_flags;
7398 
7399         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7400             return -TARGET_EINVAL;
7401         }
7402         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7403         ret = get_errno(dup3(arg1, arg2, host_flags));
7404         if (ret >= 0) {
7405             fd_trans_dup(arg1, arg2);
7406         }
7407         return ret;
7408     }
7409 #endif
7410 #ifdef TARGET_NR_getppid /* not on alpha */
7411     case TARGET_NR_getppid:
7412         return get_errno(getppid());
7413 #endif
7414 #ifdef TARGET_NR_getpgrp
7415     case TARGET_NR_getpgrp:
7416         return get_errno(getpgrp());
7417 #endif
7418     case TARGET_NR_setsid:
7419         return get_errno(setsid());
7420 #ifdef TARGET_NR_sigaction
7421     case TARGET_NR_sigaction:
7422         {
7423 #if defined(TARGET_ALPHA)
7424             struct target_sigaction act, oact, *pact = 0;
7425             struct target_old_sigaction *old_act;
7426             if (arg2) {
7427                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7428                     return -TARGET_EFAULT;
7429                 act._sa_handler = old_act->_sa_handler;
7430                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7431                 act.sa_flags = old_act->sa_flags;
7432                 act.sa_restorer = 0;
7433                 unlock_user_struct(old_act, arg2, 0);
7434                 pact = &act;
7435             }
7436             ret = get_errno(do_sigaction(arg1, pact, &oact));
7437             if (!is_error(ret) && arg3) {
7438                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7439                     return -TARGET_EFAULT;
7440                 old_act->_sa_handler = oact._sa_handler;
7441                 old_act->sa_mask = oact.sa_mask.sig[0];
7442                 old_act->sa_flags = oact.sa_flags;
7443                 unlock_user_struct(old_act, arg3, 1);
7444             }
7445 #elif defined(TARGET_MIPS)
7446 	    struct target_sigaction act, oact, *pact, *old_act;
7447 
7448 	    if (arg2) {
7449                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7450                     return -TARGET_EFAULT;
7451 		act._sa_handler = old_act->_sa_handler;
7452 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7453 		act.sa_flags = old_act->sa_flags;
7454 		unlock_user_struct(old_act, arg2, 0);
7455 		pact = &act;
7456 	    } else {
7457 		pact = NULL;
7458 	    }
7459 
7460 	    ret = get_errno(do_sigaction(arg1, pact, &oact));
7461 
7462 	    if (!is_error(ret) && arg3) {
7463                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7464                     return -TARGET_EFAULT;
7465 		old_act->_sa_handler = oact._sa_handler;
7466 		old_act->sa_flags = oact.sa_flags;
7467 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7468 		old_act->sa_mask.sig[1] = 0;
7469 		old_act->sa_mask.sig[2] = 0;
7470 		old_act->sa_mask.sig[3] = 0;
7471 		unlock_user_struct(old_act, arg3, 1);
7472 	    }
7473 #else
7474             struct target_old_sigaction *old_act;
7475             struct target_sigaction act, oact, *pact;
7476             if (arg2) {
7477                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7478                     return -TARGET_EFAULT;
7479                 act._sa_handler = old_act->_sa_handler;
7480                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7481                 act.sa_flags = old_act->sa_flags;
7482                 act.sa_restorer = old_act->sa_restorer;
7483 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7484                 act.ka_restorer = 0;
7485 #endif
7486                 unlock_user_struct(old_act, arg2, 0);
7487                 pact = &act;
7488             } else {
7489                 pact = NULL;
7490             }
7491             ret = get_errno(do_sigaction(arg1, pact, &oact));
7492             if (!is_error(ret) && arg3) {
7493                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7494                     return -TARGET_EFAULT;
7495                 old_act->_sa_handler = oact._sa_handler;
7496                 old_act->sa_mask = oact.sa_mask.sig[0];
7497                 old_act->sa_flags = oact.sa_flags;
7498                 old_act->sa_restorer = oact.sa_restorer;
7499                 unlock_user_struct(old_act, arg3, 1);
7500             }
7501 #endif
7502         }
7503         return ret;
7504 #endif
7505     case TARGET_NR_rt_sigaction:
7506         {
7507 #if defined(TARGET_ALPHA)
7508             /* For Alpha and SPARC this is a 5 argument syscall, with
7509              * a 'restorer' parameter which must be copied into the
7510              * sa_restorer field of the sigaction struct.
7511              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7512              * and arg5 is the sigsetsize.
7513              * Alpha also has a separate rt_sigaction struct that it uses
7514              * here; SPARC uses the usual sigaction struct.
7515              */
7516             struct target_rt_sigaction *rt_act;
7517             struct target_sigaction act, oact, *pact = 0;
7518 
7519             if (arg4 != sizeof(target_sigset_t)) {
7520                 return -TARGET_EINVAL;
7521             }
7522             if (arg2) {
7523                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7524                     return -TARGET_EFAULT;
7525                 act._sa_handler = rt_act->_sa_handler;
7526                 act.sa_mask = rt_act->sa_mask;
7527                 act.sa_flags = rt_act->sa_flags;
7528                 act.sa_restorer = arg5;
7529                 unlock_user_struct(rt_act, arg2, 0);
7530                 pact = &act;
7531             }
7532             ret = get_errno(do_sigaction(arg1, pact, &oact));
7533             if (!is_error(ret) && arg3) {
7534                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7535                     return -TARGET_EFAULT;
7536                 rt_act->_sa_handler = oact._sa_handler;
7537                 rt_act->sa_mask = oact.sa_mask;
7538                 rt_act->sa_flags = oact.sa_flags;
7539                 unlock_user_struct(rt_act, arg3, 1);
7540             }
7541 #else
7542 #ifdef TARGET_SPARC
7543             target_ulong restorer = arg4;
7544             target_ulong sigsetsize = arg5;
7545 #else
7546             target_ulong sigsetsize = arg4;
7547 #endif
7548             struct target_sigaction *act;
7549             struct target_sigaction *oact;
7550 
7551             if (sigsetsize != sizeof(target_sigset_t)) {
7552                 return -TARGET_EINVAL;
7553             }
7554             if (arg2) {
7555                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
7556                     return -TARGET_EFAULT;
7557                 }
7558 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7559                 act->ka_restorer = restorer;
7560 #endif
7561             } else {
7562                 act = NULL;
7563             }
7564             if (arg3) {
7565                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7566                     ret = -TARGET_EFAULT;
7567                     goto rt_sigaction_fail;
7568                 }
7569             } else
7570                 oact = NULL;
7571             ret = get_errno(do_sigaction(arg1, act, oact));
7572 	rt_sigaction_fail:
7573             if (act)
7574                 unlock_user_struct(act, arg2, 0);
7575             if (oact)
7576                 unlock_user_struct(oact, arg3, 1);
7577 #endif
7578         }
7579         return ret;
7580 #ifdef TARGET_NR_sgetmask /* not on alpha */
7581     case TARGET_NR_sgetmask:
7582         {
7583             sigset_t cur_set;
7584             abi_ulong target_set;
7585             ret = do_sigprocmask(0, NULL, &cur_set);
7586             if (!ret) {
7587                 host_to_target_old_sigset(&target_set, &cur_set);
7588                 ret = target_set;
7589             }
7590         }
7591         return ret;
7592 #endif
7593 #ifdef TARGET_NR_ssetmask /* not on alpha */
7594     case TARGET_NR_ssetmask:
7595         {
7596             sigset_t set, oset;
7597             abi_ulong target_set = arg1;
7598             target_to_host_old_sigset(&set, &target_set);
7599             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7600             if (!ret) {
7601                 host_to_target_old_sigset(&target_set, &oset);
7602                 ret = target_set;
7603             }
7604         }
7605         return ret;
7606 #endif
7607 #ifdef TARGET_NR_sigprocmask
7608     case TARGET_NR_sigprocmask:
7609         {
7610 #if defined(TARGET_ALPHA)
7611             sigset_t set, oldset;
7612             abi_ulong mask;
7613             int how;
7614 
7615             switch (arg1) {
7616             case TARGET_SIG_BLOCK:
7617                 how = SIG_BLOCK;
7618                 break;
7619             case TARGET_SIG_UNBLOCK:
7620                 how = SIG_UNBLOCK;
7621                 break;
7622             case TARGET_SIG_SETMASK:
7623                 how = SIG_SETMASK;
7624                 break;
7625             default:
7626                 return -TARGET_EINVAL;
7627             }
7628             mask = arg2;
7629             target_to_host_old_sigset(&set, &mask);
7630 
7631             ret = do_sigprocmask(how, &set, &oldset);
7632             if (!is_error(ret)) {
7633                 host_to_target_old_sigset(&mask, &oldset);
7634                 ret = mask;
7635                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7636             }
7637 #else
7638             sigset_t set, oldset, *set_ptr;
7639             int how;
7640 
7641             if (arg2) {
7642                 switch (arg1) {
7643                 case TARGET_SIG_BLOCK:
7644                     how = SIG_BLOCK;
7645                     break;
7646                 case TARGET_SIG_UNBLOCK:
7647                     how = SIG_UNBLOCK;
7648                     break;
7649                 case TARGET_SIG_SETMASK:
7650                     how = SIG_SETMASK;
7651                     break;
7652                 default:
7653                     return -TARGET_EINVAL;
7654                 }
7655                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7656                     return -TARGET_EFAULT;
7657                 target_to_host_old_sigset(&set, p);
7658                 unlock_user(p, arg2, 0);
7659                 set_ptr = &set;
7660             } else {
7661                 how = 0;
7662                 set_ptr = NULL;
7663             }
7664             ret = do_sigprocmask(how, set_ptr, &oldset);
7665             if (!is_error(ret) && arg3) {
7666                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7667                     return -TARGET_EFAULT;
7668                 host_to_target_old_sigset(p, &oldset);
7669                 unlock_user(p, arg3, sizeof(target_sigset_t));
7670             }
7671 #endif
7672         }
7673         return ret;
7674 #endif
7675     case TARGET_NR_rt_sigprocmask:
7676         {
7677             int how = arg1;
7678             sigset_t set, oldset, *set_ptr;
7679 
7680             if (arg4 != sizeof(target_sigset_t)) {
7681                 return -TARGET_EINVAL;
7682             }
7683 
7684             if (arg2) {
7685                 switch(how) {
7686                 case TARGET_SIG_BLOCK:
7687                     how = SIG_BLOCK;
7688                     break;
7689                 case TARGET_SIG_UNBLOCK:
7690                     how = SIG_UNBLOCK;
7691                     break;
7692                 case TARGET_SIG_SETMASK:
7693                     how = SIG_SETMASK;
7694                     break;
7695                 default:
7696                     return -TARGET_EINVAL;
7697                 }
7698                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7699                     return -TARGET_EFAULT;
7700                 target_to_host_sigset(&set, p);
7701                 unlock_user(p, arg2, 0);
7702                 set_ptr = &set;
7703             } else {
7704                 how = 0;
7705                 set_ptr = NULL;
7706             }
7707             ret = do_sigprocmask(how, set_ptr, &oldset);
7708             if (!is_error(ret) && arg3) {
7709                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7710                     return -TARGET_EFAULT;
7711                 host_to_target_sigset(p, &oldset);
7712                 unlock_user(p, arg3, sizeof(target_sigset_t));
7713             }
7714         }
7715         return ret;
7716 #ifdef TARGET_NR_sigpending
7717     case TARGET_NR_sigpending:
7718         {
7719             sigset_t set;
7720             ret = get_errno(sigpending(&set));
7721             if (!is_error(ret)) {
7722                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7723                     return -TARGET_EFAULT;
7724                 host_to_target_old_sigset(p, &set);
7725                 unlock_user(p, arg1, sizeof(target_sigset_t));
7726             }
7727         }
7728         return ret;
7729 #endif
7730     case TARGET_NR_rt_sigpending:
7731         {
7732             sigset_t set;
7733 
7734             /* Yes, this check is >, not != like most. We follow the kernel's
7735              * logic and it does it like this because it implements
7736              * NR_sigpending through the same code path, and in that case
7737              * the old_sigset_t is smaller in size.
7738              */
7739             if (arg2 > sizeof(target_sigset_t)) {
7740                 return -TARGET_EINVAL;
7741             }
7742 
7743             ret = get_errno(sigpending(&set));
7744             if (!is_error(ret)) {
7745                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7746                     return -TARGET_EFAULT;
7747                 host_to_target_sigset(p, &set);
7748                 unlock_user(p, arg1, sizeof(target_sigset_t));
7749             }
7750         }
7751         return ret;
7752 #ifdef TARGET_NR_sigsuspend
7753     case TARGET_NR_sigsuspend:
7754         {
7755             TaskState *ts = cpu->opaque;
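            /*
             * The mask to wait with is stashed in the TaskState so the
             * signal delivery code can restore the caller's original mask
             * after a handler has run, per normal sigsuspend semantics.
             */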
7756 #if defined(TARGET_ALPHA)
7757             abi_ulong mask = arg1;
7758             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
7759 #else
7760             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7761                 return -TARGET_EFAULT;
7762             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
7763             unlock_user(p, arg1, 0);
7764 #endif
7765             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7766                                                SIGSET_T_SIZE));
7767             if (ret != -TARGET_ERESTARTSYS) {
7768                 ts->in_sigsuspend = 1;
7769             }
7770         }
7771         return ret;
7772 #endif
7773     case TARGET_NR_rt_sigsuspend:
7774         {
7775             TaskState *ts = cpu->opaque;
7776 
7777             if (arg2 != sizeof(target_sigset_t)) {
7778                 return -TARGET_EINVAL;
7779             }
7780             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7781                 return -TARGET_EFAULT;
7782             target_to_host_sigset(&ts->sigsuspend_mask, p);
7783             unlock_user(p, arg1, 0);
7784             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7785                                                SIGSET_T_SIZE));
7786             if (ret != -TARGET_ERESTARTSYS) {
7787                 ts->in_sigsuspend = 1;
7788             }
7789         }
7790         return ret;
7791     case TARGET_NR_rt_sigtimedwait:
7792         {
7793             sigset_t set;
7794             struct timespec uts, *puts;
7795             siginfo_t uinfo;
7796 
7797             if (arg4 != sizeof(target_sigset_t)) {
7798                 return -TARGET_EINVAL;
7799             }
7800 
7801             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7802                 return -TARGET_EFAULT;
7803             target_to_host_sigset(&set, p);
7804             unlock_user(p, arg1, 0);
7805             if (arg3) {
7806                 puts = &uts;
7807                 target_to_host_timespec(puts, arg3);
7808             } else {
7809                 puts = NULL;
7810             }
7811             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
7812                                                  SIGSET_T_SIZE));
7813             if (!is_error(ret)) {
7814                 if (arg2) {
7815                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
7816                                   0);
7817                     if (!p) {
7818                         return -TARGET_EFAULT;
7819                     }
7820                     host_to_target_siginfo(p, &uinfo);
7821                     unlock_user(p, arg2, sizeof(target_siginfo_t));
7822                 }
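                /* On success the return value is the host signal number
                 * that was received; convert it to the target numbering. */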
7823                 ret = host_to_target_signal(ret);
7824             }
7825         }
7826         return ret;
7827     case TARGET_NR_rt_sigqueueinfo:
7828         {
7829             siginfo_t uinfo;
7830 
7831             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
7832             if (!p) {
7833                 return -TARGET_EFAULT;
7834             }
7835             target_to_host_siginfo(&uinfo, p);
7836             unlock_user(p, arg3, 0);
7837             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
7838         }
7839         return ret;
7840     case TARGET_NR_rt_tgsigqueueinfo:
7841         {
7842             siginfo_t uinfo;
7843 
7844             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
7845             if (!p) {
7846                 return -TARGET_EFAULT;
7847             }
7848             target_to_host_siginfo(&uinfo, p);
7849             unlock_user(p, arg4, 0);
7850             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
7851         }
7852         return ret;
7853 #ifdef TARGET_NR_sigreturn
7854     case TARGET_NR_sigreturn:
7855         if (block_signals()) {
7856             return -TARGET_ERESTARTSYS;
7857         }
7858         return do_sigreturn(cpu_env);
7859 #endif
7860     case TARGET_NR_rt_sigreturn:
7861         if (block_signals()) {
7862             return -TARGET_ERESTARTSYS;
7863         }
7864         return do_rt_sigreturn(cpu_env);
7865     case TARGET_NR_sethostname:
7866         if (!(p = lock_user_string(arg1)))
7867             return -TARGET_EFAULT;
7868         ret = get_errno(sethostname(p, arg2));
7869         unlock_user(p, arg1, 0);
7870         return ret;
7871 #ifdef TARGET_NR_setrlimit
7872     case TARGET_NR_setrlimit:
7873         {
7874             int resource = target_to_host_resource(arg1);
7875             struct target_rlimit *target_rlim;
7876             struct rlimit rlim;
7877             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
7878                 return -TARGET_EFAULT;
7879             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
7880             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
7881             unlock_user_struct(target_rlim, arg2, 0);
7882             /*
7883              * If we just passed through resource limit settings for memory then
7884              * they would also apply to QEMU's own allocations, and QEMU will
7885              * crash or hang or die if its allocations fail. Ideally we would
7886              * track the guest allocations in QEMU and apply the limits ourselves.
7887              * For now, just tell the guest the call succeeded but don't actually
7888              * limit anything.
7889              */
7890             if (resource != RLIMIT_AS &&
7891                 resource != RLIMIT_DATA &&
7892                 resource != RLIMIT_STACK) {
7893                 return get_errno(setrlimit(resource, &rlim));
7894             } else {
7895                 return 0;
7896             }
7897         }
7898 #endif
7899 #ifdef TARGET_NR_getrlimit
7900     case TARGET_NR_getrlimit:
7901         {
7902             int resource = target_to_host_resource(arg1);
7903             struct target_rlimit *target_rlim;
7904             struct rlimit rlim;
7905 
7906             ret = get_errno(getrlimit(resource, &rlim));
7907             if (!is_error(ret)) {
7908                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7909                     return -TARGET_EFAULT;
7910                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7911                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7912                 unlock_user_struct(target_rlim, arg2, 1);
7913             }
7914         }
7915         return ret;
7916 #endif
7917     case TARGET_NR_getrusage:
7918         {
7919             struct rusage rusage;
7920             ret = get_errno(getrusage(arg1, &rusage));
7921             if (!is_error(ret)) {
7922                 ret = host_to_target_rusage(arg2, &rusage);
7923             }
7924         }
7925         return ret;
7926     case TARGET_NR_gettimeofday:
7927         {
7928             struct timeval tv;
7929             ret = get_errno(gettimeofday(&tv, NULL));
7930             if (!is_error(ret)) {
7931                 if (copy_to_user_timeval(arg1, &tv))
7932                     return -TARGET_EFAULT;
7933             }
7934         }
7935         return ret;
7936     case TARGET_NR_settimeofday:
7937         {
7938             struct timeval tv, *ptv = NULL;
7939             struct timezone tz, *ptz = NULL;
7940 
7941             if (arg1) {
7942                 if (copy_from_user_timeval(&tv, arg1)) {
7943                     return -TARGET_EFAULT;
7944                 }
7945                 ptv = &tv;
7946             }
7947 
7948             if (arg2) {
7949                 if (copy_from_user_timezone(&tz, arg2)) {
7950                     return -TARGET_EFAULT;
7951                 }
7952                 ptz = &tz;
7953             }
7954 
7955             return get_errno(settimeofday(ptv, ptz));
7956         }
7957 #if defined(TARGET_NR_select)
7958     case TARGET_NR_select:
7959 #if defined(TARGET_WANT_NI_OLD_SELECT)
7960         /* some architectures used to have old_select here
7961          * but now fail it with ENOSYS.
7962          */
7963         ret = -TARGET_ENOSYS;
7964 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
7965         ret = do_old_select(arg1);
7966 #else
7967         ret = do_select(arg1, arg2, arg3, arg4, arg5);
7968 #endif
7969         return ret;
7970 #endif
7971 #ifdef TARGET_NR_pselect6
7972     case TARGET_NR_pselect6:
7973         {
7974             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
7975             fd_set rfds, wfds, efds;
7976             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
7977             struct timespec ts, *ts_ptr;
7978 
7979             /*
7980              * The 6th arg is actually two args smashed together,
7981              * so we cannot use the C library.
7982              */
7983             sigset_t set;
7984             struct {
7985                 sigset_t *set;
7986                 size_t size;
7987             } sig, *sig_ptr;
7988 
7989             abi_ulong arg_sigset, arg_sigsize, *arg7;
7990             target_sigset_t *target_sigset;
7991 
7992             n = arg1;
7993             rfd_addr = arg2;
7994             wfd_addr = arg3;
7995             efd_addr = arg4;
7996             ts_addr = arg5;
7997 
7998             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
7999             if (ret) {
8000                 return ret;
8001             }
8002             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8003             if (ret) {
8004                 return ret;
8005             }
8006             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8007             if (ret) {
8008                 return ret;
8009             }
8010 
8011             /*
8012              * This takes a timespec, and not a timeval, so we cannot
8013              * use the do_select() helper ...
8014              */
8015             if (ts_addr) {
8016                 if (target_to_host_timespec(&ts, ts_addr)) {
8017                     return -TARGET_EFAULT;
8018                 }
8019                 ts_ptr = &ts;
8020             } else {
8021                 ts_ptr = NULL;
8022             }
8023 
8024             /* Extract the two packed args for the sigset */
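            /*
             * arg6 points at two abi_ulongs in guest memory: the guest
             * address of the sigset and its size, mirroring the kernel's
             * own argument pack for pselect6.
             */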
8025             if (arg6) {
8026                 sig_ptr = &sig;
8027                 sig.size = SIGSET_T_SIZE;
8028 
8029                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8030                 if (!arg7) {
8031                     return -TARGET_EFAULT;
8032                 }
8033                 arg_sigset = tswapal(arg7[0]);
8034                 arg_sigsize = tswapal(arg7[1]);
8035                 unlock_user(arg7, arg6, 0);
8036 
8037                 if (arg_sigset) {
8038                     sig.set = &set;
8039                     if (arg_sigsize != sizeof(*target_sigset)) {
8040                         /* Like the kernel, we enforce correct size sigsets */
8041                         return -TARGET_EINVAL;
8042                     }
8043                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8044                                               sizeof(*target_sigset), 1);
8045                     if (!target_sigset) {
8046                         return -TARGET_EFAULT;
8047                     }
8048                     target_to_host_sigset(&set, target_sigset);
8049                     unlock_user(target_sigset, arg_sigset, 0);
8050                 } else {
8051                     sig.set = NULL;
8052                 }
8053             } else {
8054                 sig_ptr = NULL;
8055             }
8056 
8057             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8058                                           ts_ptr, sig_ptr));
8059 
8060             if (!is_error(ret)) {
8061                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8062                     return -TARGET_EFAULT;
8063                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8064                     return -TARGET_EFAULT;
8065                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8066                     return -TARGET_EFAULT;
8067 
8068                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8069                     return -TARGET_EFAULT;
8070             }
8071         }
8072         return ret;
8073 #endif
8074 #ifdef TARGET_NR_symlink
8075     case TARGET_NR_symlink:
8076         {
8077             void *p2;
8078             p = lock_user_string(arg1);
8079             p2 = lock_user_string(arg2);
8080             if (!p || !p2)
8081                 ret = -TARGET_EFAULT;
8082             else
8083                 ret = get_errno(symlink(p, p2));
8084             unlock_user(p2, arg2, 0);
8085             unlock_user(p, arg1, 0);
8086         }
8087         return ret;
8088 #endif
8089 #if defined(TARGET_NR_symlinkat)
8090     case TARGET_NR_symlinkat:
8091         {
8092             void *p2;
8093             p  = lock_user_string(arg1);
8094             p2 = lock_user_string(arg3);
8095             if (!p || !p2)
8096                 ret = -TARGET_EFAULT;
8097             else
8098                 ret = get_errno(symlinkat(p, arg2, p2));
8099             unlock_user(p2, arg3, 0);
8100             unlock_user(p, arg1, 0);
8101         }
8102         return ret;
8103 #endif
8104 #ifdef TARGET_NR_readlink
8105     case TARGET_NR_readlink:
8106         {
8107             void *p2;
8108             p = lock_user_string(arg1);
8109             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8110             if (!p || !p2) {
8111                 ret = -TARGET_EFAULT;
8112             } else if (!arg3) {
8113                 /* Short circuit this for the magic exe check. */
8114                 ret = -TARGET_EINVAL;
8115             } else if (is_proc_myself((const char *)p, "exe")) {
8116                 char real[PATH_MAX], *temp;
8117                 temp = realpath(exec_path, real);
8118                 /* Return value is # of bytes that we wrote to the buffer. */
8119                 if (temp == NULL) {
8120                     ret = get_errno(-1);
8121                 } else {
8122                     /* Don't worry about sign mismatch as earlier mapping
8123                      * logic would have thrown a bad address error. */
8124                     ret = MIN(strlen(real), arg3);
8125                     /* We cannot NUL terminate the string. */
8126                     memcpy(p2, real, ret);
8127                 }
8128             } else {
8129                 ret = get_errno(readlink(path(p), p2, arg3));
8130             }
8131             unlock_user(p2, arg2, ret);
8132             unlock_user(p, arg1, 0);
8133         }
8134         return ret;
8135 #endif
8136 #if defined(TARGET_NR_readlinkat)
8137     case TARGET_NR_readlinkat:
8138         {
8139             void *p2;
8140             p  = lock_user_string(arg2);
8141             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8142             if (!p || !p2) {
8143                 ret = -TARGET_EFAULT;
8144             } else if (is_proc_myself((const char *)p, "exe")) {
8145                 char real[PATH_MAX], *temp;
8146                 temp = realpath(exec_path, real);
8147                 ret = temp == NULL ? get_errno(-1) : strlen(real);
8148                 snprintf((char *)p2, arg4, "%s", real);
8149             } else {
8150                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8151             }
8152             unlock_user(p2, arg3, ret);
8153             unlock_user(p, arg2, 0);
8154         }
8155         return ret;
8156 #endif
8157 #ifdef TARGET_NR_swapon
8158     case TARGET_NR_swapon:
8159         if (!(p = lock_user_string(arg1)))
8160             return -TARGET_EFAULT;
8161         ret = get_errno(swapon(p, arg2));
8162         unlock_user(p, arg1, 0);
8163         return ret;
8164 #endif
8165     case TARGET_NR_reboot:
8166         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8167            /* arg4 is only valid for LINUX_REBOOT_CMD_RESTART2; ignore it otherwise */
8168            p = lock_user_string(arg4);
8169            if (!p) {
8170                return -TARGET_EFAULT;
8171            }
8172            ret = get_errno(reboot(arg1, arg2, arg3, p));
8173            unlock_user(p, arg4, 0);
8174         } else {
8175            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8176         }
8177         return ret;
8178 #ifdef TARGET_NR_mmap
8179     case TARGET_NR_mmap:
8180 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8181     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8182     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8183     || defined(TARGET_S390X)
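        /*
         * These targets use the old mmap() calling convention: arg1 points
         * at a block of six arguments in guest memory rather than passing
         * them in registers.
         */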
8184         {
8185             abi_ulong *v;
8186             abi_ulong v1, v2, v3, v4, v5, v6;
8187             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8188                 return -TARGET_EFAULT;
8189             v1 = tswapal(v[0]);
8190             v2 = tswapal(v[1]);
8191             v3 = tswapal(v[2]);
8192             v4 = tswapal(v[3]);
8193             v5 = tswapal(v[4]);
8194             v6 = tswapal(v[5]);
8195             unlock_user(v, arg1, 0);
8196             ret = get_errno(target_mmap(v1, v2, v3,
8197                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8198                                         v5, v6));
8199         }
8200 #else
8201         ret = get_errno(target_mmap(arg1, arg2, arg3,
8202                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8203                                     arg5,
8204                                     arg6));
8205 #endif
8206         return ret;
8207 #endif
8208 #ifdef TARGET_NR_mmap2
8209     case TARGET_NR_mmap2:
8210 #ifndef MMAP_SHIFT
8211 #define MMAP_SHIFT 12
8212 #endif
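        /* The mmap2 offset argument is in units of 2^MMAP_SHIFT bytes
         * (4096 unless the target defines otherwise). */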
8213         ret = target_mmap(arg1, arg2, arg3,
8214                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8215                           arg5, arg6 << MMAP_SHIFT);
8216         return get_errno(ret);
8217 #endif
8218     case TARGET_NR_munmap:
8219         return get_errno(target_munmap(arg1, arg2));
8220     case TARGET_NR_mprotect:
8221         {
8222             TaskState *ts = cpu->opaque;
8223             /* Special hack to detect libc making the stack executable.  */
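            /* When it does, drop PROT_GROWSDOWN (the host mapping of the
             * guest stack is not necessarily a grows-down VMA) and widen
             * the request to start at the stack limit instead. */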
8224             if ((arg3 & PROT_GROWSDOWN)
8225                 && arg1 >= ts->info->stack_limit
8226                 && arg1 <= ts->info->start_stack) {
8227                 arg3 &= ~PROT_GROWSDOWN;
8228                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8229                 arg1 = ts->info->stack_limit;
8230             }
8231         }
8232         return get_errno(target_mprotect(arg1, arg2, arg3));
8233 #ifdef TARGET_NR_mremap
8234     case TARGET_NR_mremap:
8235         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8236 #endif
8237         /* ??? msync/mlock/munlock are broken for softmmu.  */
8238 #ifdef TARGET_NR_msync
8239     case TARGET_NR_msync:
8240         return get_errno(msync(g2h(arg1), arg2, arg3));
8241 #endif
8242 #ifdef TARGET_NR_mlock
8243     case TARGET_NR_mlock:
8244         return get_errno(mlock(g2h(arg1), arg2));
8245 #endif
8246 #ifdef TARGET_NR_munlock
8247     case TARGET_NR_munlock:
8248         return get_errno(munlock(g2h(arg1), arg2));
8249 #endif
8250 #ifdef TARGET_NR_mlockall
8251     case TARGET_NR_mlockall:
8252         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8253 #endif
8254 #ifdef TARGET_NR_munlockall
8255     case TARGET_NR_munlockall:
8256         return get_errno(munlockall());
8257 #endif
8258 #ifdef TARGET_NR_truncate
8259     case TARGET_NR_truncate:
8260         if (!(p = lock_user_string(arg1)))
8261             return -TARGET_EFAULT;
8262         ret = get_errno(truncate(p, arg2));
8263         unlock_user(p, arg1, 0);
8264         return ret;
8265 #endif
8266 #ifdef TARGET_NR_ftruncate
8267     case TARGET_NR_ftruncate:
8268         return get_errno(ftruncate(arg1, arg2));
8269 #endif
8270     case TARGET_NR_fchmod:
8271         return get_errno(fchmod(arg1, arg2));
8272 #if defined(TARGET_NR_fchmodat)
8273     case TARGET_NR_fchmodat:
8274         if (!(p = lock_user_string(arg2)))
8275             return -TARGET_EFAULT;
8276         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8277         unlock_user(p, arg2, 0);
8278         return ret;
8279 #endif
8280     case TARGET_NR_getpriority:
8281         /* Note that negative values are valid for getpriority, so we must
8282            differentiate based on errno settings.  */
8283         errno = 0;
8284         ret = getpriority(arg1, arg2);
8285         if (ret == -1 && errno != 0) {
8286             return -host_to_target_errno(errno);
8287         }
8288 #ifdef TARGET_ALPHA
8289         /* Return value is the unbiased priority.  Signal no error.  */
8290         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8291 #else
8292         /* Return value is a biased priority to avoid negative numbers.  */
8293         ret = 20 - ret;
8294 #endif
8295         return ret;
8296     case TARGET_NR_setpriority:
8297         return get_errno(setpriority(arg1, arg2, arg3));
8298 #ifdef TARGET_NR_statfs
8299     case TARGET_NR_statfs:
8300         if (!(p = lock_user_string(arg1))) {
8301             return -TARGET_EFAULT;
8302         }
8303         ret = get_errno(statfs(path(p), &stfs));
8304         unlock_user(p, arg1, 0);
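    /* TARGET_NR_fstatfs jumps here with stfs already filled in. */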
8305     convert_statfs:
8306         if (!is_error(ret)) {
8307             struct target_statfs *target_stfs;
8308 
8309             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8310                 return -TARGET_EFAULT;
8311             __put_user(stfs.f_type, &target_stfs->f_type);
8312             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8313             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8314             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8315             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8316             __put_user(stfs.f_files, &target_stfs->f_files);
8317             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8318             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8319             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8320             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8321             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8322 #ifdef _STATFS_F_FLAGS
8323             __put_user(stfs.f_flags, &target_stfs->f_flags);
8324 #else
8325             __put_user(0, &target_stfs->f_flags);
8326 #endif
8327             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8328             unlock_user_struct(target_stfs, arg2, 1);
8329         }
8330         return ret;
8331 #endif
8332 #ifdef TARGET_NR_fstatfs
8333     case TARGET_NR_fstatfs:
8334         ret = get_errno(fstatfs(arg1, &stfs));
8335         goto convert_statfs;
8336 #endif
8337 #ifdef TARGET_NR_statfs64
8338     case TARGET_NR_statfs64:
8339         if (!(p = lock_user_string(arg1))) {
8340             return -TARGET_EFAULT;
8341         }
8342         ret = get_errno(statfs(path(p), &stfs));
8343         unlock_user(p, arg1, 0);
8344     convert_statfs64:
8345         if (!is_error(ret)) {
8346             struct target_statfs64 *target_stfs;
8347 
8348             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8349                 return -TARGET_EFAULT;
8350             __put_user(stfs.f_type, &target_stfs->f_type);
8351             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8352             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8353             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8354             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8355             __put_user(stfs.f_files, &target_stfs->f_files);
8356             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8357             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8358             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8359             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8360             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8361             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8362             unlock_user_struct(target_stfs, arg3, 1);
8363         }
8364         return ret;
8365     case TARGET_NR_fstatfs64:
8366         ret = get_errno(fstatfs(arg1, &stfs));
8367         goto convert_statfs64;
8368 #endif
8369 #ifdef TARGET_NR_socketcall
8370     case TARGET_NR_socketcall:
8371         return do_socketcall(arg1, arg2);
8372 #endif
8373 #ifdef TARGET_NR_accept
8374     case TARGET_NR_accept:
8375         return do_accept4(arg1, arg2, arg3, 0);
8376 #endif
8377 #ifdef TARGET_NR_accept4
8378     case TARGET_NR_accept4:
8379         return do_accept4(arg1, arg2, arg3, arg4);
8380 #endif
8381 #ifdef TARGET_NR_bind
8382     case TARGET_NR_bind:
8383         return do_bind(arg1, arg2, arg3);
8384 #endif
8385 #ifdef TARGET_NR_connect
8386     case TARGET_NR_connect:
8387         return do_connect(arg1, arg2, arg3);
8388 #endif
8389 #ifdef TARGET_NR_getpeername
8390     case TARGET_NR_getpeername:
8391         return do_getpeername(arg1, arg2, arg3);
8392 #endif
8393 #ifdef TARGET_NR_getsockname
8394     case TARGET_NR_getsockname:
8395         return do_getsockname(arg1, arg2, arg3);
8396 #endif
8397 #ifdef TARGET_NR_getsockopt
8398     case TARGET_NR_getsockopt:
8399         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8400 #endif
8401 #ifdef TARGET_NR_listen
8402     case TARGET_NR_listen:
8403         return get_errno(listen(arg1, arg2));
8404 #endif
8405 #ifdef TARGET_NR_recv
8406     case TARGET_NR_recv:
8407         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8408 #endif
8409 #ifdef TARGET_NR_recvfrom
8410     case TARGET_NR_recvfrom:
8411         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8412 #endif
8413 #ifdef TARGET_NR_recvmsg
8414     case TARGET_NR_recvmsg:
8415         return do_sendrecvmsg(arg1, arg2, arg3, 0);
8416 #endif
8417 #ifdef TARGET_NR_send
8418     case TARGET_NR_send:
8419         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8420 #endif
8421 #ifdef TARGET_NR_sendmsg
8422     case TARGET_NR_sendmsg:
8423         return do_sendrecvmsg(arg1, arg2, arg3, 1);
8424 #endif
8425 #ifdef TARGET_NR_sendmmsg
8426     case TARGET_NR_sendmmsg:
8427         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8428     case TARGET_NR_recvmmsg:
8429         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8430 #endif
8431 #ifdef TARGET_NR_sendto
8432     case TARGET_NR_sendto:
8433         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8434 #endif
8435 #ifdef TARGET_NR_shutdown
8436     case TARGET_NR_shutdown:
8437         return get_errno(shutdown(arg1, arg2));
8438 #endif
8439 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8440     case TARGET_NR_getrandom:
8441         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8442         if (!p) {
8443             return -TARGET_EFAULT;
8444         }
8445         ret = get_errno(getrandom(p, arg2, arg3));
8446         unlock_user(p, arg1, ret);
8447         return ret;
8448 #endif
8449 #ifdef TARGET_NR_socket
8450     case TARGET_NR_socket:
8451         return do_socket(arg1, arg2, arg3);
8452 #endif
8453 #ifdef TARGET_NR_socketpair
8454     case TARGET_NR_socketpair:
8455         return do_socketpair(arg1, arg2, arg3, arg4);
8456 #endif
8457 #ifdef TARGET_NR_setsockopt
8458     case TARGET_NR_setsockopt:
8459         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8460 #endif
8461 #if defined(TARGET_NR_syslog)
8462     case TARGET_NR_syslog:
8463         {
8464             int len = arg2;
8465 
8466             switch (arg1) {
8467             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
8468             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
8469             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
8470             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
8471             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
8472             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8473             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
8474             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
8475                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8476             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
8477             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
8478             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
8479                 {
8480                     if (len < 0) {
8481                         return -TARGET_EINVAL;
8482                     }
8483                     if (len == 0) {
8484                         return 0;
8485                     }
8486                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8487                     if (!p) {
8488                         return -TARGET_EFAULT;
8489                     }
8490                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8491                     unlock_user(p, arg2, arg3);
8492                 }
8493                 return ret;
8494             default:
8495                 return -TARGET_EINVAL;
8496             }
8497         }
8498         break;
8499 #endif
8500     case TARGET_NR_setitimer:
8501         {
8502             struct itimerval value, ovalue, *pvalue;
8503 
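            /* The guest itimerval is two target_timevals laid out back to
             * back: it_interval followed by it_value. */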
8504             if (arg2) {
8505                 pvalue = &value;
8506                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8507                     || copy_from_user_timeval(&pvalue->it_value,
8508                                               arg2 + sizeof(struct target_timeval)))
8509                     return -TARGET_EFAULT;
8510             } else {
8511                 pvalue = NULL;
8512             }
8513             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8514             if (!is_error(ret) && arg3) {
8515                 if (copy_to_user_timeval(arg3,
8516                                          &ovalue.it_interval)
8517                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8518                                             &ovalue.it_value))
8519                     return -TARGET_EFAULT;
8520             }
8521         }
8522         return ret;
8523     case TARGET_NR_getitimer:
8524         {
8525             struct itimerval value;
8526 
8527             ret = get_errno(getitimer(arg1, &value));
8528             if (!is_error(ret) && arg2) {
8529                 if (copy_to_user_timeval(arg2,
8530                                          &value.it_interval)
8531                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8532                                             &value.it_value))
8533                     return -TARGET_EFAULT;
8534             }
8535         }
8536         return ret;
8537 #ifdef TARGET_NR_stat
8538     case TARGET_NR_stat:
8539         if (!(p = lock_user_string(arg1))) {
8540             return -TARGET_EFAULT;
8541         }
8542         ret = get_errno(stat(path(p), &st));
8543         unlock_user(p, arg1, 0);
8544         goto do_stat;
8545 #endif
8546 #ifdef TARGET_NR_lstat
8547     case TARGET_NR_lstat:
8548         if (!(p = lock_user_string(arg1))) {
8549             return -TARGET_EFAULT;
8550         }
8551         ret = get_errno(lstat(path(p), &st));
8552         unlock_user(p, arg1, 0);
8553         goto do_stat;
8554 #endif
8555 #ifdef TARGET_NR_fstat
8556     case TARGET_NR_fstat:
8557         {
8558             ret = get_errno(fstat(arg1, &st));
8559 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8560         do_stat:
8561 #endif
8562             if (!is_error(ret)) {
8563                 struct target_stat *target_st;
8564 
8565                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8566                     return -TARGET_EFAULT;
8567                 memset(target_st, 0, sizeof(*target_st));
8568                 __put_user(st.st_dev, &target_st->st_dev);
8569                 __put_user(st.st_ino, &target_st->st_ino);
8570                 __put_user(st.st_mode, &target_st->st_mode);
8571                 __put_user(st.st_uid, &target_st->st_uid);
8572                 __put_user(st.st_gid, &target_st->st_gid);
8573                 __put_user(st.st_nlink, &target_st->st_nlink);
8574                 __put_user(st.st_rdev, &target_st->st_rdev);
8575                 __put_user(st.st_size, &target_st->st_size);
8576                 __put_user(st.st_blksize, &target_st->st_blksize);
8577                 __put_user(st.st_blocks, &target_st->st_blocks);
8578                 __put_user(st.st_atime, &target_st->target_st_atime);
8579                 __put_user(st.st_mtime, &target_st->target_st_mtime);
8580                 __put_user(st.st_ctime, &target_st->target_st_ctime);
8581                 unlock_user_struct(target_st, arg2, 1);
8582             }
8583         }
8584         return ret;
8585 #endif
8586     case TARGET_NR_vhangup:
8587         return get_errno(vhangup());
8588 #ifdef TARGET_NR_syscall
8589     case TARGET_NR_syscall:
8590         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8591                           arg6, arg7, arg8, 0);
8592 #endif
8593     case TARGET_NR_wait4:
8594         {
8595             int status;
8596             abi_long status_ptr = arg2;
8597             struct rusage rusage, *rusage_ptr;
8598             abi_ulong target_rusage = arg4;
8599             abi_long rusage_err;
8600             if (target_rusage)
8601                 rusage_ptr = &rusage;
8602             else
8603                 rusage_ptr = NULL;
8604             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8605             if (!is_error(ret)) {
8606                 if (status_ptr && ret) {
8607                     status = host_to_target_waitstatus(status);
8608                     if (put_user_s32(status, status_ptr))
8609                         return -TARGET_EFAULT;
8610                 }
8611                 if (target_rusage) {
8612                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
8613                     if (rusage_err) {
8614                         ret = rusage_err;
8615                     }
8616                 }
8617             }
8618         }
8619         return ret;
8620 #ifdef TARGET_NR_swapoff
8621     case TARGET_NR_swapoff:
8622         if (!(p = lock_user_string(arg1)))
8623             return -TARGET_EFAULT;
8624         ret = get_errno(swapoff(p));
8625         unlock_user(p, arg1, 0);
8626         return ret;
8627 #endif
8628     case TARGET_NR_sysinfo:
8629         {
8630             struct target_sysinfo *target_value;
8631             struct sysinfo value;
8632             ret = get_errno(sysinfo(&value));
8633             if (!is_error(ret) && arg1)
8634             {
8635                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8636                     return -TARGET_EFAULT;
8637                 __put_user(value.uptime, &target_value->uptime);
8638                 __put_user(value.loads[0], &target_value->loads[0]);
8639                 __put_user(value.loads[1], &target_value->loads[1]);
8640                 __put_user(value.loads[2], &target_value->loads[2]);
8641                 __put_user(value.totalram, &target_value->totalram);
8642                 __put_user(value.freeram, &target_value->freeram);
8643                 __put_user(value.sharedram, &target_value->sharedram);
8644                 __put_user(value.bufferram, &target_value->bufferram);
8645                 __put_user(value.totalswap, &target_value->totalswap);
8646                 __put_user(value.freeswap, &target_value->freeswap);
8647                 __put_user(value.procs, &target_value->procs);
8648                 __put_user(value.totalhigh, &target_value->totalhigh);
8649                 __put_user(value.freehigh, &target_value->freehigh);
8650                 __put_user(value.mem_unit, &target_value->mem_unit);
8651                 unlock_user_struct(target_value, arg1, 1);
8652             }
8653         }
8654         return ret;
8655 #ifdef TARGET_NR_ipc
8656     case TARGET_NR_ipc:
8657         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
8658 #endif
8659 #ifdef TARGET_NR_semget
8660     case TARGET_NR_semget:
8661         return get_errno(semget(arg1, arg2, arg3));
8662 #endif
8663 #ifdef TARGET_NR_semop
8664     case TARGET_NR_semop:
8665         return do_semop(arg1, arg2, arg3);
8666 #endif
8667 #ifdef TARGET_NR_semctl
8668     case TARGET_NR_semctl:
8669         return do_semctl(arg1, arg2, arg3, arg4);
8670 #endif
8671 #ifdef TARGET_NR_msgctl
8672     case TARGET_NR_msgctl:
8673         return do_msgctl(arg1, arg2, arg3);
8674 #endif
8675 #ifdef TARGET_NR_msgget
8676     case TARGET_NR_msgget:
8677         return get_errno(msgget(arg1, arg2));
8678 #endif
8679 #ifdef TARGET_NR_msgrcv
8680     case TARGET_NR_msgrcv:
8681         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8682 #endif
8683 #ifdef TARGET_NR_msgsnd
8684     case TARGET_NR_msgsnd:
8685         return do_msgsnd(arg1, arg2, arg3, arg4);
8686 #endif
8687 #ifdef TARGET_NR_shmget
8688     case TARGET_NR_shmget:
8689         return get_errno(shmget(arg1, arg2, arg3));
8690 #endif
8691 #ifdef TARGET_NR_shmctl
8692     case TARGET_NR_shmctl:
8693         return do_shmctl(arg1, arg2, arg3);
8694 #endif
8695 #ifdef TARGET_NR_shmat
8696     case TARGET_NR_shmat:
8697         return do_shmat(cpu_env, arg1, arg2, arg3);
8698 #endif
8699 #ifdef TARGET_NR_shmdt
8700     case TARGET_NR_shmdt:
8701         return do_shmdt(arg1);
8702 #endif
8703     case TARGET_NR_fsync:
8704         return get_errno(fsync(arg1));
8705     case TARGET_NR_clone:
8706         /* Linux manages to have three different orderings for its
8707          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8708          * match the kernel's CONFIG_CLONE_* settings.
8709          * Microblaze is further special in that it uses a sixth
8710          * implicit argument to clone for the TLS pointer.
8711          */
8712 #if defined(TARGET_MICROBLAZE)
8713         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
8714 #elif defined(TARGET_CLONE_BACKWARDS)
8715         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
8716 #elif defined(TARGET_CLONE_BACKWARDS2)
8717         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
8718 #else
8719         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
8720 #endif
8721         return ret;
8722 #ifdef __NR_exit_group
8723         /* new thread calls */
8724     case TARGET_NR_exit_group:
8725         preexit_cleanup(cpu_env, arg1);
8726         return get_errno(exit_group(arg1));
8727 #endif
8728     case TARGET_NR_setdomainname:
8729         if (!(p = lock_user_string(arg1)))
8730             return -TARGET_EFAULT;
8731         ret = get_errno(setdomainname(p, arg2));
8732         unlock_user(p, arg1, 0);
8733         return ret;
8734     case TARGET_NR_uname:
8735         /* no need to transcode because we use the linux syscall */
8736         {
8737             struct new_utsname * buf;
8738 
8739             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
8740                 return -TARGET_EFAULT;
8741             ret = get_errno(sys_uname(buf));
8742             if (!is_error(ret)) {
8743                 /* Overwrite the native machine name with whatever is being
8744                    emulated. */
8745                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
8746                           sizeof(buf->machine));
8747                 /* Allow the user to override the reported release.  */
8748                 if (qemu_uname_release && *qemu_uname_release) {
8749                     g_strlcpy(buf->release, qemu_uname_release,
8750                               sizeof(buf->release));
8751                 }
8752             }
8753             unlock_user_struct(buf, arg1, 1);
8754         }
8755         return ret;
8756 #ifdef TARGET_I386
8757     case TARGET_NR_modify_ldt:
8758         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
8759 #if !defined(TARGET_X86_64)
8760     case TARGET_NR_vm86:
8761         return do_vm86(cpu_env, arg1, arg2);
8762 #endif
8763 #endif
8764     case TARGET_NR_adjtimex:
8765         {
8766             struct timex host_buf;
8767 
8768             if (target_to_host_timex(&host_buf, arg1) != 0) {
8769                 return -TARGET_EFAULT;
8770             }
8771             ret = get_errno(adjtimex(&host_buf));
8772             if (!is_error(ret)) {
8773                 if (host_to_target_timex(arg1, &host_buf) != 0) {
8774                     return -TARGET_EFAULT;
8775                 }
8776             }
8777         }
8778         return ret;
8779 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
8780     case TARGET_NR_clock_adjtime:
8781         {
8782             struct timex htx, *phtx = &htx;
8783 
8784             if (target_to_host_timex(phtx, arg2) != 0) {
8785                 return -TARGET_EFAULT;
8786             }
8787             ret = get_errno(clock_adjtime(arg1, phtx));
8788             if (!is_error(ret) && phtx) {
8789                 if (host_to_target_timex(arg2, phtx) != 0) {
8790                     return -TARGET_EFAULT;
8791                 }
8792             }
8793         }
8794         return ret;
8795 #endif
8796     case TARGET_NR_getpgid:
8797         return get_errno(getpgid(arg1));
8798     case TARGET_NR_fchdir:
8799         return get_errno(fchdir(arg1));
8800     case TARGET_NR_personality:
8801         return get_errno(personality(arg1));
8802 #ifdef TARGET_NR__llseek /* Not on alpha */
8803     case TARGET_NR__llseek:
8804         {
8805             int64_t res;
8806 #if !defined(__NR_llseek)
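            /* Hosts without an llseek syscall (64-bit hosts) can cover the
             * full range with a plain lseek on the combined offset. */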
8807             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
8808             if (res == -1) {
8809                 ret = get_errno(res);
8810             } else {
8811                 ret = 0;
8812             }
8813 #else
8814             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
8815 #endif
8816             if ((ret == 0) && put_user_s64(res, arg4)) {
8817                 return -TARGET_EFAULT;
8818             }
8819         }
8820         return ret;
8821 #endif
8822 #ifdef TARGET_NR_getdents
8823     case TARGET_NR_getdents:
8824 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8825 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
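        /*
         * Here the host linux_dirent carries 64-bit d_ino/d_off, so every
         * record has to be repacked into the narrower target_dirent layout
         * rather than byte-swapped in place.
         */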
8826         {
8827             struct target_dirent *target_dirp;
8828             struct linux_dirent *dirp;
8829             abi_long count = arg3;
8830 
8831             dirp = g_try_malloc(count);
8832             if (!dirp) {
8833                 return -TARGET_ENOMEM;
8834             }
8835 
8836             ret = get_errno(sys_getdents(arg1, dirp, count));
8837             if (!is_error(ret)) {
8838                 struct linux_dirent *de;
8839                 struct target_dirent *tde;
8840                 int len = ret;
8841                 int reclen, treclen;
8842                 int count1, tnamelen;
8843 
8844                 count1 = 0;
8845                 de = dirp;
8846                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8847                     return -TARGET_EFAULT;
8848                 tde = target_dirp;
8849                 while (len > 0) {
8850                     reclen = de->d_reclen;
8851                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
8852                     assert(tnamelen >= 0);
8853                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
8854                     assert(count1 + treclen <= count);
8855                     tde->d_reclen = tswap16(treclen);
8856                     tde->d_ino = tswapal(de->d_ino);
8857                     tde->d_off = tswapal(de->d_off);
8858                     memcpy(tde->d_name, de->d_name, tnamelen);
8859                     de = (struct linux_dirent *)((char *)de + reclen);
8860                     len -= reclen;
8861                     tde = (struct target_dirent *)((char *)tde + treclen);
8862                     count1 += treclen;
8863                 }
8864                 ret = count1;
8865                 unlock_user(target_dirp, arg2, ret);
8866             }
8867             g_free(dirp);
8868         }
8869 #else
8870         {
8871             struct linux_dirent *dirp;
8872             abi_long count = arg3;
8873 
8874             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8875                 return -TARGET_EFAULT;
8876             ret = get_errno(sys_getdents(arg1, dirp, count));
8877             if (!is_error(ret)) {
8878                 struct linux_dirent *de;
8879                 int len = ret;
8880                 int reclen;
8881                 de = dirp;
8882                 while (len > 0) {
8883                     reclen = de->d_reclen;
8884                     if (reclen > len)
8885                         break;
8886                     de->d_reclen = tswap16(reclen);
8887                     tswapls(&de->d_ino);
8888                     tswapls(&de->d_off);
8889                     de = (struct linux_dirent *)((char *)de + reclen);
8890                     len -= reclen;
8891                 }
8892             }
8893             unlock_user(dirp, arg2, ret);
8894         }
8895 #endif
8896 #else
8897         /* Implement getdents in terms of getdents64 */
8898         {
8899             struct linux_dirent64 *dirp;
8900             abi_long count = arg3;
8901 
8902             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8903             if (!dirp) {
8904                 return -TARGET_EFAULT;
8905             }
8906             ret = get_errno(sys_getdents64(arg1, dirp, count));
8907             if (!is_error(ret)) {
8908                 /* Convert the dirent64 structs to target dirent.  We do this
8909                  * in-place, since we can guarantee that a target_dirent is no
8910                  * larger than a dirent64; however this means we have to be
8911                  * careful to read everything before writing in the new format.
8912                  */
8913                 struct linux_dirent64 *de;
8914                 struct target_dirent *tde;
8915                 int len = ret;
8916                 int tlen = 0;
8917 
8918                 de = dirp;
8919                 tde = (struct target_dirent *)dirp;
8920                 while (len > 0) {
8921                     int namelen, treclen;
8922                     int reclen = de->d_reclen;
8923                     uint64_t ino = de->d_ino;
8924                     int64_t off = de->d_off;
8925                     uint8_t type = de->d_type;
8926 
8927                     namelen = strlen(de->d_name);
8928                     treclen = offsetof(struct target_dirent, d_name)
8929                         + namelen + 2;
8930                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
8931 
8932                     memmove(tde->d_name, de->d_name, namelen + 1);
8933                     tde->d_ino = tswapal(ino);
8934                     tde->d_off = tswapal(off);
8935                     tde->d_reclen = tswap16(treclen);
8936                     /* The target_dirent d_type byte lives in what was
8937                      * formerly a padding byte at the end of the structure:
8938                      */
8939                     *(((char *)tde) + treclen - 1) = type;
8940 
8941                     de = (struct linux_dirent64 *)((char *)de + reclen);
8942                     tde = (struct target_dirent *)((char *)tde + treclen);
8943                     len -= reclen;
8944                     tlen += treclen;
8945                 }
8946                 ret = tlen;
8947             }
8948             unlock_user(dirp, arg2, ret);
8949         }
8950 #endif
8951         return ret;
8952 #endif /* TARGET_NR_getdents */
8953 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8954     case TARGET_NR_getdents64:
8955         {
8956             struct linux_dirent64 *dirp;
8957             abi_long count = arg3;
8958             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8959                 return -TARGET_EFAULT;
8960             ret = get_errno(sys_getdents64(arg1, dirp, count));
8961             if (!is_error(ret)) {
8962                 struct linux_dirent64 *de;
8963                 int len = ret;
8964                 int reclen;
8965                 de = dirp;
8966                 while (len > 0) {
8967                     reclen = de->d_reclen;
8968                     if (reclen > len)
8969                         break;
8970                     de->d_reclen = tswap16(reclen);
8971                     tswap64s((uint64_t *)&de->d_ino);
8972                     tswap64s((uint64_t *)&de->d_off);
8973                     de = (struct linux_dirent64 *)((char *)de + reclen);
8974                     len -= reclen;
8975                 }
8976             }
8977             unlock_user(dirp, arg2, ret);
8978         }
8979         return ret;
8980 #endif /* TARGET_NR_getdents64 */
8981 #if defined(TARGET_NR__newselect)
8982     case TARGET_NR__newselect:
8983         return do_select(arg1, arg2, arg3, arg4, arg5);
8984 #endif
8985 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8986 # ifdef TARGET_NR_poll
8987     case TARGET_NR_poll:
8988 # endif
8989 # ifdef TARGET_NR_ppoll
8990     case TARGET_NR_ppoll:
8991 # endif
8992         {
8993             struct target_pollfd *target_pfd;
8994             unsigned int nfds = arg2;
8995             struct pollfd *pfd;
8996             unsigned int i;
8997 
8998             pfd = NULL;
8999             target_pfd = NULL;
9000             if (nfds) {
9001                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9002                     return -TARGET_EINVAL;
9003                 }
9004 
9005                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9006                                        sizeof(struct target_pollfd) * nfds, 1);
9007                 if (!target_pfd) {
9008                     return -TARGET_EFAULT;
9009                 }
9010 
9011                 pfd = alloca(sizeof(struct pollfd) * nfds);
9012                 for (i = 0; i < nfds; i++) {
9013                     pfd[i].fd = tswap32(target_pfd[i].fd);
9014                     pfd[i].events = tswap16(target_pfd[i].events);
9015                 }
9016             }
9017 
9018             switch (num) {
9019 # ifdef TARGET_NR_ppoll
9020             case TARGET_NR_ppoll:
9021             {
9022                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9023                 target_sigset_t *target_set;
9024                 sigset_t _set, *set = &_set;
9025 
9026                 if (arg3) {
9027                     if (target_to_host_timespec(timeout_ts, arg3)) {
9028                         unlock_user(target_pfd, arg1, 0);
9029                         return -TARGET_EFAULT;
9030                     }
9031                 } else {
9032                     timeout_ts = NULL;
9033                 }
9034 
9035                 if (arg4) {
9036                     if (arg5 != sizeof(target_sigset_t)) {
9037                         unlock_user(target_pfd, arg1, 0);
9038                         return -TARGET_EINVAL;
9039                     }
9040 
9041                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9042                     if (!target_set) {
9043                         unlock_user(target_pfd, arg1, 0);
9044                         return -TARGET_EFAULT;
9045                     }
9046                     target_to_host_sigset(set, target_set);
9047                 } else {
9048                     set = NULL;
9049                 }
9050 
9051                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9052                                            set, SIGSET_T_SIZE));
9053 
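                /* The raw ppoll syscall updates the timespec with the
                 * remaining time, so propagate that back to the guest. */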
9054                 if (!is_error(ret) && arg3) {
9055                     host_to_target_timespec(arg3, timeout_ts);
9056                 }
9057                 if (arg4) {
9058                     unlock_user(target_set, arg4, 0);
9059                 }
9060                 break;
9061             }
9062 # endif
9063 # ifdef TARGET_NR_poll
9064             case TARGET_NR_poll:
9065             {
9066                 struct timespec ts, *pts;
9067 
9068                 if (arg3 >= 0) {
9069                     /* Convert ms to secs, ns */
9070                     ts.tv_sec = arg3 / 1000;
9071                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9072                     pts = &ts;
9073                 } else {
9074                     /* -ve poll() timeout means "infinite" */
9075                     pts = NULL;
9076                 }
9077                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9078                 break;
9079             }
9080 # endif
9081             default:
9082                 g_assert_not_reached();
9083             }
9084 
9085             if (!is_error(ret)) {
9086                 for(i = 0; i < nfds; i++) {
9087                     target_pfd[i].revents = tswap16(pfd[i].revents);
9088                 }
9089             }
9090             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9091         }
9092         return ret;
9093 #endif
9094     case TARGET_NR_flock:
9095         /* NOTE: the flock constant seems to be the same for every
9096            Linux platform */
9097         return get_errno(safe_flock(arg1, arg2));
9098     case TARGET_NR_readv:
9099         {
9100             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9101             if (vec != NULL) {
9102                 ret = get_errno(safe_readv(arg1, vec, arg3));
9103                 unlock_iovec(vec, arg2, arg3, 1);
9104             } else {
9105                 ret = -host_to_target_errno(errno);
9106             }
9107         }
9108         return ret;
9109     case TARGET_NR_writev:
9110         {
9111             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9112             if (vec != NULL) {
9113                 ret = get_errno(safe_writev(arg1, vec, arg3));
9114                 unlock_iovec(vec, arg2, arg3, 0);
9115             } else {
9116                 ret = -host_to_target_errno(errno);
9117             }
9118         }
9119         return ret;
9120 #if defined(TARGET_NR_preadv)
9121     case TARGET_NR_preadv:
9122         {
9123             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9124             if (vec != NULL) {
9125                 unsigned long low, high;
9126 
9127                 target_to_host_low_high(arg4, arg5, &low, &high);
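                /* Combine the guest's two offset words (whose order is
                 * target-dependent) into the low/high pair that
                 * safe_preadv() expects. */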
9128                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9129                 unlock_iovec(vec, arg2, arg3, 1);
9130             } else {
9131                 ret = -host_to_target_errno(errno);
9132             }
9133         }
9134         return ret;
9135 #endif
9136 #if defined(TARGET_NR_pwritev)
9137     case TARGET_NR_pwritev:
9138         {
9139             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9140             if (vec != NULL) {
9141                 unsigned long low, high;
9142 
9143                 target_to_host_low_high(arg4, arg5, &low, &high);
9144                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9145                 unlock_iovec(vec, arg2, arg3, 0);
9146             } else {
9147                 ret = -host_to_target_errno(errno);
9148             }
9149         }
9150         return ret;
9151 #endif
9152     case TARGET_NR_getsid:
9153         return get_errno(getsid(arg1));
9154 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9155     case TARGET_NR_fdatasync:
9156         return get_errno(fdatasync(arg1));
9157 #endif
9158 #ifdef TARGET_NR__sysctl
9159     case TARGET_NR__sysctl:
9160         /* We don't implement this, but ENOTDIR is always a safe
9161            return value. */
9162         return -TARGET_ENOTDIR;
9163 #endif
9164     case TARGET_NR_sched_getaffinity:
9165         {
9166             unsigned int mask_size;
9167             unsigned long *mask;
9168 
9169             /*
9170              * sched_getaffinity needs multiples of ulong, so we need to take
9171              * care of mismatches between target ulong and host ulong sizes.
9172              */
9173             if (arg2 & (sizeof(abi_ulong) - 1)) {
9174                 return -TARGET_EINVAL;
9175             }
9176             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9177 
9178             mask = alloca(mask_size);
9179             memset(mask, 0, mask_size);
9180             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9181 
9182             if (!is_error(ret)) {
9183                 if (ret > arg2) {
9184                     /* More data returned than will fit in the caller's buffer.
9185                      * This only happens if sizeof(abi_long) < sizeof(long)
9186                      * and the caller passed us a buffer holding an odd number
9187                      * of abi_longs. If the host kernel is actually using the
9188                      * extra 4 bytes then fail EINVAL; otherwise we can just
9189                      * ignore them and only copy the interesting part.
9190                      */
9191                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9192                     if (numcpus > arg2 * 8) {
9193                         return -TARGET_EINVAL;
9194                     }
9195                     ret = arg2;
9196                 }
9197 
9198                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9199                     return -TARGET_EFAULT;
9200                 }
9201             }
9202         }
9203         return ret;
9204     case TARGET_NR_sched_setaffinity:
9205         {
9206             unsigned int mask_size;
9207             unsigned long *mask;
9208 
9209             /*
9210              * sched_setaffinity needs multiples of ulong, so we need to take
9211              * care of mismatches between target ulong and host ulong sizes.
9212              */
9213             if (arg2 & (sizeof(abi_ulong) - 1)) {
9214                 return -TARGET_EINVAL;
9215             }
9216             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9217             mask = alloca(mask_size);
9218 
9219             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9220             if (ret) {
9221                 return ret;
9222             }
9223 
9224             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9225         }
9226     case TARGET_NR_getcpu:
9227         {
9228             unsigned cpu, node;
9229             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9230                                        arg2 ? &node : NULL,
9231                                        NULL));
9232             if (is_error(ret)) {
9233                 return ret;
9234             }
9235             if (arg1 && put_user_u32(cpu, arg1)) {
9236                 return -TARGET_EFAULT;
9237             }
9238             if (arg2 && put_user_u32(node, arg2)) {
9239                 return -TARGET_EFAULT;
9240             }
9241         }
9242         return ret;
9243     case TARGET_NR_sched_setparam:
9244         {
9245             struct sched_param *target_schp;
9246             struct sched_param schp;
9247 
9248             if (arg2 == 0) {
9249                 return -TARGET_EINVAL;
9250             }
9251             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9252                 return -TARGET_EFAULT;
9253             schp.sched_priority = tswap32(target_schp->sched_priority);
9254             unlock_user_struct(target_schp, arg2, 0);
9255             return get_errno(sched_setparam(arg1, &schp));
9256         }
9257     case TARGET_NR_sched_getparam:
9258         {
9259             struct sched_param *target_schp;
9260             struct sched_param schp;
9261 
9262             if (arg2 == 0) {
9263                 return -TARGET_EINVAL;
9264             }
9265             ret = get_errno(sched_getparam(arg1, &schp));
9266             if (!is_error(ret)) {
9267                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9268                     return -TARGET_EFAULT;
9269                 target_schp->sched_priority = tswap32(schp.sched_priority);
9270                 unlock_user_struct(target_schp, arg2, 1);
9271             }
9272         }
9273         return ret;
9274     case TARGET_NR_sched_setscheduler:
9275         {
9276             struct sched_param *target_schp;
9277             struct sched_param schp;
9278             if (arg3 == 0) {
9279                 return -TARGET_EINVAL;
9280             }
9281             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9282                 return -TARGET_EFAULT;
9283             schp.sched_priority = tswap32(target_schp->sched_priority);
9284             unlock_user_struct(target_schp, arg3, 0);
9285             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9286         }
9287     case TARGET_NR_sched_getscheduler:
9288         return get_errno(sched_getscheduler(arg1));
9289     case TARGET_NR_sched_yield:
9290         return get_errno(sched_yield());
9291     case TARGET_NR_sched_get_priority_max:
9292         return get_errno(sched_get_priority_max(arg1));
9293     case TARGET_NR_sched_get_priority_min:
9294         return get_errno(sched_get_priority_min(arg1));
9295     case TARGET_NR_sched_rr_get_interval:
9296         {
9297             struct timespec ts;
9298             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9299             if (!is_error(ret)) {
9300                 ret = host_to_target_timespec(arg2, &ts);
9301             }
9302         }
9303         return ret;
9304     case TARGET_NR_nanosleep:
9305         {
9306             struct timespec req, rem;
9307             target_to_host_timespec(&req, arg1);
9308             ret = get_errno(safe_nanosleep(&req, &rem));
9309             if (is_error(ret) && arg2) {
9310                 host_to_target_timespec(arg2, &rem);
9311             }
9312         }
9313         return ret;
9314     case TARGET_NR_prctl:
9315         switch (arg1) {
9316         case PR_GET_PDEATHSIG:
9317         {
9318             int deathsig;
9319             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9320             if (!is_error(ret) && arg2
9321                 && put_user_ual(deathsig, arg2)) {
9322                 return -TARGET_EFAULT;
9323             }
9324             return ret;
9325         }
9326 #ifdef PR_GET_NAME
9327         case PR_GET_NAME:
9328         {
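            /* The task name buffer is TASK_COMM_LEN (16) bytes,
             * including the trailing NUL. */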
9329             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9330             if (!name) {
9331                 return -TARGET_EFAULT;
9332             }
9333             ret = get_errno(prctl(arg1, (unsigned long)name,
9334                                   arg3, arg4, arg5));
9335             unlock_user(name, arg2, 16);
9336             return ret;
9337         }
9338         case PR_SET_NAME:
9339         {
9340             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9341             if (!name) {
9342                 return -TARGET_EFAULT;
9343             }
9344             ret = get_errno(prctl(arg1, (unsigned long)name,
9345                                   arg3, arg4, arg5));
9346             unlock_user(name, arg2, 0);
9347             return ret;
9348         }
9349 #endif
9350 #ifdef TARGET_AARCH64
9351         case TARGET_PR_SVE_SET_VL:
9352             /*
9353              * We cannot support either PR_SVE_SET_VL_ONEXEC or
9354              * PR_SVE_VL_INHERIT.  Note the kernel definition
9355              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9356              * even though the current architectural maximum is VQ=16.
9357              */
9358             ret = -TARGET_EINVAL;
9359             if (arm_feature(cpu_env, ARM_FEATURE_SVE)
9360                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9361                 CPUARMState *env = cpu_env;
9362                 ARMCPU *cpu = arm_env_get_cpu(env);
9363                 uint32_t vq, old_vq;
9364 
9365                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9366                 vq = MAX(arg2 / 16, 1);
9367                 vq = MIN(vq, cpu->sve_max_vq);
9368 
9369                 if (vq < old_vq) {
9370                     aarch64_sve_narrow_vq(env, vq);
9371                 }
9372                 env->vfp.zcr_el[1] = vq - 1;
9373                 ret = vq * 16;
9374             }
9375             return ret;
9376         case TARGET_PR_SVE_GET_VL:
9377             ret = -TARGET_EINVAL;
9378             if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
9379                 CPUARMState *env = cpu_env;
9380                 ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
9381             }
9382             return ret;
9383 #endif /* AARCH64 */
9384         case PR_GET_SECCOMP:
9385         case PR_SET_SECCOMP:
9386             /* Disable seccomp to prevent the target from disabling
9387              * syscalls that we need. */
9388             return -TARGET_EINVAL;
9389         default:
9390             /* Most prctl options have no pointer arguments */
9391             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9392         }
9393         break;
9394 #ifdef TARGET_NR_arch_prctl
9395     case TARGET_NR_arch_prctl:
9396 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9397         return do_arch_prctl(cpu_env, arg1, arg2);
9398 #else
9399 #error unreachable
9400 #endif
9401 #endif
9402 #ifdef TARGET_NR_pread64
9403     case TARGET_NR_pread64:
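        /* Some ABIs pass 64-bit values in aligned register pairs, which
         * inserts an unused padding argument before the offset; skip it. */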
9404         if (regpairs_aligned(cpu_env, num)) {
9405             arg4 = arg5;
9406             arg5 = arg6;
9407         }
9408         if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9409             return -TARGET_EFAULT;
9410         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9411         unlock_user(p, arg2, ret);
9412         return ret;
9413     case TARGET_NR_pwrite64:
9414         if (regpairs_aligned(cpu_env, num)) {
9415             arg4 = arg5;
9416             arg5 = arg6;
9417         }
9418         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9419             return -TARGET_EFAULT;
9420         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9421         unlock_user(p, arg2, 0);
9422         return ret;
9423 #endif
9424     case TARGET_NR_getcwd:
9425         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9426             return -TARGET_EFAULT;
9427         ret = get_errno(sys_getcwd1(p, arg2));
9428         unlock_user(p, arg1, ret);
9429         return ret;
9430     case TARGET_NR_capget:
9431     case TARGET_NR_capset:
9432     {
9433         struct target_user_cap_header *target_header;
9434         struct target_user_cap_data *target_data = NULL;
9435         struct __user_cap_header_struct header;
9436         struct __user_cap_data_struct data[2];
9437         struct __user_cap_data_struct *dataptr = NULL;
9438         int i, target_datalen;
9439         int data_items = 1;
9440 
9441         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9442             return -TARGET_EFAULT;
9443         }
9444         header.version = tswap32(target_header->version);
9445         header.pid = tswap32(target_header->pid);
9446 
9447         if (header.version != _LINUX_CAPABILITY_VERSION) {
9448             /* Version 2 and up take a pointer to two user_data structs */
9449             data_items = 2;
9450         }
9451 
9452         target_datalen = sizeof(*target_data) * data_items;
9453 
9454         if (arg2) {
9455             if (num == TARGET_NR_capget) {
9456                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9457             } else {
9458                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9459             }
9460             if (!target_data) {
9461                 unlock_user_struct(target_header, arg1, 0);
9462                 return -TARGET_EFAULT;
9463             }
9464 
9465             if (num == TARGET_NR_capset) {
9466                 for (i = 0; i < data_items; i++) {
9467                     data[i].effective = tswap32(target_data[i].effective);
9468                     data[i].permitted = tswap32(target_data[i].permitted);
9469                     data[i].inheritable = tswap32(target_data[i].inheritable);
9470                 }
9471             }
9472 
9473             dataptr = data;
9474         }
9475 
9476         if (num == TARGET_NR_capget) {
9477             ret = get_errno(capget(&header, dataptr));
9478         } else {
9479             ret = get_errno(capset(&header, dataptr));
9480         }
9481 
9482         /* The kernel always updates version for both capget and capset */
9483         target_header->version = tswap32(header.version);
9484         unlock_user_struct(target_header, arg1, 1);
9485 
9486         if (arg2) {
9487             if (num == TARGET_NR_capget) {
9488                 for (i = 0; i < data_items; i++) {
9489                     target_data[i].effective = tswap32(data[i].effective);
9490                     target_data[i].permitted = tswap32(data[i].permitted);
9491                     target_data[i].inheritable = tswap32(data[i].inheritable);
9492                 }
9493                 unlock_user(target_data, arg2, target_datalen);
9494             } else {
9495                 unlock_user(target_data, arg2, 0);
9496             }
9497         }
9498         return ret;
9499     }
9500     case TARGET_NR_sigaltstack:
9501         return do_sigaltstack(arg1, arg2,
9502                               get_sp_from_cpustate((CPUArchState *)cpu_env));
9503 
9504 #ifdef CONFIG_SENDFILE
9505 #ifdef TARGET_NR_sendfile
9506     case TARGET_NR_sendfile:
9507     {
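        /* sendfile passes the offset as a native abi_long; sendfile64
         * below uses a full 64-bit value. */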
9508         off_t *offp = NULL;
9509         off_t off;
9510         if (arg3) {
9511             ret = get_user_sal(off, arg3);
9512             if (is_error(ret)) {
9513                 return ret;
9514             }
9515             offp = &off;
9516         }
9517         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9518         if (!is_error(ret) && arg3) {
9519             abi_long ret2 = put_user_sal(off, arg3);
9520             if (is_error(ret2)) {
9521                 ret = ret2;
9522             }
9523         }
9524         return ret;
9525     }
9526 #endif
9527 #ifdef TARGET_NR_sendfile64
9528     case TARGET_NR_sendfile64:
9529     {
9530         off_t *offp = NULL;
9531         off_t off;
9532         if (arg3) {
9533             ret = get_user_s64(off, arg3);
9534             if (is_error(ret)) {
9535                 return ret;
9536             }
9537             offp = &off;
9538         }
9539         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9540         if (!is_error(ret) && arg3) {
9541             abi_long ret2 = put_user_s64(off, arg3);
9542             if (is_error(ret2)) {
9543                 ret = ret2;
9544             }
9545         }
9546         return ret;
9547     }
9548 #endif
9549 #endif
9550 #ifdef TARGET_NR_vfork
9551     case TARGET_NR_vfork:
9552         return get_errno(do_fork(cpu_env,
9553                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
9554                          0, 0, 0, 0));
9555 #endif
9556 #ifdef TARGET_NR_ugetrlimit
9557     case TARGET_NR_ugetrlimit:
9558     {
9559         struct rlimit rlim;
9560         int resource = target_to_host_resource(arg1);
9561         ret = get_errno(getrlimit(resource, &rlim));
9562         if (!is_error(ret)) {
9563             struct target_rlimit *target_rlim;
9564             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9565                 return -TARGET_EFAULT;
9566             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9567             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9568             unlock_user_struct(target_rlim, arg2, 1);
9569         }
9570         return ret;
9571     }
9572 #endif
9573 #ifdef TARGET_NR_truncate64
9574     case TARGET_NR_truncate64:
9575         if (!(p = lock_user_string(arg1)))
9576             return -TARGET_EFAULT;
9577         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
9578         unlock_user(p, arg1, 0);
9579         return ret;
9580 #endif
9581 #ifdef TARGET_NR_ftruncate64
9582     case TARGET_NR_ftruncate64:
9583         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
9584 #endif
9585 #ifdef TARGET_NR_stat64
9586     case TARGET_NR_stat64:
9587         if (!(p = lock_user_string(arg1))) {
9588             return -TARGET_EFAULT;
9589         }
9590         ret = get_errno(stat(path(p), &st));
9591         unlock_user(p, arg1, 0);
9592         if (!is_error(ret))
9593             ret = host_to_target_stat64(cpu_env, arg2, &st);
9594         return ret;
9595 #endif
9596 #ifdef TARGET_NR_lstat64
9597     case TARGET_NR_lstat64:
9598         if (!(p = lock_user_string(arg1))) {
9599             return -TARGET_EFAULT;
9600         }
9601         ret = get_errno(lstat(path(p), &st));
9602         unlock_user(p, arg1, 0);
9603         if (!is_error(ret))
9604             ret = host_to_target_stat64(cpu_env, arg2, &st);
9605         return ret;
9606 #endif
9607 #ifdef TARGET_NR_fstat64
9608     case TARGET_NR_fstat64:
9609         ret = get_errno(fstat(arg1, &st));
9610         if (!is_error(ret))
9611             ret = host_to_target_stat64(cpu_env, arg2, &st);
9612         return ret;
9613 #endif
9614 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9615 #ifdef TARGET_NR_fstatat64
9616     case TARGET_NR_fstatat64:
9617 #endif
9618 #ifdef TARGET_NR_newfstatat
9619     case TARGET_NR_newfstatat:
9620 #endif
9621         if (!(p = lock_user_string(arg2))) {
9622             return -TARGET_EFAULT;
9623         }
9624         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
9625         unlock_user(p, arg2, 0);
9626         if (!is_error(ret))
9627             ret = host_to_target_stat64(cpu_env, arg3, &st);
9628         return ret;
9629 #endif
9630 #ifdef TARGET_NR_lchown
9631     case TARGET_NR_lchown:
9632         if (!(p = lock_user_string(arg1)))
9633             return -TARGET_EFAULT;
9634         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
9635         unlock_user(p, arg1, 0);
9636         return ret;
9637 #endif
9638 #ifdef TARGET_NR_getuid
9639     case TARGET_NR_getuid:
9640         return get_errno(high2lowuid(getuid()));
9641 #endif
9642 #ifdef TARGET_NR_getgid
9643     case TARGET_NR_getgid:
9644         return get_errno(high2lowgid(getgid()));
9645 #endif
9646 #ifdef TARGET_NR_geteuid
9647     case TARGET_NR_geteuid:
9648         return get_errno(high2lowuid(geteuid()));
9649 #endif
9650 #ifdef TARGET_NR_getegid
9651     case TARGET_NR_getegid:
9652         return get_errno(high2lowgid(getegid()));
9653 #endif
9654     case TARGET_NR_setreuid:
9655         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
9656     case TARGET_NR_setregid:
9657         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
9658     case TARGET_NR_getgroups:
9659         {
9660             int gidsetsize = arg1;
9661             target_id *target_grouplist;
9662             gid_t *grouplist;
9663             int i;
9664 
9665             grouplist = alloca(gidsetsize * sizeof(gid_t));
9666             ret = get_errno(getgroups(gidsetsize, grouplist));
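            /* A gidsetsize of zero only queries the number of
             * supplementary groups. */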
9667             if (gidsetsize == 0)
9668                 return ret;
9669             if (!is_error(ret)) {
9670                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
9671                 if (!target_grouplist)
9672                     return -TARGET_EFAULT;
9673                 for (i = 0; i < ret; i++)
9674                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
9675                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
9676             }
9677         }
9678         return ret;
9679     case TARGET_NR_setgroups:
9680         {
9681             int gidsetsize = arg1;
9682             target_id *target_grouplist;
9683             gid_t *grouplist = NULL;
9684             int i;
9685             if (gidsetsize) {
9686                 grouplist = alloca(gidsetsize * sizeof(gid_t));
9687                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
9688                 if (!target_grouplist) {
9689                     return -TARGET_EFAULT;
9690                 }
9691                 for (i = 0; i < gidsetsize; i++) {
9692                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
9693                 }
9694                 unlock_user(target_grouplist, arg2, 0);
9695             }
9696             return get_errno(setgroups(gidsetsize, grouplist));
9697         }
9698     case TARGET_NR_fchown:
9699         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
9700 #if defined(TARGET_NR_fchownat)
9701     case TARGET_NR_fchownat:
9702         if (!(p = lock_user_string(arg2)))
9703             return -TARGET_EFAULT;
9704         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
9705                                  low2highgid(arg4), arg5));
9706         unlock_user(p, arg2, 0);
9707         return ret;
9708 #endif
9709 #ifdef TARGET_NR_setresuid
9710     case TARGET_NR_setresuid:
9711         return get_errno(sys_setresuid(low2highuid(arg1),
9712                                        low2highuid(arg2),
9713                                        low2highuid(arg3)));
9714 #endif
9715 #ifdef TARGET_NR_getresuid
9716     case TARGET_NR_getresuid:
9717         {
9718             uid_t ruid, euid, suid;
9719             ret = get_errno(getresuid(&ruid, &euid, &suid));
9720             if (!is_error(ret)) {
9721                 if (put_user_id(high2lowuid(ruid), arg1)
9722                     || put_user_id(high2lowuid(euid), arg2)
9723                     || put_user_id(high2lowuid(suid), arg3))
9724                     return -TARGET_EFAULT;
9725             }
9726         }
9727         return ret;
9728 #endif
9729 #ifdef TARGET_NR_getresgid
9730     case TARGET_NR_setresgid:
9731         return get_errno(sys_setresgid(low2highgid(arg1),
9732                                        low2highgid(arg2),
9733                                        low2highgid(arg3)));
9734 #endif
9735 #ifdef TARGET_NR_getresgid
9736     case TARGET_NR_getresgid:
9737         {
9738             gid_t rgid, egid, sgid;
9739             ret = get_errno(getresgid(&rgid, &egid, &sgid));
9740             if (!is_error(ret)) {
9741                 if (put_user_id(high2lowgid(rgid), arg1)
9742                     || put_user_id(high2lowgid(egid), arg2)
9743                     || put_user_id(high2lowgid(sgid), arg3))
9744                     return -TARGET_EFAULT;
9745             }
9746         }
9747         return ret;
9748 #endif
9749 #ifdef TARGET_NR_chown
9750     case TARGET_NR_chown:
9751         if (!(p = lock_user_string(arg1)))
9752             return -TARGET_EFAULT;
9753         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
9754         unlock_user(p, arg1, 0);
9755         return ret;
9756 #endif
9757     case TARGET_NR_setuid:
9758         return get_errno(sys_setuid(low2highuid(arg1)));
9759     case TARGET_NR_setgid:
9760         return get_errno(sys_setgid(low2highgid(arg1)));
9761     case TARGET_NR_setfsuid:
9762         return get_errno(setfsuid(arg1));
9763     case TARGET_NR_setfsgid:
9764         return get_errno(setfsgid(arg1));
9765 
9766 #ifdef TARGET_NR_lchown32
9767     case TARGET_NR_lchown32:
9768         if (!(p = lock_user_string(arg1)))
9769             return -TARGET_EFAULT;
9770         ret = get_errno(lchown(p, arg2, arg3));
9771         unlock_user(p, arg1, 0);
9772         return ret;
9773 #endif
9774 #ifdef TARGET_NR_getuid32
9775     case TARGET_NR_getuid32:
9776         return get_errno(getuid());
9777 #endif
9778 
9779 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
9780    /* Alpha specific */
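    /* getxuid returns the real uid in v0 and the effective uid in a4. */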
9781     case TARGET_NR_getxuid:
9782          {
9783             uid_t euid;
9784             euid = geteuid();
9785             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
9786          }
9787         return get_errno(getuid());
9788 #endif
9789 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
9790    /* Alpha specific */
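    /* Likewise getxgid returns the real gid in v0 and the effective gid in a4. */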
9791     case TARGET_NR_getxgid:
9792          {
9793             gid_t egid;
9794             egid = getegid();
9795             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
9796          }
9797         return get_errno(getgid());
9798 #endif
9799 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
9800     /* Alpha specific */
9801     case TARGET_NR_osf_getsysinfo:
9802         ret = -TARGET_EOPNOTSUPP;
9803         switch (arg1) {
9804           case TARGET_GSI_IEEE_FP_CONTROL:
9805             {
9806                 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
9807 
9808                 /* Copied from linux ieee_fpcr_to_swcr.  */
9809                 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
9810                 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
9811                 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
9812                                         | SWCR_TRAP_ENABLE_DZE
9813                                         | SWCR_TRAP_ENABLE_OVF);
9814                 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
9815                                         | SWCR_TRAP_ENABLE_INE);
9816                 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
9817                 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
9818 
9819                 if (put_user_u64(swcr, arg2))
9820                     return -TARGET_EFAULT;
9821                 ret = 0;
9822             }
9823             break;
9824 
9825           /* case GSI_IEEE_STATE_AT_SIGNAL:
9826              -- Not implemented in linux kernel.
9827              case GSI_UACPROC:
9828              -- Retrieves current unaligned access state; not much used.
9829              case GSI_PROC_TYPE:
9830              -- Retrieves implver information; surely not used.
9831              case GSI_GET_HWRPB:
9832              -- Grabs a copy of the HWRPB; surely not used.
9833           */
9834         }
9835         return ret;
9836 #endif
9837 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
9838     /* Alpha specific */
9839     case TARGET_NR_osf_setsysinfo:
9840         ret = -TARGET_EOPNOTSUPP;
9841         switch (arg1) {
9842           case TARGET_SSI_IEEE_FP_CONTROL:
9843             {
9844                 uint64_t swcr, fpcr, orig_fpcr;
9845 
9846                 if (get_user_u64 (swcr, arg2)) {
9847                     return -TARGET_EFAULT;
9848                 }
9849                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9850                 fpcr = orig_fpcr & FPCR_DYN_MASK;
9851 
9852                 /* Copied from linux ieee_swcr_to_fpcr.  */
9853                 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
9854                 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
9855                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
9856                                   | SWCR_TRAP_ENABLE_DZE
9857                                   | SWCR_TRAP_ENABLE_OVF)) << 48;
9858                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
9859                                   | SWCR_TRAP_ENABLE_INE)) << 57;
9860                 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
9861                 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
9862 
9863                 cpu_alpha_store_fpcr(cpu_env, fpcr);
9864                 ret = 0;
9865             }
9866             break;
9867 
9868           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
9869             {
9870                 uint64_t exc, fpcr, orig_fpcr;
9871                 int si_code;
9872 
9873                 if (get_user_u64(exc, arg2)) {
9874                     return -TARGET_EFAULT;
9875                 }
9876 
9877                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9878 
9879                 /* We only add to the exception status here.  */
9880                 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
9881 
9882                 cpu_alpha_store_fpcr(cpu_env, fpcr);
9883                 ret = 0;
9884 
9885                 /* Old exceptions are not signaled.  */
9886                 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
9887 
9888                 /* If any exceptions were set by this call
9889                    and are unmasked, send a signal.  */
9890                 si_code = 0;
9891                 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
9892                     si_code = TARGET_FPE_FLTRES;
9893                 }
9894                 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
9895                     si_code = TARGET_FPE_FLTUND;
9896                 }
9897                 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
9898                     si_code = TARGET_FPE_FLTOVF;
9899                 }
9900                 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
9901                     si_code = TARGET_FPE_FLTDIV;
9902                 }
9903                 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
9904                     si_code = TARGET_FPE_FLTINV;
9905                 }
9906                 if (si_code != 0) {
9907                     target_siginfo_t info;
9908                     info.si_signo = SIGFPE;
9909                     info.si_errno = 0;
9910                     info.si_code = si_code;
9911                     info._sifields._sigfault._addr
9912                         = ((CPUArchState *)cpu_env)->pc;
9913                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
9914                                  QEMU_SI_FAULT, &info);
9915                 }
9916             }
9917             break;
9918 
9919           /* case SSI_NVPAIRS:
9920              -- Used with SSIN_UACPROC to enable unaligned accesses.
9921              case SSI_IEEE_STATE_AT_SIGNAL:
9922              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
9923              -- Not implemented in linux kernel
9924           */
9925         }
9926         return ret;
9927 #endif
9928 #ifdef TARGET_NR_osf_sigprocmask
9929     /* Alpha specific.  */
9930     case TARGET_NR_osf_sigprocmask:
9931         {
9932             abi_ulong mask;
9933             int how;
9934             sigset_t set, oldset;
9935 
9936             switch (arg1) {
9937             case TARGET_SIG_BLOCK:
9938                 how = SIG_BLOCK;
9939                 break;
9940             case TARGET_SIG_UNBLOCK:
9941                 how = SIG_UNBLOCK;
9942                 break;
9943             case TARGET_SIG_SETMASK:
9944                 how = SIG_SETMASK;
9945                 break;
9946             default:
9947                 return -TARGET_EINVAL;
9948             }
9949             mask = arg2;
9950             target_to_host_old_sigset(&set, &mask);
9951             ret = do_sigprocmask(how, &set, &oldset);
9952             if (!ret) {
9953                 host_to_target_old_sigset(&mask, &oldset);
9954                 ret = mask;
9955             }
9956         }
9957         return ret;
9958 #endif
9959 
9960 #ifdef TARGET_NR_getgid32
9961     case TARGET_NR_getgid32:
9962         return get_errno(getgid());
9963 #endif
9964 #ifdef TARGET_NR_geteuid32
9965     case TARGET_NR_geteuid32:
9966         return get_errno(geteuid());
9967 #endif
9968 #ifdef TARGET_NR_getegid32
9969     case TARGET_NR_getegid32:
9970         return get_errno(getegid());
9971 #endif
9972 #ifdef TARGET_NR_setreuid32
9973     case TARGET_NR_setreuid32:
9974         return get_errno(setreuid(arg1, arg2));
9975 #endif
9976 #ifdef TARGET_NR_setregid32
9977     case TARGET_NR_setregid32:
9978         return get_errno(setregid(arg1, arg2));
9979 #endif
9980 #ifdef TARGET_NR_getgroups32
9981     case TARGET_NR_getgroups32:
9982         {
9983             int gidsetsize = arg1;
9984             uint32_t *target_grouplist;
9985             gid_t *grouplist;
9986             int i;
9987 
9988             grouplist = alloca(gidsetsize * sizeof(gid_t));
9989             ret = get_errno(getgroups(gidsetsize, grouplist));
9990             if (gidsetsize == 0)
9991                 return ret;
9992             if (!is_error(ret)) {
9993                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
9994                 if (!target_grouplist) {
9995                     return -TARGET_EFAULT;
9996                 }
9997                 for (i = 0; i < ret; i++)
9998                     target_grouplist[i] = tswap32(grouplist[i]);
9999                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10000             }
10001         }
10002         return ret;
10003 #endif
10004 #ifdef TARGET_NR_setgroups32
10005     case TARGET_NR_setgroups32:
10006         {
10007             int gidsetsize = arg1;
10008             uint32_t *target_grouplist;
10009             gid_t *grouplist;
10010             int i;
10011 
10012             grouplist = alloca(gidsetsize * sizeof(gid_t));
10013             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10014             if (!target_grouplist) {
10015                 return -TARGET_EFAULT;
10016             }
10017             for (i = 0; i < gidsetsize; i++)
10018                 grouplist[i] = tswap32(target_grouplist[i]);
10019             unlock_user(target_grouplist, arg2, 0);
10020             return get_errno(setgroups(gidsetsize, grouplist));
10021         }
10022 #endif
10023 #ifdef TARGET_NR_fchown32
10024     case TARGET_NR_fchown32:
10025         return get_errno(fchown(arg1, arg2, arg3));
10026 #endif
10027 #ifdef TARGET_NR_setresuid32
10028     case TARGET_NR_setresuid32:
10029         return get_errno(sys_setresuid(arg1, arg2, arg3));
10030 #endif
10031 #ifdef TARGET_NR_getresuid32
10032     case TARGET_NR_getresuid32:
10033         {
10034             uid_t ruid, euid, suid;
10035             ret = get_errno(getresuid(&ruid, &euid, &suid));
10036             if (!is_error(ret)) {
10037                 if (put_user_u32(ruid, arg1)
10038                     || put_user_u32(euid, arg2)
10039                     || put_user_u32(suid, arg3))
10040                     return -TARGET_EFAULT;
10041             }
10042         }
10043         return ret;
10044 #endif
10045 #ifdef TARGET_NR_setresgid32
10046     case TARGET_NR_setresgid32:
10047         return get_errno(sys_setresgid(arg1, arg2, arg3));
10048 #endif
10049 #ifdef TARGET_NR_getresgid32
10050     case TARGET_NR_getresgid32:
10051         {
10052             gid_t rgid, egid, sgid;
10053             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10054             if (!is_error(ret)) {
10055                 if (put_user_u32(rgid, arg1)
10056                     || put_user_u32(egid, arg2)
10057                     || put_user_u32(sgid, arg3))
10058                     return -TARGET_EFAULT;
10059             }
10060         }
10061         return ret;
10062 #endif
10063 #ifdef TARGET_NR_chown32
10064     case TARGET_NR_chown32:
10065         if (!(p = lock_user_string(arg1)))
10066             return -TARGET_EFAULT;
10067         ret = get_errno(chown(p, arg2, arg3));
10068         unlock_user(p, arg1, 0);
10069         return ret;
10070 #endif
10071 #ifdef TARGET_NR_setuid32
10072     case TARGET_NR_setuid32:
10073         return get_errno(sys_setuid(arg1));
10074 #endif
10075 #ifdef TARGET_NR_setgid32
10076     case TARGET_NR_setgid32:
10077         return get_errno(sys_setgid(arg1));
10078 #endif
10079 #ifdef TARGET_NR_setfsuid32
10080     case TARGET_NR_setfsuid32:
10081         return get_errno(setfsuid(arg1));
10082 #endif
10083 #ifdef TARGET_NR_setfsgid32
10084     case TARGET_NR_setfsgid32:
10085         return get_errno(setfsgid(arg1));
10086 #endif
10087 #ifdef TARGET_NR_mincore
10088     case TARGET_NR_mincore:
10089         {
10090             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10091             if (!a) {
10092                 return -TARGET_ENOMEM;
10093             }
10094             p = lock_user_string(arg3);
10095             if (!p) {
10096                 ret = -TARGET_EFAULT;
10097             } else {
10098                 ret = get_errno(mincore(a, arg2, p));
10099                 unlock_user(p, arg3, ret);
10100             }
10101             unlock_user(a, arg1, 0);
10102         }
10103         return ret;
10104 #endif
10105 #ifdef TARGET_NR_arm_fadvise64_64
10106     case TARGET_NR_arm_fadvise64_64:
10107         /* arm_fadvise64_64 looks like fadvise64_64 but
10108          * with different argument order: fd, advice, offset, len
10109          * rather than the usual fd, offset, len, advice.
10110          * Note that offset and len are both 64-bit so appear as
10111          * pairs of 32-bit registers.
10112          */
10113         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10114                             target_offset64(arg5, arg6), arg2);
10115         return -host_to_target_errno(ret);
10116 #endif
10117 
10118 #if TARGET_ABI_BITS == 32
10119 
10120 #ifdef TARGET_NR_fadvise64_64
10121     case TARGET_NR_fadvise64_64:
10122 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10123         /* 6 args: fd, advice, offset (high, low), len (high, low) */
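        /* Rotate advice from arg2 to arg6 so the common call below sees
         * the usual fd, offset, len, advice order. */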
10124         ret = arg2;
10125         arg2 = arg3;
10126         arg3 = arg4;
10127         arg4 = arg5;
10128         arg5 = arg6;
10129         arg6 = ret;
10130 #else
10131         /* 6 args: fd, offset (high, low), len (high, low), advice */
10132         if (regpairs_aligned(cpu_env, num)) {
10133             /* offset is in (3,4), len in (5,6) and advice in 7 */
10134             arg2 = arg3;
10135             arg3 = arg4;
10136             arg4 = arg5;
10137             arg5 = arg6;
10138             arg6 = arg7;
10139         }
10140 #endif
10141         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10142                             target_offset64(arg4, arg5), arg6);
10143         return -host_to_target_errno(ret);
10144 #endif
10145 
10146 #ifdef TARGET_NR_fadvise64
10147     case TARGET_NR_fadvise64:
10148         /* 5 args: fd, offset (high, low), len, advice */
10149         if (regpairs_aligned(cpu_env, num)) {
10150             /* offset is in (3,4), len in 5 and advice in 6 */
10151             arg2 = arg3;
10152             arg3 = arg4;
10153             arg4 = arg5;
10154             arg5 = arg6;
10155         }
10156         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10157         return -host_to_target_errno(ret);
10158 #endif
10159 
10160 #else /* not a 32-bit ABI */
10161 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10162 #ifdef TARGET_NR_fadvise64_64
10163     case TARGET_NR_fadvise64_64:
10164 #endif
10165 #ifdef TARGET_NR_fadvise64
10166     case TARGET_NR_fadvise64:
10167 #endif
10168 #ifdef TARGET_S390X
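        /* s390 uses different values for POSIX_FADV_DONTNEED and
         * POSIX_FADV_NOREUSE; convert the guest's advice to the host's. */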
10169         switch (arg4) {
10170         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10171         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10172         case 6: arg4 = POSIX_FADV_DONTNEED; break;
10173         case 7: arg4 = POSIX_FADV_NOREUSE; break;
10174         default: break;
10175         }
10176 #endif
10177         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10178 #endif
10179 #endif /* end of 64-bit ABI fadvise handling */
10180 
10181 #ifdef TARGET_NR_madvise
10182     case TARGET_NR_madvise:
10183         /* A straight passthrough may not be safe because qemu sometimes
10184            turns private file-backed mappings into anonymous mappings.
10185            This will break MADV_DONTNEED.
10186            This is a hint, so ignoring and returning success is ok.  */
10187         return 0;
10188 #endif
10189 #if TARGET_ABI_BITS == 32
10190     case TARGET_NR_fcntl64:
10191     {
10192         int cmd;
10193         struct flock64 fl;
10194         from_flock64_fn *copyfrom = copy_from_user_flock64;
10195         to_flock64_fn *copyto = copy_to_user_flock64;
10196 
10197 #ifdef TARGET_ARM
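        /* The old ARM OABI lays out struct flock64 without EABI's 64-bit
         * alignment padding, so it needs its own copy helpers. */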
10198         if (!((CPUARMState *)cpu_env)->eabi) {
10199             copyfrom = copy_from_user_oabi_flock64;
10200             copyto = copy_to_user_oabi_flock64;
10201         }
10202 #endif
10203 
10204         cmd = target_to_host_fcntl_cmd(arg2);
10205         if (cmd == -TARGET_EINVAL) {
10206             return cmd;
10207         }
10208 
10209         switch (arg2) {
10210         case TARGET_F_GETLK64:
10211             ret = copyfrom(&fl, arg3);
10212             if (ret) {
10213                 break;
10214             }
10215             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10216             if (ret == 0) {
10217                 ret = copyto(arg3, &fl);
10218             }
10219             break;
10220 
10221         case TARGET_F_SETLK64:
10222         case TARGET_F_SETLKW64:
10223             ret = copyfrom(&fl, arg3);
10224             if (ret) {
10225                 break;
10226             }
10227             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10228             break;
10229         default:
10230             ret = do_fcntl(arg1, arg2, arg3);
10231             break;
10232         }
10233         return ret;
10234     }
10235 #endif
10236 #ifdef TARGET_NR_cacheflush
10237     case TARGET_NR_cacheflush:
10238         /* self-modifying code is handled automatically, so nothing needed */
10239         return 0;
10240 #endif
10241 #ifdef TARGET_NR_getpagesize
10242     case TARGET_NR_getpagesize:
10243         return TARGET_PAGE_SIZE;
10244 #endif
10245     case TARGET_NR_gettid:
10246         return get_errno(gettid());
10247 #ifdef TARGET_NR_readahead
10248     case TARGET_NR_readahead:
10249 #if TARGET_ABI_BITS == 32
10250         if (regpairs_aligned(cpu_env, num)) {
10251             arg2 = arg3;
10252             arg3 = arg4;
10253             arg4 = arg5;
10254         }
10255         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
10256 #else
10257         ret = get_errno(readahead(arg1, arg2, arg3));
10258 #endif
10259         return ret;
10260 #endif
10261 #ifdef CONFIG_ATTR
10262 #ifdef TARGET_NR_setxattr
10263     case TARGET_NR_listxattr:
10264     case TARGET_NR_llistxattr:
10265     {
10266         void *p, *b = 0;
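        /* A NULL list buffer is valid; listxattr then just returns the
         * size needed to hold the attribute list. */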
10267         if (arg2) {
10268             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10269             if (!b) {
10270                 return -TARGET_EFAULT;
10271             }
10272         }
10273         p = lock_user_string(arg1);
10274         if (p) {
10275             if (num == TARGET_NR_listxattr) {
10276                 ret = get_errno(listxattr(p, b, arg3));
10277             } else {
10278                 ret = get_errno(llistxattr(p, b, arg3));
10279             }
10280         } else {
10281             ret = -TARGET_EFAULT;
10282         }
10283         unlock_user(p, arg1, 0);
10284         unlock_user(b, arg2, arg3);
10285         return ret;
10286     }
10287     case TARGET_NR_flistxattr:
10288     {
10289         void *b = 0;
10290         if (arg2) {
10291             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10292             if (!b) {
10293                 return -TARGET_EFAULT;
10294             }
10295         }
10296         ret = get_errno(flistxattr(arg1, b, arg3));
10297         unlock_user(b, arg2, arg3);
10298         return ret;
10299     }
10300     case TARGET_NR_setxattr:
10301     case TARGET_NR_lsetxattr:
10302         {
10303             void *p, *n, *v = 0;
10304             if (arg3) {
10305                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10306                 if (!v) {
10307                     return -TARGET_EFAULT;
10308                 }
10309             }
10310             p = lock_user_string(arg1);
10311             n = lock_user_string(arg2);
10312             if (p && n) {
10313                 if (num == TARGET_NR_setxattr) {
10314                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
10315                 } else {
10316                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10317                 }
10318             } else {
10319                 ret = -TARGET_EFAULT;
10320             }
10321             unlock_user(p, arg1, 0);
10322             unlock_user(n, arg2, 0);
10323             unlock_user(v, arg3, 0);
10324         }
10325         return ret;
10326     case TARGET_NR_fsetxattr:
10327         {
10328             void *n, *v = 0;
10329             if (arg3) {
10330                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10331                 if (!v) {
10332                     return -TARGET_EFAULT;
10333                 }
10334             }
10335             n = lock_user_string(arg2);
10336             if (n) {
10337                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10338             } else {
10339                 ret = -TARGET_EFAULT;
10340             }
10341             unlock_user(n, arg2, 0);
10342             unlock_user(v, arg3, 0);
10343         }
10344         return ret;
10345     case TARGET_NR_getxattr:
10346     case TARGET_NR_lgetxattr:
10347         {
10348             void *p, *n, *v = 0;
10349             if (arg3) {
10350                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10351                 if (!v) {
10352                     return -TARGET_EFAULT;
10353                 }
10354             }
10355             p = lock_user_string(arg1);
10356             n = lock_user_string(arg2);
10357             if (p && n) {
10358                 if (num == TARGET_NR_getxattr) {
10359                     ret = get_errno(getxattr(p, n, v, arg4));
10360                 } else {
10361                     ret = get_errno(lgetxattr(p, n, v, arg4));
10362                 }
10363             } else {
10364                 ret = -TARGET_EFAULT;
10365             }
10366             unlock_user(p, arg1, 0);
10367             unlock_user(n, arg2, 0);
10368             unlock_user(v, arg3, arg4);
10369         }
10370         return ret;
10371     case TARGET_NR_fgetxattr:
10372         {
10373             void *n, *v = 0;
10374             if (arg3) {
10375                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10376                 if (!v) {
10377                     return -TARGET_EFAULT;
10378                 }
10379             }
10380             n = lock_user_string(arg2);
10381             if (n) {
10382                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10383             } else {
10384                 ret = -TARGET_EFAULT;
10385             }
10386             unlock_user(n, arg2, 0);
10387             unlock_user(v, arg3, arg4);
10388         }
10389         return ret;
10390     case TARGET_NR_removexattr:
10391     case TARGET_NR_lremovexattr:
10392         {
10393             void *p, *n;
10394             p = lock_user_string(arg1);
10395             n = lock_user_string(arg2);
10396             if (p && n) {
10397                 if (num == TARGET_NR_removexattr) {
10398                     ret = get_errno(removexattr(p, n));
10399                 } else {
10400                     ret = get_errno(lremovexattr(p, n));
10401                 }
10402             } else {
10403                 ret = -TARGET_EFAULT;
10404             }
10405             unlock_user(p, arg1, 0);
10406             unlock_user(n, arg2, 0);
10407         }
10408         return ret;
10409     case TARGET_NR_fremovexattr:
10410         {
10411             void *n;
10412             n = lock_user_string(arg2);
10413             if (n) {
10414                 ret = get_errno(fremovexattr(arg1, n));
10415             } else {
10416                 ret = -TARGET_EFAULT;
10417             }
10418             unlock_user(n, arg2, 0);
10419         }
10420         return ret;
10421 #endif
10422 #endif /* CONFIG_ATTR */
10423 #ifdef TARGET_NR_set_thread_area
10424     case TARGET_NR_set_thread_area:
10425 #if defined(TARGET_MIPS)
10426       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
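      /* On MIPS the TLS pointer lives in the UserLocal register, which
       * the guest reads with the rdhwr instruction. */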
10427       return 0;
10428 #elif defined(TARGET_CRIS)
10429       if (arg1 & 0xff)
10430           ret = -TARGET_EINVAL;
10431       else {
10432           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10433           ret = 0;
10434       }
10435       return ret;
10436 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10437       return do_set_thread_area(cpu_env, arg1);
10438 #elif defined(TARGET_M68K)
10439       {
10440           TaskState *ts = cpu->opaque;
10441           ts->tp_value = arg1;
10442           return 0;
10443       }
10444 #else
10445       return -TARGET_ENOSYS;
10446 #endif
10447 #endif
10448 #ifdef TARGET_NR_get_thread_area
10449     case TARGET_NR_get_thread_area:
10450 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10451         return do_get_thread_area(cpu_env, arg1);
10452 #elif defined(TARGET_M68K)
10453         {
10454             TaskState *ts = cpu->opaque;
10455             return ts->tp_value;
10456         }
10457 #else
10458         return -TARGET_ENOSYS;
10459 #endif
10460 #endif
10461 #ifdef TARGET_NR_getdomainname
10462     case TARGET_NR_getdomainname:
10463         return -TARGET_ENOSYS;
10464 #endif
10465 
10466 #ifdef TARGET_NR_clock_settime
10467     case TARGET_NR_clock_settime:
10468     {
10469         struct timespec ts;
10470 
10471         ret = target_to_host_timespec(&ts, arg2);
10472         if (!is_error(ret)) {
10473             ret = get_errno(clock_settime(arg1, &ts));
10474         }
10475         return ret;
10476     }
10477 #endif
10478 #ifdef TARGET_NR_clock_gettime
10479     case TARGET_NR_clock_gettime:
10480     {
10481         struct timespec ts;
10482         ret = get_errno(clock_gettime(arg1, &ts));
10483         if (!is_error(ret)) {
10484             ret = host_to_target_timespec(arg2, &ts);
10485         }
10486         return ret;
10487     }
10488 #endif
10489 #ifdef TARGET_NR_clock_getres
10490     case TARGET_NR_clock_getres:
10491     {
10492         struct timespec ts;
10493         ret = get_errno(clock_getres(arg1, &ts));
10494         if (!is_error(ret)) {
10495             host_to_target_timespec(arg2, &ts);
10496         }
10497         return ret;
10498     }
10499 #endif
10500 #ifdef TARGET_NR_clock_nanosleep
10501     case TARGET_NR_clock_nanosleep:
10502     {
10503         struct timespec ts;
10504         target_to_host_timespec(&ts, arg3);
10505         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
10506                                              &ts, arg4 ? &ts : NULL));
10507         if (arg4)
10508             host_to_target_timespec(arg4, &ts);
10509 
10510 #if defined(TARGET_PPC)
10511         /* clock_nanosleep is odd in that it returns positive errno values.
10512          * On PPC, CR0 bit 3 should be set in such a situation. */
10513         if (ret && ret != -TARGET_ERESTARTSYS) {
10514             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
10515         }
10516 #endif
10517         return ret;
10518     }
10519 #endif
10520 
10521 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10522     case TARGET_NR_set_tid_address:
10523         return get_errno(set_tid_address((int *)g2h(arg1)));
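        /* The kernel only dereferences this address at thread exit, so
         * passing the host view of the guest pointer is sufficient. */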
10524 #endif
10525 
10526     case TARGET_NR_tkill:
10527         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10528 
10529     case TARGET_NR_tgkill:
10530         return get_errno(safe_tgkill((int)arg1, (int)arg2,
10531                          target_to_host_signal(arg3)));
10532 
10533 #ifdef TARGET_NR_set_robust_list
10534     case TARGET_NR_set_robust_list:
10535     case TARGET_NR_get_robust_list:
10536         /* The ABI for supporting robust futexes has userspace pass
10537          * the kernel a pointer to a linked list which is updated by
10538          * userspace after the syscall; the list is walked by the kernel
10539          * when the thread exits. Since the linked list in QEMU guest
10540          * memory isn't a valid linked list for the host and we have
10541          * no way to reliably intercept the thread-death event, we can't
10542          * support these. Silently return ENOSYS so that guest userspace
10543          * falls back to a non-robust futex implementation (which should
10544          * be OK except in the corner case of the guest crashing while
10545          * holding a mutex that is shared with another process via
10546          * shared memory).
10547          */
10548         return -TARGET_ENOSYS;
10549 #endif
10550 
10551 #if defined(TARGET_NR_utimensat)
10552     case TARGET_NR_utimensat:
10553         {
10554             struct timespec *tsp, ts[2];
10555             if (!arg3) {
10556                 tsp = NULL;
10557             } else {
10558                 target_to_host_timespec(ts, arg3);
10559                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
10560                 tsp = ts;
10561             }
10562             if (!arg2)
10563                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
10564             else {
10565                 if (!(p = lock_user_string(arg2))) {
10566                     return -TARGET_EFAULT;
10567                 }
10568                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
10569                 unlock_user(p, arg2, 0);
10570             }
10571         }
10572         return ret;
10573 #endif
10574     case TARGET_NR_futex:
10575         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
10576 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10577     case TARGET_NR_inotify_init:
10578         ret = get_errno(sys_inotify_init());
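        /* Register a translator so inotify events read from this fd are
         * converted to the target's layout. */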
10579         if (ret >= 0) {
10580             fd_trans_register(ret, &target_inotify_trans);
10581         }
10582         return ret;
10583 #endif
10584 #ifdef CONFIG_INOTIFY1
10585 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10586     case TARGET_NR_inotify_init1:
10587         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
10588                                           fcntl_flags_tbl)));
10589         if (ret >= 0) {
10590             fd_trans_register(ret, &target_inotify_trans);
10591         }
10592         return ret;
10593 #endif
10594 #endif
10595 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10596     case TARGET_NR_inotify_add_watch:
10597         p = lock_user_string(arg2);
              if (!p) {
                  return -TARGET_EFAULT;
              }
10598         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
10599         unlock_user(p, arg2, 0);
10600         return ret;
10601 #endif
10602 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10603     case TARGET_NR_inotify_rm_watch:
10604         return get_errno(sys_inotify_rm_watch(arg1, arg2));
10605 #endif
10606 
10607 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10608     case TARGET_NR_mq_open:
10609         {
10610             struct mq_attr posix_mq_attr;
10611             struct mq_attr *pposix_mq_attr;
10612             int host_flags;
10613 
10614             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
10615             pposix_mq_attr = NULL;
10616             if (arg4) {
10617                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
10618                     return -TARGET_EFAULT;
10619                 }
10620                 pposix_mq_attr = &posix_mq_attr;
10621             }
10622             p = lock_user_string(arg1 - 1);
10623             if (!p) {
10624                 return -TARGET_EFAULT;
10625             }
10626             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
10627             unlock_user(p, arg1, 0);
10628         }
10629         return ret;
10630 
10631     case TARGET_NR_mq_unlink:
10632         p = lock_user_string(arg1 - 1);
10633         if (!p) {
10634             return -TARGET_EFAULT;
10635         }
10636         ret = get_errno(mq_unlink(p));
10637             unlock_user(p, arg1, 0);
10638         return ret;
10639 
10640     case TARGET_NR_mq_timedsend:
10641         {
10642             struct timespec ts;
10643 
10644             p = lock_user(VERIFY_READ, arg2, arg3, 1);
                  if (!p) {
                      return -TARGET_EFAULT;
                  }
10645             if (arg5 != 0) {
10646                 target_to_host_timespec(&ts, arg5);
10647                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
10648                 host_to_target_timespec(arg5, &ts);
10649             } else {
10650                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
10651             }
10652             unlock_user(p, arg2, arg3);
10653         }
10654         return ret;
10655 
10656     case TARGET_NR_mq_timedreceive:
10657         {
10658             struct timespec ts;
10659             unsigned int prio;
10660 
10661             p = lock_user(VERIFY_READ, arg2, arg3, 1);
                  if (!p) {
                      return -TARGET_EFAULT;
                  }
10662             if (arg5 != 0) {
10663                 target_to_host_timespec(&ts, arg5);
10664                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
10665                                                      &prio, &ts));
10666                 host_to_target_timespec(arg5, &ts);
10667             } else {
10668                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
10669                                                      &prio, NULL));
10670             }
10671             unlock_user(p, arg2, arg3);
10672             if (arg4 != 0) {
10673                 put_user_u32(prio, arg4);
                  }
10674         }
10675         return ret;
10676 
10677     /* Not implemented for now... */
10678 /*     case TARGET_NR_mq_notify: */
10679 /*         break; */
10680 
10681     case TARGET_NR_mq_getsetattr:
10682         {
10683             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
10684             ret = 0;
10685             if (arg2 != 0) {
10686                 if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                          return -TARGET_EFAULT;
                      }
10687                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
10688                                            &posix_mq_attr_out));
10689             } else if (arg3 != 0) {
10690                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
10691             }
10692             if (ret == 0 && arg3 != 0) {
10693                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
10694             }
10695         }
10696         return ret;
10697 #endif
10698 
10699 #ifdef CONFIG_SPLICE
10700 #ifdef TARGET_NR_tee
10701     case TARGET_NR_tee:
10702         {
10703             ret = get_errno(tee(arg1, arg2, arg3, arg4));
10704         }
10705         return ret;
10706 #endif
10707 #ifdef TARGET_NR_splice
10708     case TARGET_NR_splice:
10709         {
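            /* Both offsets are optional: each is copied in from guest
             * memory only if the guest supplied a pointer, and the value
             * (which the host splice() may have advanced) is copied back
             * afterwards. */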
10710             loff_t loff_in, loff_out;
10711             loff_t *ploff_in = NULL, *ploff_out = NULL;
10712             if (arg2) {
10713                 if (get_user_u64(loff_in, arg2)) {
10714                     return -TARGET_EFAULT;
10715                 }
10716                 ploff_in = &loff_in;
10717             }
10718             if (arg4) {
10719                 if (get_user_u64(loff_out, arg4)) {
10720                     return -TARGET_EFAULT;
10721                 }
10722                 ploff_out = &loff_out;
10723             }
10724             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
10725             if (arg2) {
10726                 if (put_user_u64(loff_in, arg2)) {
10727                     return -TARGET_EFAULT;
10728                 }
10729             }
10730             if (arg4) {
10731                 if (put_user_u64(loff_out, arg4)) {
10732                     return -TARGET_EFAULT;
10733                 }
10734             }
10735         }
10736         return ret;
10737 #endif
10738 #ifdef TARGET_NR_vmsplice
10739     case TARGET_NR_vmsplice:
10740         {
10741             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10742             if (vec != NULL) {
10743                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
10744                 unlock_iovec(vec, arg2, arg3, 0);
10745             } else {
10746                 ret = -host_to_target_errno(errno);
10747             }
10748         }
10749         return ret;
10750 #endif
10751 #endif /* CONFIG_SPLICE */
10752 #ifdef CONFIG_EVENTFD
10753 #if defined(TARGET_NR_eventfd)
10754     case TARGET_NR_eventfd:
10755         ret = get_errno(eventfd(arg1, 0));
10756         if (ret >= 0) {
10757             fd_trans_register(ret, &target_eventfd_trans);
10758         }
10759         return ret;
10760 #endif
10761 #if defined(TARGET_NR_eventfd2)
10762     case TARGET_NR_eventfd2:
10763     {
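        /* EFD_NONBLOCK and EFD_CLOEXEC share their values with O_NONBLOCK
         * and O_CLOEXEC, which differ between some guests and the host, so
         * translate those two bits explicitly and pass the rest through. */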
10764         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
10765         if (arg2 & TARGET_O_NONBLOCK) {
10766             host_flags |= O_NONBLOCK;
10767         }
10768         if (arg2 & TARGET_O_CLOEXEC) {
10769             host_flags |= O_CLOEXEC;
10770         }
10771         ret = get_errno(eventfd(arg1, host_flags));
10772         if (ret >= 0) {
10773             fd_trans_register(ret, &target_eventfd_trans);
10774         }
10775         return ret;
10776     }
10777 #endif
10778 #endif /* CONFIG_EVENTFD  */
10779 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
10780     case TARGET_NR_fallocate:
10781 #if TARGET_ABI_BITS == 32
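        /* On 32-bit ABIs the 64-bit offset and length each arrive split
         * across two syscall arguments; target_offset64() reassembles each
         * pair into a host 64-bit value. */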
10782         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
10783                                   target_offset64(arg5, arg6)));
10784 #else
10785         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
10786 #endif
10787         return ret;
10788 #endif
10789 #if defined(CONFIG_SYNC_FILE_RANGE)
10790 #if defined(TARGET_NR_sync_file_range)
10791     case TARGET_NR_sync_file_range:
10792 #if TARGET_ABI_BITS == 32
10793 #if defined(TARGET_MIPS)
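        /* MIPS o32 aligns 64-bit syscall arguments to register pairs and
         * inserts a pad after the fd, so offset is arg3/arg4, nbytes is
         * arg5/arg6 and the flags arrive in arg7. */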
10794         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
10795                                         target_offset64(arg5, arg6), arg7));
10796 #else
10797         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
10798                                         target_offset64(arg4, arg5), arg6));
10799 #endif /* !TARGET_MIPS */
10800 #else
10801         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
10802 #endif
10803         return ret;
10804 #endif
10805 #if defined(TARGET_NR_sync_file_range2)
10806     case TARGET_NR_sync_file_range2:
10807         /* This is like sync_file_range but the arguments are reordered */
10808 #if TARGET_ABI_BITS == 32
10809         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
10810                                         target_offset64(arg5, arg6), arg2));
10811 #else
10812         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
10813 #endif
10814         return ret;
10815 #endif
10816 #endif
10817 #if defined(TARGET_NR_signalfd4)
10818     case TARGET_NR_signalfd4:
10819         return do_signalfd4(arg1, arg2, arg4);
10820 #endif
10821 #if defined(TARGET_NR_signalfd)
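    /* The old signalfd syscall is signalfd4 with no flags. */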
10822     case TARGET_NR_signalfd:
10823         return do_signalfd4(arg1, arg2, 0);
10824 #endif
10825 #if defined(CONFIG_EPOLL)
10826 #if defined(TARGET_NR_epoll_create)
10827     case TARGET_NR_epoll_create:
10828         return get_errno(epoll_create(arg1));
10829 #endif
10830 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
10831     case TARGET_NR_epoll_create1:
10832         return get_errno(epoll_create1(arg1));
10833 #endif
10834 #if defined(TARGET_NR_epoll_ctl)
10835     case TARGET_NR_epoll_ctl:
10836     {
10837         struct epoll_event ep;
10838         struct epoll_event *epp = 0;
10839         if (arg4) {
10840             struct target_epoll_event *target_ep;
10841             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
10842                 return -TARGET_EFAULT;
10843             }
10844             ep.events = tswap32(target_ep->events);
10845             /* The epoll_data_t union is just opaque data to the kernel,
10846              * so we transfer all 64 bits across and need not worry what
10847              * actual data type it is.
10848              */
10849             ep.data.u64 = tswap64(target_ep->data.u64);
10850             unlock_user_struct(target_ep, arg4, 0);
10851             epp = &ep;
10852         }
10853         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
10854     }
10855 #endif
10856 
10857 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
10858 #if defined(TARGET_NR_epoll_wait)
10859     case TARGET_NR_epoll_wait:
10860 #endif
10861 #if defined(TARGET_NR_epoll_pwait)
10862     case TARGET_NR_epoll_pwait:
10863 #endif
10864     {
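        /* Both variants funnel into the host epoll_pwait(); plain
         * epoll_wait is simply epoll_pwait with no signal mask. */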
10865         struct target_epoll_event *target_ep;
10866         struct epoll_event *ep;
10867         int epfd = arg1;
10868         int maxevents = arg3;
10869         int timeout = arg4;
10870 
10871         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
10872             return -TARGET_EINVAL;
10873         }
10874 
10875         target_ep = lock_user(VERIFY_WRITE, arg2,
10876                               maxevents * sizeof(struct target_epoll_event), 1);
10877         if (!target_ep) {
10878             return -TARGET_EFAULT;
10879         }
10880 
10881         ep = g_try_new(struct epoll_event, maxevents);
10882         if (!ep) {
10883             unlock_user(target_ep, arg2, 0);
10884             return -TARGET_ENOMEM;
10885         }
10886 
10887         switch (num) {
10888 #if defined(TARGET_NR_epoll_pwait)
10889         case TARGET_NR_epoll_pwait:
10890         {
10891             target_sigset_t *target_set;
10892             sigset_t _set, *set = &_set;
10893 
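            /* The guest's sigsetsize (arg6) must match the target's
             * sigset_t; the host syscall is then passed the host's
             * SIGSET_T_SIZE. */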
10894             if (arg5) {
10895                 if (arg6 != sizeof(target_sigset_t)) {
10896                     ret = -TARGET_EINVAL;
10897                     break;
10898                 }
10899 
10900                 target_set = lock_user(VERIFY_READ, arg5,
10901                                        sizeof(target_sigset_t), 1);
10902                 if (!target_set) {
10903                     ret = -TARGET_EFAULT;
10904                     break;
10905                 }
10906                 target_to_host_sigset(set, target_set);
10907                 unlock_user(target_set, arg5, 0);
10908             } else {
10909                 set = NULL;
10910             }
10911 
10912             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
10913                                              set, SIGSET_T_SIZE));
10914             break;
10915         }
10916 #endif
10917 #if defined(TARGET_NR_epoll_wait)
10918         case TARGET_NR_epoll_wait:
10919             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
10920                                              NULL, 0));
10921             break;
10922 #endif
10923         default:
10924             ret = -TARGET_ENOSYS;
10925         }
10926         if (!is_error(ret)) {
10927             int i;
10928             for (i = 0; i < ret; i++) {
10929                 target_ep[i].events = tswap32(ep[i].events);
10930                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
10931             }
10932             unlock_user(target_ep, arg2,
10933                         ret * sizeof(struct target_epoll_event));
10934         } else {
10935             unlock_user(target_ep, arg2, 0);
10936         }
10937         g_free(ep);
10938         return ret;
10939     }
10940 #endif
10941 #endif
10942 #ifdef TARGET_NR_prlimit64
10943     case TARGET_NR_prlimit64:
10944     {
10945         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
10946         struct target_rlimit64 *target_rnew, *target_rold;
10947         struct host_rlimit64 rnew, rold, *rnewp = 0;
10948         int resource = target_to_host_resource(arg2);
10949         if (arg3) {
10950             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
10951                 return -TARGET_EFAULT;
10952             }
10953             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
10954             rnew.rlim_max = tswap64(target_rnew->rlim_max);
10955             unlock_user_struct(target_rnew, arg3, 0);
10956             rnewp = &rnew;
10957         }
10958 
10959         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
10960         if (!is_error(ret) && arg4) {
10961             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
10962                 return -TARGET_EFAULT;
10963             }
10964             target_rold->rlim_cur = tswap64(rold.rlim_cur);
10965             target_rold->rlim_max = tswap64(rold.rlim_max);
10966             unlock_user_struct(target_rold, arg4, 1);
10967         }
10968         return ret;
10969     }
10970 #endif
10971 #ifdef TARGET_NR_gethostname
10972     case TARGET_NR_gethostname:
10973     {
10974         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10975         if (name) {
10976             ret = get_errno(gethostname(name, arg2));
10977             unlock_user(name, arg1, arg2);
10978         } else {
10979             ret = -TARGET_EFAULT;
10980         }
10981         return ret;
10982     }
10983 #endif
10984 #ifdef TARGET_NR_atomic_cmpxchg_32
10985     case TARGET_NR_atomic_cmpxchg_32:
10986     {
10987         /* Should use start_exclusive() from main.c to be truly atomic. */
10988         abi_ulong mem_value;
10989         if (get_user_u32(mem_value, arg6)) {
10990             target_siginfo_t info;
10991             info.si_signo = SIGSEGV;
10992             info.si_errno = 0;
10993             info.si_code = TARGET_SEGV_MAPERR;
10994             info._sifields._sigfault._addr = arg6;
10995             queue_signal((CPUArchState *)cpu_env, info.si_signo,
10996                          QEMU_SI_FAULT, &info);
10997             ret = 0xdeadbeef;
10998             return ret;
10999         }
11000         if (mem_value == arg2) {
11001             put_user_u32(arg1, arg6);
              }
11002         return mem_value;
11003     }
11004 #endif
11005 #ifdef TARGET_NR_atomic_barrier
11006     case TARGET_NR_atomic_barrier:
11007         /* Like the kernel implementation and the QEMU ARM
11008          * barrier, treat this as a no-op. */
11009         return 0;
11010 #endif
11011 
11012 #ifdef TARGET_NR_timer_create
11013     case TARGET_NR_timer_create:
11014     {
11015         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11016 
11017         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11018 
11019         int clkid = arg1;
11020         int timer_index = next_free_host_timer();
11021 
11022         if (timer_index < 0) {
11023             ret = -TARGET_EAGAIN;
11024         } else {
11025             timer_t *phtimer = g_posix_timers + timer_index;
11026 
11027             if (arg2) {
11028                 phost_sevp = &host_sevp;
11029                 ret = target_to_host_sigevent(phost_sevp, arg2);
11030                 if (ret != 0) {
11031                     return ret;
11032                 }
11033             }
11034 
11035             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11036             if (ret) {
11037                 phtimer = NULL;
11038             } else {
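                /* The timer ID returned to the guest encodes the index
                 * into g_posix_timers together with TIMER_MAGIC, so that
                 * get_timer_id() can validate and decode it when the
                 * guest passes it back. */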
11039                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11040                     return -TARGET_EFAULT;
11041                 }
11042             }
11043         }
11044         return ret;
11045     }
11046 #endif
11047 
11048 #ifdef TARGET_NR_timer_settime
11049     case TARGET_NR_timer_settime:
11050     {
11051         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11052          * struct itimerspec * old_value */
11053         target_timer_t timerid = get_timer_id(arg1);
11054 
11055         if (timerid < 0) {
11056             ret = timerid;
11057         } else if (arg3 == 0) {
11058             ret = -TARGET_EINVAL;
11059         } else {
11060             timer_t htimer = g_posix_timers[timerid];
11061             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11062 
11063             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11064                 return -TARGET_EFAULT;
11065             }
11066             ret = get_errno(
11067                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11068             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11069                 return -TARGET_EFAULT;
11070             }
11071         }
11072         return ret;
11073     }
11074 #endif
11075 
11076 #ifdef TARGET_NR_timer_gettime
11077     case TARGET_NR_timer_gettime:
11078     {
11079         /* args: timer_t timerid, struct itimerspec *curr_value */
11080         target_timer_t timerid = get_timer_id(arg1);
11081 
11082         if (timerid < 0) {
11083             ret = timerid;
11084         } else if (!arg2) {
11085             ret = -TARGET_EFAULT;
11086         } else {
11087             timer_t htimer = g_posix_timers[timerid];
11088             struct itimerspec hspec;
11089             ret = get_errno(timer_gettime(htimer, &hspec));
11090 
11091             if (host_to_target_itimerspec(arg2, &hspec)) {
11092                 ret = -TARGET_EFAULT;
11093             }
11094         }
11095         return ret;
11096     }
11097 #endif
11098 
11099 #ifdef TARGET_NR_timer_getoverrun
11100     case TARGET_NR_timer_getoverrun:
11101     {
11102         /* args: timer_t timerid */
11103         target_timer_t timerid = get_timer_id(arg1);
11104 
11105         if (timerid < 0) {
11106             ret = timerid;
11107         } else {
11108             timer_t htimer = g_posix_timers[timerid];
11109             ret = get_errno(timer_getoverrun(htimer));
11110         }
11111         fd_trans_unregister(ret);
11112         return ret;
11113     }
11114 #endif
11115 
11116 #ifdef TARGET_NR_timer_delete
11117     case TARGET_NR_timer_delete:
11118     {
11119         /* args: timer_t timerid */
11120         target_timer_t timerid = get_timer_id(arg1);
11121 
11122         if (timerid < 0) {
11123             ret = timerid;
11124         } else {
11125             timer_t htimer = g_posix_timers[timerid];
11126             ret = get_errno(timer_delete(htimer));
11127             g_posix_timers[timerid] = 0;
11128         }
11129         return ret;
11130     }
11131 #endif
11132 
11133 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11134     case TARGET_NR_timerfd_create:
11135         return get_errno(timerfd_create(arg1,
11136                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11137 #endif
11138 
11139 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11140     case TARGET_NR_timerfd_gettime:
11141         {
11142             struct itimerspec its_curr;
11143 
11144             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11145 
11146             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11147                 return -TARGET_EFAULT;
11148             }
11149         }
11150         return ret;
11151 #endif
11152 
11153 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11154     case TARGET_NR_timerfd_settime:
11155         {
11156             struct itimerspec its_new, its_old, *p_new;
11157 
11158             if (arg3) {
11159                 if (target_to_host_itimerspec(&its_new, arg3)) {
11160                     return -TARGET_EFAULT;
11161                 }
11162                 p_new = &its_new;
11163             } else {
11164                 p_new = NULL;
11165             }
11166 
11167             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11168 
11169             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11170                 return -TARGET_EFAULT;
11171             }
11172         }
11173         return ret;
11174 #endif
11175 
11176 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11177     case TARGET_NR_ioprio_get:
11178         return get_errno(ioprio_get(arg1, arg2));
11179 #endif
11180 
11181 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11182     case TARGET_NR_ioprio_set:
11183         return get_errno(ioprio_set(arg1, arg2, arg3));
11184 #endif
11185 
11186 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11187     case TARGET_NR_setns:
11188         return get_errno(setns(arg1, arg2));
11189 #endif
11190 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11191     case TARGET_NR_unshare:
11192         return get_errno(unshare(arg1));
11193 #endif
11194 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11195     case TARGET_NR_kcmp:
11196         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11197 #endif
11198 #ifdef TARGET_NR_swapcontext
11199     case TARGET_NR_swapcontext:
11200         /* PowerPC specific.  */
11201         return do_swapcontext(cpu_env, arg1, arg2, arg3);
11202 #endif
11203 
11204     default:
11205         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11206         return -TARGET_ENOSYS;
11207     }
11208     return ret;
11209 }
11210 
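      /* Thin wrapper around do_syscall1(): emit the guest syscall tracepoints
       * and, when -strace is enabled, print the syscall and its return value. */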
11211 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11212                     abi_long arg2, abi_long arg3, abi_long arg4,
11213                     abi_long arg5, abi_long arg6, abi_long arg7,
11214                     abi_long arg8)
11215 {
11216     CPUState *cpu = ENV_GET_CPU(cpu_env);
11217     abi_long ret;
11218 
11219 #ifdef DEBUG_ERESTARTSYS
11220     /* Debug-only code for exercising the syscall-restart code paths
11221      * in the per-architecture cpu main loops: restart every syscall
11222      * the guest makes once before letting it through.
11223      */
11224     {
11225         static bool flag;
11226         flag = !flag;
11227         if (flag) {
11228             return -TARGET_ERESTARTSYS;
11229         }
11230     }
11231 #endif
11232 
11233     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11234                              arg5, arg6, arg7, arg8);
11235 
11236     if (unlikely(do_strace)) {
11237         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11238         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11239                           arg5, arg6, arg7, arg8);
11240         print_syscall_ret(num, ret);
11241     } else {
11242         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11243                           arg5, arg6, arg7, arg8);
11244     }
11245 
11246     trace_guest_user_syscall_ret(cpu, num, ret);
11247     return ret;
11248 }
11249