xref: /openbmc/qemu/linux-user/syscall.c (revision 073d9f2c)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
77 
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
84 
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #if defined(CONFIG_USBFS)
98 #include <linux/usbdevice_fs.h>
99 #include <linux/usb/ch9.h>
100 #endif
101 #include <linux/vt.h>
102 #include <linux/dm-ioctl.h>
103 #include <linux/reboot.h>
104 #include <linux/route.h>
105 #include <linux/filter.h>
106 #include <linux/blkpg.h>
107 #include <netpacket/packet.h>
108 #include <linux/netlink.h>
109 #include "linux_loop.h"
110 #include "uname.h"
111 
112 #include "qemu.h"
113 #include "fd-trans.h"
114 
115 #ifndef CLONE_IO
116 #define CLONE_IO                0x80000000      /* Clone io context */
117 #endif
118 
119 /* We can't directly call the host clone syscall, because this will
120  * badly confuse libc (breaking mutexes, for example). So we must
121  * divide clone flags into:
122  *  * flag combinations that look like pthread_create()
123  *  * flag combinations that look like fork()
124  *  * flags we can implement within QEMU itself
125  *  * flags we can't support and will return an error for
126  */
127 /* For thread creation, all these flags must be present; for
128  * fork, none must be present.
129  */
130 #define CLONE_THREAD_FLAGS                              \
131     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
132      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
133 
134 /* These flags are ignored:
135  * CLONE_DETACHED is now ignored by the kernel;
136  * CLONE_IO is just an optimisation hint to the I/O scheduler
137  */
138 #define CLONE_IGNORED_FLAGS                     \
139     (CLONE_DETACHED | CLONE_IO)
140 
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS               \
143     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
144      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
145 
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
148     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
149      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
150 
151 #define CLONE_INVALID_FORK_FLAGS                                        \
152     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
153 
154 #define CLONE_INVALID_THREAD_FLAGS                                      \
155     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
156        CLONE_IGNORED_FLAGS))
157 
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159  * have almost all been allocated. We cannot support any of
160  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162  * The checks against the invalid thread masks above will catch these.
163  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
164  */
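/* A simplified sketch of how these masks are meant to be consulted by
 * do_fork() further down (the real code also strips CLONE_VFORK and sets
 * up TLS/TID pointers; details may differ slightly):
 *
 *     if (flags & CLONE_VM) {
 *         if (flags & CLONE_INVALID_THREAD_FLAGS) {
 *             return -TARGET_EINVAL;
 *         }
 *         // pthread_create()-style path
 *     } else {
 *         if (flags & CLONE_INVALID_FORK_FLAGS) {
 *             return -TARGET_EINVAL;
 *         }
 *         // fork()-style path
 *     }
 */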
165 
166 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
167  * once. This exercises the codepaths for restart.
168  */
169 //#define DEBUG_ERESTARTSYS
170 
171 //#include <linux/msdos_fs.h>
172 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
173 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
174 
175 #undef _syscall0
176 #undef _syscall1
177 #undef _syscall2
178 #undef _syscall3
179 #undef _syscall4
180 #undef _syscall5
181 #undef _syscall6
182 
183 #define _syscall0(type,name)		\
184 static type name (void)			\
185 {					\
186 	return syscall(__NR_##name);	\
187 }
188 
189 #define _syscall1(type,name,type1,arg1)		\
190 static type name (type1 arg1)			\
191 {						\
192 	return syscall(__NR_##name, arg1);	\
193 }
194 
195 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
196 static type name (type1 arg1,type2 arg2)		\
197 {							\
198 	return syscall(__NR_##name, arg1, arg2);	\
199 }
200 
201 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
202 static type name (type1 arg1,type2 arg2,type3 arg3)		\
203 {								\
204 	return syscall(__NR_##name, arg1, arg2, arg3);		\
205 }
206 
207 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
208 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
209 {										\
210 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
211 }
212 
213 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
214 		  type5,arg5)							\
215 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
216 {										\
217 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
218 }
219 
220 
221 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
222 		  type5,arg5,type6,arg6)					\
223 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
224                   type6 arg6)							\
225 {										\
226 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
227 }
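/* For illustration, a declaration such as
 *
 *     _syscall0(int, gettid)
 *
 * expands to a static wrapper that invokes the raw host syscall and so
 * reports failure via the host errno:
 *
 *     static int gettid(void)
 *     {
 *         return syscall(__NR_gettid);
 *     }
 *
 * The _syscall1..6 variants work the same way, forwarding their typed
 * arguments straight to syscall().
 */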
228 
229 
230 #define __NR_sys_uname __NR_uname
231 #define __NR_sys_getcwd1 __NR_getcwd
232 #define __NR_sys_getdents __NR_getdents
233 #define __NR_sys_getdents64 __NR_getdents64
234 #define __NR_sys_getpriority __NR_getpriority
235 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
236 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
237 #define __NR_sys_syslog __NR_syslog
238 #define __NR_sys_futex __NR_futex
239 #define __NR_sys_inotify_init __NR_inotify_init
240 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
241 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
242 
243 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
244 #define __NR__llseek __NR_lseek
245 #endif
246 
247 /* Newer kernel ports have llseek() instead of _llseek() */
248 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
249 #define TARGET_NR__llseek TARGET_NR_llseek
250 #endif
251 
252 #ifdef __NR_gettid
253 _syscall0(int, gettid)
254 #else
255 /* This is a replacement for the host gettid() and must return a host
256    errno. */
257 static int gettid(void) {
258     return -ENOSYS;
259 }
260 #endif
261 
262 /* For the 64-bit guest on 32-bit host case we must emulate
263  * getdents using getdents64, because otherwise the host
264  * might hand us back more dirent records than we can fit
265  * into the guest buffer after structure format conversion.
266  * Otherwise we emulate the guest getdents using the host getdents, if the host has it.
267  */
268 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
269 #define EMULATE_GETDENTS_WITH_GETDENTS
270 #endif
271 
272 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
273 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
274 #endif
275 #if (defined(TARGET_NR_getdents) && \
276       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
277     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
278 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
279 #endif
280 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
281 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
282           loff_t *, res, uint, wh);
283 #endif
284 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
285 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
286           siginfo_t *, uinfo)
287 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
288 #ifdef __NR_exit_group
289 _syscall1(int,exit_group,int,error_code)
290 #endif
291 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
292 _syscall1(int,set_tid_address,int *,tidptr)
293 #endif
294 #if defined(TARGET_NR_futex) && defined(__NR_futex)
295 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
296           const struct timespec *,timeout,int *,uaddr2,int,val3)
297 #endif
298 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
299 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
300           unsigned long *, user_mask_ptr);
301 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
302 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
303           unsigned long *, user_mask_ptr);
304 #define __NR_sys_getcpu __NR_getcpu
305 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
306 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
307           void *, arg);
308 _syscall2(int, capget, struct __user_cap_header_struct *, header,
309           struct __user_cap_data_struct *, data);
310 _syscall2(int, capset, struct __user_cap_header_struct *, header,
311           struct __user_cap_data_struct *, data);
312 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
313 _syscall2(int, ioprio_get, int, which, int, who)
314 #endif
315 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
316 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
317 #endif
318 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
319 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
320 #endif
321 
322 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
323 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
324           unsigned long, idx1, unsigned long, idx2)
325 #endif
326 
327 static bitmask_transtbl fcntl_flags_tbl[] = {
328   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
329   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
330   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
331   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
332   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
333   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
334   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
335   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
336   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
337   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
338   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
339   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
340   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
341 #if defined(O_DIRECT)
342   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
343 #endif
344 #if defined(O_NOATIME)
345   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
346 #endif
347 #if defined(O_CLOEXEC)
348   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
349 #endif
350 #if defined(O_PATH)
351   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
352 #endif
353 #if defined(O_TMPFILE)
354   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
355 #endif
356   /* Don't terminate the list prematurely on 64-bit host+guest.  */
357 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
358   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
359 #endif
360   { 0, 0, 0, 0 }
361 };
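/* Each bitmask_transtbl row is { target_mask, target_bits, host_mask,
 * host_bits }.  The translation helpers later in this file walk such a
 * table and OR in the host bits for every row whose masked target value
 * matches; roughly (a sketch, showing only the target-to-host direction):
 *
 *     unsigned int host_flags = 0;
 *     for (btp = fcntl_flags_tbl; btp->target_mask; btp++) {
 *         if ((target_flags & btp->target_mask) == btp->target_bits) {
 *             host_flags |= btp->host_bits;
 *         }
 *     }
 */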
362 
363 static int sys_getcwd1(char *buf, size_t size)
364 {
365   if (getcwd(buf, size) == NULL) {
366       /* getcwd() sets errno */
367       return (-1);
368   }
369   return strlen(buf)+1;
370 }
371 
372 #ifdef TARGET_NR_utimensat
373 #if defined(__NR_utimensat)
374 #define __NR_sys_utimensat __NR_utimensat
375 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
376           const struct timespec *,tsp,int,flags)
377 #else
378 static int sys_utimensat(int dirfd, const char *pathname,
379                          const struct timespec times[2], int flags)
380 {
381     errno = ENOSYS;
382     return -1;
383 }
384 #endif
385 #endif /* TARGET_NR_utimensat */
386 
387 #ifdef TARGET_NR_renameat2
388 #if defined(__NR_renameat2)
389 #define __NR_sys_renameat2 __NR_renameat2
390 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
391           const char *, new, unsigned int, flags)
392 #else
393 static int sys_renameat2(int oldfd, const char *old,
394                          int newfd, const char *new, int flags)
395 {
396     if (flags == 0) {
397         return renameat(oldfd, old, newfd, new);
398     }
399     errno = ENOSYS;
400     return -1;
401 }
402 #endif
403 #endif /* TARGET_NR_renameat2 */
404 
405 #ifdef CONFIG_INOTIFY
406 #include <sys/inotify.h>
407 
408 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
409 static int sys_inotify_init(void)
410 {
411   return (inotify_init());
412 }
413 #endif
414 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
415 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
416 {
417   return (inotify_add_watch(fd, pathname, mask));
418 }
419 #endif
420 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
421 static int sys_inotify_rm_watch(int fd, int32_t wd)
422 {
423   return (inotify_rm_watch(fd, wd));
424 }
425 #endif
426 #ifdef CONFIG_INOTIFY1
427 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
428 static int sys_inotify_init1(int flags)
429 {
430   return (inotify_init1(flags));
431 }
432 #endif
433 #endif
434 #else
435 /* Userspace can usually survive runtime without inotify */
436 #undef TARGET_NR_inotify_init
437 #undef TARGET_NR_inotify_init1
438 #undef TARGET_NR_inotify_add_watch
439 #undef TARGET_NR_inotify_rm_watch
440 #endif /* CONFIG_INOTIFY  */
441 
442 #if defined(TARGET_NR_prlimit64)
443 #ifndef __NR_prlimit64
444 # define __NR_prlimit64 -1
445 #endif
446 #define __NR_sys_prlimit64 __NR_prlimit64
447 /* The glibc rlimit structure may not be the one used by the underlying syscall */
448 struct host_rlimit64 {
449     uint64_t rlim_cur;
450     uint64_t rlim_max;
451 };
452 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
453           const struct host_rlimit64 *, new_limit,
454           struct host_rlimit64 *, old_limit)
455 #endif
456 
457 
458 #if defined(TARGET_NR_timer_create)
459 /* Maximum of 32 active POSIX timers allowed at any one time. */
460 static timer_t g_posix_timers[32] = { 0, };
461 
462 static inline int next_free_host_timer(void)
463 {
464     int k;
465     /* FIXME: Does finding the next free slot require a lock? */
466     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
467         if (g_posix_timers[k] == 0) {
468             g_posix_timers[k] = (timer_t) 1;
469             return k;
470         }
471     }
472     return -1;
473 }
474 #endif
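/* Sketch of the intended use in the timer_create handling further down
 * (simplified; exact variable names may differ):
 *
 *     int timer_index = next_free_host_timer();
 *     if (timer_index < 0) {
 *         return -TARGET_EAGAIN;          // all 32 slots are in use
 *     }
 *     ret = get_errno(timer_create(clkid, phost_sevp,
 *                                  &g_posix_timers[timer_index]));
 */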
475 
476 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
477 #ifdef TARGET_ARM
478 static inline int regpairs_aligned(void *cpu_env, int num)
479 {
480     return ((((CPUARMState *)cpu_env)->eabi) == 1);
481 }
482 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
483 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
484 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
485 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
486  * of registers which translates to the same as ARM/MIPS, because we start with
487  * r3 as arg1 */
488 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
489 #elif defined(TARGET_SH4)
490 /* SH4 doesn't align register pairs, except for p{read,write}64 */
491 static inline int regpairs_aligned(void *cpu_env, int num)
492 {
493     switch (num) {
494     case TARGET_NR_pread64:
495     case TARGET_NR_pwrite64:
496         return 1;
497 
498     default:
499         return 0;
500     }
501 }
502 #elif defined(TARGET_XTENSA)
503 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
504 #else
505 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
506 #endif
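/* Sketch of how callers use this predicate when decoding a 64-bit argument
 * split across two 32-bit registers (as done for pread64 and pwrite64
 * further down; target_offset64() recombines the two halves):
 *
 *     if (regpairs_aligned(cpu_env, num)) {
 *         arg4 = arg5;    // skip the padding register
 *         arg5 = arg6;
 *     }
 *     ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
 */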
507 
508 #define ERRNO_TABLE_SIZE 1200
509 
510 /* target_to_host_errno_table[] is initialized from
511  * host_to_target_errno_table[] in syscall_init(). */
512 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
513 };
514 
515 /*
516  * This list is the union of errno values overridden in asm-<arch>/errno.h
517  * minus the errnos that are not actually generic to all archs.
518  */
519 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
520     [EAGAIN]		= TARGET_EAGAIN,
521     [EIDRM]		= TARGET_EIDRM,
522     [ECHRNG]		= TARGET_ECHRNG,
523     [EL2NSYNC]		= TARGET_EL2NSYNC,
524     [EL3HLT]		= TARGET_EL3HLT,
525     [EL3RST]		= TARGET_EL3RST,
526     [ELNRNG]		= TARGET_ELNRNG,
527     [EUNATCH]		= TARGET_EUNATCH,
528     [ENOCSI]		= TARGET_ENOCSI,
529     [EL2HLT]		= TARGET_EL2HLT,
530     [EDEADLK]		= TARGET_EDEADLK,
531     [ENOLCK]		= TARGET_ENOLCK,
532     [EBADE]		= TARGET_EBADE,
533     [EBADR]		= TARGET_EBADR,
534     [EXFULL]		= TARGET_EXFULL,
535     [ENOANO]		= TARGET_ENOANO,
536     [EBADRQC]		= TARGET_EBADRQC,
537     [EBADSLT]		= TARGET_EBADSLT,
538     [EBFONT]		= TARGET_EBFONT,
539     [ENOSTR]		= TARGET_ENOSTR,
540     [ENODATA]		= TARGET_ENODATA,
541     [ETIME]		= TARGET_ETIME,
542     [ENOSR]		= TARGET_ENOSR,
543     [ENONET]		= TARGET_ENONET,
544     [ENOPKG]		= TARGET_ENOPKG,
545     [EREMOTE]		= TARGET_EREMOTE,
546     [ENOLINK]		= TARGET_ENOLINK,
547     [EADV]		= TARGET_EADV,
548     [ESRMNT]		= TARGET_ESRMNT,
549     [ECOMM]		= TARGET_ECOMM,
550     [EPROTO]		= TARGET_EPROTO,
551     [EDOTDOT]		= TARGET_EDOTDOT,
552     [EMULTIHOP]		= TARGET_EMULTIHOP,
553     [EBADMSG]		= TARGET_EBADMSG,
554     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
555     [EOVERFLOW]		= TARGET_EOVERFLOW,
556     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
557     [EBADFD]		= TARGET_EBADFD,
558     [EREMCHG]		= TARGET_EREMCHG,
559     [ELIBACC]		= TARGET_ELIBACC,
560     [ELIBBAD]		= TARGET_ELIBBAD,
561     [ELIBSCN]		= TARGET_ELIBSCN,
562     [ELIBMAX]		= TARGET_ELIBMAX,
563     [ELIBEXEC]		= TARGET_ELIBEXEC,
564     [EILSEQ]		= TARGET_EILSEQ,
565     [ENOSYS]		= TARGET_ENOSYS,
566     [ELOOP]		= TARGET_ELOOP,
567     [ERESTART]		= TARGET_ERESTART,
568     [ESTRPIPE]		= TARGET_ESTRPIPE,
569     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
570     [EUSERS]		= TARGET_EUSERS,
571     [ENOTSOCK]		= TARGET_ENOTSOCK,
572     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
573     [EMSGSIZE]		= TARGET_EMSGSIZE,
574     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
575     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
576     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
577     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
578     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
579     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
580     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
581     [EADDRINUSE]	= TARGET_EADDRINUSE,
582     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
583     [ENETDOWN]		= TARGET_ENETDOWN,
584     [ENETUNREACH]	= TARGET_ENETUNREACH,
585     [ENETRESET]		= TARGET_ENETRESET,
586     [ECONNABORTED]	= TARGET_ECONNABORTED,
587     [ECONNRESET]	= TARGET_ECONNRESET,
588     [ENOBUFS]		= TARGET_ENOBUFS,
589     [EISCONN]		= TARGET_EISCONN,
590     [ENOTCONN]		= TARGET_ENOTCONN,
591     [EUCLEAN]		= TARGET_EUCLEAN,
592     [ENOTNAM]		= TARGET_ENOTNAM,
593     [ENAVAIL]		= TARGET_ENAVAIL,
594     [EISNAM]		= TARGET_EISNAM,
595     [EREMOTEIO]		= TARGET_EREMOTEIO,
596     [EDQUOT]            = TARGET_EDQUOT,
597     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
598     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
599     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
600     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
601     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
602     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
603     [EALREADY]		= TARGET_EALREADY,
604     [EINPROGRESS]	= TARGET_EINPROGRESS,
605     [ESTALE]		= TARGET_ESTALE,
606     [ECANCELED]		= TARGET_ECANCELED,
607     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
608     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
609 #ifdef ENOKEY
610     [ENOKEY]		= TARGET_ENOKEY,
611 #endif
612 #ifdef EKEYEXPIRED
613     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
614 #endif
615 #ifdef EKEYREVOKED
616     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
617 #endif
618 #ifdef EKEYREJECTED
619     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
620 #endif
621 #ifdef EOWNERDEAD
622     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
623 #endif
624 #ifdef ENOTRECOVERABLE
625     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
626 #endif
627 #ifdef ENOMSG
628     [ENOMSG]            = TARGET_ENOMSG,
629 #endif
630 #ifdef ERFKILL
631     [ERFKILL]           = TARGET_ERFKILL,
632 #endif
633 #ifdef EHWPOISON
634     [EHWPOISON]         = TARGET_EHWPOISON,
635 #endif
636 };
637 
638 static inline int host_to_target_errno(int err)
639 {
640     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
641         host_to_target_errno_table[err]) {
642         return host_to_target_errno_table[err];
643     }
644     return err;
645 }
646 
647 static inline int target_to_host_errno(int err)
648 {
649     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
650         target_to_host_errno_table[err]) {
651         return target_to_host_errno_table[err];
652     }
653     return err;
654 }
655 
656 static inline abi_long get_errno(abi_long ret)
657 {
658     if (ret == -1)
659         return -host_to_target_errno(errno);
660     else
661         return ret;
662 }
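/* Typical pattern in the syscall dispatcher (a sketch):
 *
 *     ret = get_errno(safe_read(arg1, p, arg3));
 *     if (is_error(ret)) {
 *         // ret already holds a -TARGET_Exxx value
 *     }
 */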
663 
664 const char *target_strerror(int err)
665 {
666     if (err == TARGET_ERESTARTSYS) {
667         return "To be restarted";
668     }
669     if (err == TARGET_QEMU_ESIGRETURN) {
670         return "Successful exit from sigreturn";
671     }
672 
673     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
674         return NULL;
675     }
676     return strerror(target_to_host_errno(err));
677 }
678 
679 #define safe_syscall0(type, name) \
680 static type safe_##name(void) \
681 { \
682     return safe_syscall(__NR_##name); \
683 }
684 
685 #define safe_syscall1(type, name, type1, arg1) \
686 static type safe_##name(type1 arg1) \
687 { \
688     return safe_syscall(__NR_##name, arg1); \
689 }
690 
691 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
692 static type safe_##name(type1 arg1, type2 arg2) \
693 { \
694     return safe_syscall(__NR_##name, arg1, arg2); \
695 }
696 
697 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
698 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
699 { \
700     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
701 }
702 
703 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
704     type4, arg4) \
705 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
706 { \
707     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
708 }
709 
710 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
711     type4, arg4, type5, arg5) \
712 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
713     type5 arg5) \
714 { \
715     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
716 }
717 
718 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
719     type4, arg4, type5, arg5, type6, arg6) \
720 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
721     type5 arg5, type6 arg6) \
722 { \
723     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
724 }
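/* These expand just like the _syscallN macros above, except that the
 * wrapper is named safe_<name> and goes through safe_syscall(), which
 * closes the race between guest signal delivery and blocking host
 * syscalls.  For example:
 *
 *     safe_syscall2(int, kill, pid_t, pid, int, sig)
 *
 * becomes:
 *
 *     static int safe_kill(pid_t pid, int sig)
 *     {
 *         return safe_syscall(__NR_kill, pid, sig);
 *     }
 */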
725 
726 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
727 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
728 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
729               int, flags, mode_t, mode)
730 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
731               struct rusage *, rusage)
732 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
733               int, options, struct rusage *, rusage)
734 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
735 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
736               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
737 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
738               struct timespec *, tsp, const sigset_t *, sigmask,
739               size_t, sigsetsize)
740 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
741               int, maxevents, int, timeout, const sigset_t *, sigmask,
742               size_t, sigsetsize)
743 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
744               const struct timespec *,timeout,int *,uaddr2,int,val3)
745 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
746 safe_syscall2(int, kill, pid_t, pid, int, sig)
747 safe_syscall2(int, tkill, int, tid, int, sig)
748 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
749 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
750 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
751 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
752               unsigned long, pos_l, unsigned long, pos_h)
753 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
754               unsigned long, pos_l, unsigned long, pos_h)
755 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
756               socklen_t, addrlen)
757 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
758               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
759 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
760               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
761 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
762 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
763 safe_syscall2(int, flock, int, fd, int, operation)
764 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
765               const struct timespec *, uts, size_t, sigsetsize)
766 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
767               int, flags)
768 safe_syscall2(int, nanosleep, const struct timespec *, req,
769               struct timespec *, rem)
770 #ifdef TARGET_NR_clock_nanosleep
771 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
772               const struct timespec *, req, struct timespec *, rem)
773 #endif
774 #ifdef __NR_msgsnd
775 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
776               int, flags)
777 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
778               long, msgtype, int, flags)
779 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
780               unsigned, nsops, const struct timespec *, timeout)
781 #else
782 /* This host kernel architecture uses a single ipc syscall; fake up
783  * wrappers for the sub-operations to hide this implementation detail.
784  * Annoyingly we can't include linux/ipc.h to get the constant definitions
785  * for the call parameter because some structs in there conflict with the
786  * sys/ipc.h ones. So we just define them here, and rely on them being
787  * the same for all host architectures.
788  */
789 #define Q_SEMTIMEDOP 4
790 #define Q_MSGSND 11
791 #define Q_MSGRCV 12
792 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
793 
794 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
795               void *, ptr, long, fifth)
796 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
797 {
798     return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
799 }
800 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
801 {
802     return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
803 }
804 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
805                            const struct timespec *timeout)
806 {
807     return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
808                     (long)timeout);
809 }
810 #endif
811 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
812 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
813               size_t, len, unsigned, prio, const struct timespec *, timeout)
814 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
815               size_t, len, unsigned *, prio, const struct timespec *, timeout)
816 #endif
817 /* We do ioctl like this rather than via safe_syscall3 to preserve the
818  * "third argument might be integer or pointer or not present" behaviour of
819  * the libc function.
820  */
821 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
822 /* Similarly for fcntl. Note that callers must always:
823  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
824  *  use the flock64 struct rather than unsuffixed flock
825  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
826  */
827 #ifdef __NR_fcntl64
828 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
829 #else
830 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
831 #endif
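/* Example of the calling convention required above (a sketch; the real
 * fcntl emulation converts the guest lock structure first):
 *
 *     struct flock64 fl64;
 *     ...
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * Passing F_GETLK or a plain struct flock here would break on hosts
 * where __NR_fcntl64 is the syscall actually issued.
 */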
832 
833 static inline int host_to_target_sock_type(int host_type)
834 {
835     int target_type;
836 
837     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
838     case SOCK_DGRAM:
839         target_type = TARGET_SOCK_DGRAM;
840         break;
841     case SOCK_STREAM:
842         target_type = TARGET_SOCK_STREAM;
843         break;
844     default:
845         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
846         break;
847     }
848 
849 #if defined(SOCK_CLOEXEC)
850     if (host_type & SOCK_CLOEXEC) {
851         target_type |= TARGET_SOCK_CLOEXEC;
852     }
853 #endif
854 
855 #if defined(SOCK_NONBLOCK)
856     if (host_type & SOCK_NONBLOCK) {
857         target_type |= TARGET_SOCK_NONBLOCK;
858     }
859 #endif
860 
861     return target_type;
862 }
863 
864 static abi_ulong target_brk;
865 static abi_ulong target_original_brk;
866 static abi_ulong brk_page;
867 
868 void target_set_brk(abi_ulong new_brk)
869 {
870     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
871     brk_page = HOST_PAGE_ALIGN(target_brk);
872 }
873 
874 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
875 #define DEBUGF_BRK(message, args...)
876 
877 /* do_brk() must return target values and target errnos. */
878 abi_long do_brk(abi_ulong new_brk)
879 {
880     abi_long mapped_addr;
881     abi_ulong new_alloc_size;
882 
883     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
884 
885     if (!new_brk) {
886         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
887         return target_brk;
888     }
889     if (new_brk < target_original_brk) {
890         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
891                    target_brk);
892         return target_brk;
893     }
894 
895     /* If the new brk is less than the highest page reserved to the
896      * target heap allocation, set it and we're almost done...  */
897     if (new_brk <= brk_page) {
898         /* Heap contents are initialized to zero, as for anonymous
899          * mapped pages.  */
900         if (new_brk > target_brk) {
901             memset(g2h(target_brk), 0, new_brk - target_brk);
902         }
903         target_brk = new_brk;
904         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
905         return target_brk;
906     }
907 
908     /* We need to allocate more memory after the brk... Note that
909      * we don't use MAP_FIXED because that will map over the top of
910      * any existing mapping (like the one with the host libc or qemu
911      * itself); instead we treat "mapped but at wrong address" as
912      * a failure and unmap again.
913      */
914     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
915     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
916                                         PROT_READ|PROT_WRITE,
917                                         MAP_ANON|MAP_PRIVATE, 0, 0));
918 
919     if (mapped_addr == brk_page) {
920         /* Heap contents are initialized to zero, as for anonymous
921          * mapped pages.  Technically the new pages are already
922          * initialized to zero since they *are* anonymous mapped
923          * pages, however we have to take care with the contents that
924          * come from the remaining part of the previous page: it may
925          * contains garbage data due to a previous heap usage (grown
926          * contain garbage data due to a previous heap usage (grown
927          * then shrunk).  */
928 
929         target_brk = new_brk;
930         brk_page = HOST_PAGE_ALIGN(target_brk);
931         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
932             target_brk);
933         return target_brk;
934     } else if (mapped_addr != -1) {
935         /* Mapped but at wrong address, meaning there wasn't actually
936          * enough space for this brk.
937          */
938         target_munmap(mapped_addr, new_alloc_size);
939         mapped_addr = -1;
940         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
941     }
942     else {
943         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
944     }
945 
946 #if defined(TARGET_ALPHA)
947     /* We (partially) emulate OSF/1 on Alpha, which requires we
948        return a proper errno, not an unchanged brk value.  */
949     return -TARGET_ENOMEM;
950 #endif
951     /* For everything else, return the previous break. */
952     return target_brk;
953 }
954 
955 static inline abi_long copy_from_user_fdset(fd_set *fds,
956                                             abi_ulong target_fds_addr,
957                                             int n)
958 {
959     int i, nw, j, k;
960     abi_ulong b, *target_fds;
961 
962     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
963     if (!(target_fds = lock_user(VERIFY_READ,
964                                  target_fds_addr,
965                                  sizeof(abi_ulong) * nw,
966                                  1)))
967         return -TARGET_EFAULT;
968 
969     FD_ZERO(fds);
970     k = 0;
971     for (i = 0; i < nw; i++) {
972         /* grab the abi_ulong */
973         __get_user(b, &target_fds[i]);
974         for (j = 0; j < TARGET_ABI_BITS; j++) {
975             /* check the bit inside the abi_ulong */
976             if ((b >> j) & 1)
977                 FD_SET(k, fds);
978             k++;
979         }
980     }
981 
982     unlock_user(target_fds, target_fds_addr, 0);
983 
984     return 0;
985 }
986 
987 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
988                                                  abi_ulong target_fds_addr,
989                                                  int n)
990 {
991     if (target_fds_addr) {
992         if (copy_from_user_fdset(fds, target_fds_addr, n))
993             return -TARGET_EFAULT;
994         *fds_ptr = fds;
995     } else {
996         *fds_ptr = NULL;
997     }
998     return 0;
999 }
1000 
1001 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1002                                           const fd_set *fds,
1003                                           int n)
1004 {
1005     int i, nw, j, k;
1006     abi_long v;
1007     abi_ulong *target_fds;
1008 
1009     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1010     if (!(target_fds = lock_user(VERIFY_WRITE,
1011                                  target_fds_addr,
1012                                  sizeof(abi_ulong) * nw,
1013                                  0)))
1014         return -TARGET_EFAULT;
1015 
1016     k = 0;
1017     for (i = 0; i < nw; i++) {
1018         v = 0;
1019         for (j = 0; j < TARGET_ABI_BITS; j++) {
1020             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1021             k++;
1022         }
1023         __put_user(v, &target_fds[i]);
1024     }
1025 
1026     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1027 
1028     return 0;
1029 }
1030 
1031 #if defined(__alpha__)
1032 #define HOST_HZ 1024
1033 #else
1034 #define HOST_HZ 100
1035 #endif
1036 
1037 static inline abi_long host_to_target_clock_t(long ticks)
1038 {
1039 #if HOST_HZ == TARGET_HZ
1040     return ticks;
1041 #else
1042     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1043 #endif
1044 }
1045 
1046 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1047                                              const struct rusage *rusage)
1048 {
1049     struct target_rusage *target_rusage;
1050 
1051     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1052         return -TARGET_EFAULT;
1053     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1054     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1055     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1056     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1057     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1058     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1059     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1060     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1061     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1062     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1063     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1064     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1065     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1066     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1067     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1068     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1069     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1070     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1071     unlock_user_struct(target_rusage, target_addr, 1);
1072 
1073     return 0;
1074 }
1075 
1076 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1077 {
1078     abi_ulong target_rlim_swap;
1079     rlim_t result;
1080 
1081     target_rlim_swap = tswapal(target_rlim);
1082     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1083         return RLIM_INFINITY;
1084 
1085     result = target_rlim_swap;
1086     if (target_rlim_swap != (rlim_t)result)
1087         return RLIM_INFINITY;
1088 
1089     return result;
1090 }
1091 
1092 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1093 {
1094     abi_ulong target_rlim_swap;
1095     abi_ulong result;
1096 
1097     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1098         target_rlim_swap = TARGET_RLIM_INFINITY;
1099     else
1100         target_rlim_swap = rlim;
1101     result = tswapal(target_rlim_swap);
1102 
1103     return result;
1104 }
1105 
1106 static inline int target_to_host_resource(int code)
1107 {
1108     switch (code) {
1109     case TARGET_RLIMIT_AS:
1110         return RLIMIT_AS;
1111     case TARGET_RLIMIT_CORE:
1112         return RLIMIT_CORE;
1113     case TARGET_RLIMIT_CPU:
1114         return RLIMIT_CPU;
1115     case TARGET_RLIMIT_DATA:
1116         return RLIMIT_DATA;
1117     case TARGET_RLIMIT_FSIZE:
1118         return RLIMIT_FSIZE;
1119     case TARGET_RLIMIT_LOCKS:
1120         return RLIMIT_LOCKS;
1121     case TARGET_RLIMIT_MEMLOCK:
1122         return RLIMIT_MEMLOCK;
1123     case TARGET_RLIMIT_MSGQUEUE:
1124         return RLIMIT_MSGQUEUE;
1125     case TARGET_RLIMIT_NICE:
1126         return RLIMIT_NICE;
1127     case TARGET_RLIMIT_NOFILE:
1128         return RLIMIT_NOFILE;
1129     case TARGET_RLIMIT_NPROC:
1130         return RLIMIT_NPROC;
1131     case TARGET_RLIMIT_RSS:
1132         return RLIMIT_RSS;
1133     case TARGET_RLIMIT_RTPRIO:
1134         return RLIMIT_RTPRIO;
1135     case TARGET_RLIMIT_SIGPENDING:
1136         return RLIMIT_SIGPENDING;
1137     case TARGET_RLIMIT_STACK:
1138         return RLIMIT_STACK;
1139     default:
1140         return code;
1141     }
1142 }
1143 
1144 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1145                                               abi_ulong target_tv_addr)
1146 {
1147     struct target_timeval *target_tv;
1148 
1149     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1150         return -TARGET_EFAULT;
1151 
1152     __get_user(tv->tv_sec, &target_tv->tv_sec);
1153     __get_user(tv->tv_usec, &target_tv->tv_usec);
1154 
1155     unlock_user_struct(target_tv, target_tv_addr, 0);
1156 
1157     return 0;
1158 }
1159 
1160 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1161                                             const struct timeval *tv)
1162 {
1163     struct target_timeval *target_tv;
1164 
1165     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1166         return -TARGET_EFAULT;
1167 
1168     __put_user(tv->tv_sec, &target_tv->tv_sec);
1169     __put_user(tv->tv_usec, &target_tv->tv_usec);
1170 
1171     unlock_user_struct(target_tv, target_tv_addr, 1);
1172 
1173     return 0;
1174 }
1175 
1176 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1177                                                abi_ulong target_tz_addr)
1178 {
1179     struct target_timezone *target_tz;
1180 
1181     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1182         return -TARGET_EFAULT;
1183     }
1184 
1185     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1186     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1187 
1188     unlock_user_struct(target_tz, target_tz_addr, 0);
1189 
1190     return 0;
1191 }
1192 
1193 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1194 #include <mqueue.h>
1195 
1196 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1197                                               abi_ulong target_mq_attr_addr)
1198 {
1199     struct target_mq_attr *target_mq_attr;
1200 
1201     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1202                           target_mq_attr_addr, 1))
1203         return -TARGET_EFAULT;
1204 
1205     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1206     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1207     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1208     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1209 
1210     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1211 
1212     return 0;
1213 }
1214 
1215 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1216                                             const struct mq_attr *attr)
1217 {
1218     struct target_mq_attr *target_mq_attr;
1219 
1220     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1221                           target_mq_attr_addr, 0))
1222         return -TARGET_EFAULT;
1223 
1224     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1225     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1226     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1227     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1228 
1229     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1230 
1231     return 0;
1232 }
1233 #endif
1234 
1235 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1236 /* do_select() must return target values and target errnos. */
1237 static abi_long do_select(int n,
1238                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1239                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1240 {
1241     fd_set rfds, wfds, efds;
1242     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1243     struct timeval tv;
1244     struct timespec ts, *ts_ptr;
1245     abi_long ret;
1246 
1247     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1248     if (ret) {
1249         return ret;
1250     }
1251     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1252     if (ret) {
1253         return ret;
1254     }
1255     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1256     if (ret) {
1257         return ret;
1258     }
1259 
1260     if (target_tv_addr) {
1261         if (copy_from_user_timeval(&tv, target_tv_addr))
1262             return -TARGET_EFAULT;
1263         ts.tv_sec = tv.tv_sec;
1264         ts.tv_nsec = tv.tv_usec * 1000;
1265         ts_ptr = &ts;
1266     } else {
1267         ts_ptr = NULL;
1268     }
1269 
1270     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1271                                   ts_ptr, NULL));
1272 
1273     if (!is_error(ret)) {
1274         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1275             return -TARGET_EFAULT;
1276         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1277             return -TARGET_EFAULT;
1278         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1279             return -TARGET_EFAULT;
1280 
1281         if (target_tv_addr) {
1282             tv.tv_sec = ts.tv_sec;
1283             tv.tv_usec = ts.tv_nsec / 1000;
1284             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1285                 return -TARGET_EFAULT;
1286             }
1287         }
1288     }
1289 
1290     return ret;
1291 }
1292 
1293 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1294 static abi_long do_old_select(abi_ulong arg1)
1295 {
1296     struct target_sel_arg_struct *sel;
1297     abi_ulong inp, outp, exp, tvp;
1298     long nsel;
1299 
1300     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1301         return -TARGET_EFAULT;
1302     }
1303 
1304     nsel = tswapal(sel->n);
1305     inp = tswapal(sel->inp);
1306     outp = tswapal(sel->outp);
1307     exp = tswapal(sel->exp);
1308     tvp = tswapal(sel->tvp);
1309 
1310     unlock_user_struct(sel, arg1, 0);
1311 
1312     return do_select(nsel, inp, outp, exp, tvp);
1313 }
1314 #endif
1315 #endif
1316 
1317 static abi_long do_pipe2(int host_pipe[], int flags)
1318 {
1319 #ifdef CONFIG_PIPE2
1320     return pipe2(host_pipe, flags);
1321 #else
1322     return -ENOSYS;
1323 #endif
1324 }
1325 
1326 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1327                         int flags, int is_pipe2)
1328 {
1329     int host_pipe[2];
1330     abi_long ret;
1331     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1332 
1333     if (is_error(ret))
1334         return get_errno(ret);
1335 
1336     /* Several targets have special calling conventions for the original
1337        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1338     if (!is_pipe2) {
1339 #if defined(TARGET_ALPHA)
1340         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1341         return host_pipe[0];
1342 #elif defined(TARGET_MIPS)
1343         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1344         return host_pipe[0];
1345 #elif defined(TARGET_SH4)
1346         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1347         return host_pipe[0];
1348 #elif defined(TARGET_SPARC)
1349         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1350         return host_pipe[0];
1351 #endif
1352     }
1353 
1354     if (put_user_s32(host_pipe[0], pipedes)
1355         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1356         return -TARGET_EFAULT;
1357     return get_errno(ret);
1358 }
1359 
1360 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1361                                               abi_ulong target_addr,
1362                                               socklen_t len)
1363 {
1364     struct target_ip_mreqn *target_smreqn;
1365 
1366     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1367     if (!target_smreqn)
1368         return -TARGET_EFAULT;
1369     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1370     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1371     if (len == sizeof(struct target_ip_mreqn))
1372         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1373     unlock_user(target_smreqn, target_addr, 0);
1374 
1375     return 0;
1376 }
1377 
1378 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1379                                                abi_ulong target_addr,
1380                                                socklen_t len)
1381 {
1382     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1383     sa_family_t sa_family;
1384     struct target_sockaddr *target_saddr;
1385 
1386     if (fd_trans_target_to_host_addr(fd)) {
1387         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1388     }
1389 
1390     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1391     if (!target_saddr)
1392         return -TARGET_EFAULT;
1393 
1394     sa_family = tswap16(target_saddr->sa_family);
1395 
1396     /* Oops. The caller might send an incomplete sun_path; sun_path
1397      * must be terminated by \0 (see the manual page), but
1398      * unfortunately it is quite common to specify sockaddr_un
1399      * length as "strlen(x->sun_path)" while it should be
1400      * "strlen(...) + 1". We'll fix that here if needed.
1401      * The Linux kernel has a similar feature.
1402      */
1403 
1404     if (sa_family == AF_UNIX) {
1405         if (len < unix_maxlen && len > 0) {
1406             char *cp = (char*)target_saddr;
1407 
1408             if ( cp[len-1] && !cp[len] )
1409                 len++;
1410         }
1411         if (len > unix_maxlen)
1412             len = unix_maxlen;
1413     }
1414 
1415     memcpy(addr, target_saddr, len);
1416     addr->sa_family = sa_family;
1417     if (sa_family == AF_NETLINK) {
1418         struct sockaddr_nl *nladdr;
1419 
1420         nladdr = (struct sockaddr_nl *)addr;
1421         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1422         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1423     } else if (sa_family == AF_PACKET) {
1424         struct target_sockaddr_ll *lladdr;
1425 
1426         lladdr = (struct target_sockaddr_ll *)addr;
1427         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1428         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1429     }
1430     unlock_user(target_saddr, target_addr, 0);
1431 
1432     return 0;
1433 }
1434 
1435 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1436                                                struct sockaddr *addr,
1437                                                socklen_t len)
1438 {
1439     struct target_sockaddr *target_saddr;
1440 
1441     if (len == 0) {
1442         return 0;
1443     }
1444     assert(addr);
1445 
1446     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1447     if (!target_saddr)
1448         return -TARGET_EFAULT;
1449     memcpy(target_saddr, addr, len);
1450     if (len >= offsetof(struct target_sockaddr, sa_family) +
1451         sizeof(target_saddr->sa_family)) {
1452         target_saddr->sa_family = tswap16(addr->sa_family);
1453     }
1454     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1455         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1456         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1457         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1458     } else if (addr->sa_family == AF_PACKET) {
1459         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1460         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1461         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1462     } else if (addr->sa_family == AF_INET6 &&
1463                len >= sizeof(struct target_sockaddr_in6)) {
1464         struct target_sockaddr_in6 *target_in6 =
1465                (struct target_sockaddr_in6 *)target_saddr;
1466         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1467     }
1468     unlock_user(target_saddr, target_addr, len);
1469 
1470     return 0;
1471 }
1472 
1473 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1474                                            struct target_msghdr *target_msgh)
1475 {
1476     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1477     abi_long msg_controllen;
1478     abi_ulong target_cmsg_addr;
1479     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1480     socklen_t space = 0;
1481 
1482     msg_controllen = tswapal(target_msgh->msg_controllen);
1483     if (msg_controllen < sizeof (struct target_cmsghdr))
1484         goto the_end;
1485     target_cmsg_addr = tswapal(target_msgh->msg_control);
1486     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1487     target_cmsg_start = target_cmsg;
1488     if (!target_cmsg)
1489         return -TARGET_EFAULT;
1490 
1491     while (cmsg && target_cmsg) {
1492         void *data = CMSG_DATA(cmsg);
1493         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1494 
1495         int len = tswapal(target_cmsg->cmsg_len)
1496             - sizeof(struct target_cmsghdr);
1497 
1498         space += CMSG_SPACE(len);
1499         if (space > msgh->msg_controllen) {
1500             space -= CMSG_SPACE(len);
1501             /* This is a QEMU bug, since we allocated the payload
1502              * area ourselves (unlike overflow in host-to-target
1503              * conversion, which is just the guest giving us a buffer
1504              * that's too small). It can't happen for the payload types
1505              * we currently support; if it becomes an issue in future
1506              * we would need to improve our allocation strategy to
1507              * something more intelligent than "twice the size of the
1508              * target buffer we're reading from".
1509              */
1510             gemu_log("Host cmsg overflow\n");
1511             break;
1512         }
1513 
1514         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1515             cmsg->cmsg_level = SOL_SOCKET;
1516         } else {
1517             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1518         }
1519         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1520         cmsg->cmsg_len = CMSG_LEN(len);
1521 
1522         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1523             int *fd = (int *)data;
1524             int *target_fd = (int *)target_data;
1525             int i, numfds = len / sizeof(int);
1526 
1527             for (i = 0; i < numfds; i++) {
1528                 __get_user(fd[i], target_fd + i);
1529             }
1530         } else if (cmsg->cmsg_level == SOL_SOCKET
1531                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1532             struct ucred *cred = (struct ucred *)data;
1533             struct target_ucred *target_cred =
1534                 (struct target_ucred *)target_data;
1535 
1536             __get_user(cred->pid, &target_cred->pid);
1537             __get_user(cred->uid, &target_cred->uid);
1538             __get_user(cred->gid, &target_cred->gid);
1539         } else {
1540             gemu_log("Unsupported ancillary data: %d/%d\n",
1541                                         cmsg->cmsg_level, cmsg->cmsg_type);
1542             memcpy(data, target_data, len);
1543         }
1544 
1545         cmsg = CMSG_NXTHDR(msgh, cmsg);
1546         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1547                                          target_cmsg_start);
1548     }
1549     unlock_user(target_cmsg, target_cmsg_addr, 0);
1550  the_end:
1551     msgh->msg_controllen = space;
1552     return 0;
1553 }
1554 
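/* Convert host ancillary data in msgh back into the guest control buffer
 * described by target_msgh.  Payloads that need a different size on the
 * target (e.g. SO_TIMESTAMP's struct timeval) are resized, and MSG_CTRUNC is
 * reported to the guest when its buffer is too small.  Returns 0 on success
 * or -TARGET_EFAULT if the guest control buffer cannot be locked.
 */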
1555 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1556                                            struct msghdr *msgh)
1557 {
1558     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1559     abi_long msg_controllen;
1560     abi_ulong target_cmsg_addr;
1561     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1562     socklen_t space = 0;
1563 
1564     msg_controllen = tswapal(target_msgh->msg_controllen);
1565     if (msg_controllen < sizeof (struct target_cmsghdr))
1566         goto the_end;
1567     target_cmsg_addr = tswapal(target_msgh->msg_control);
1568     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1569     target_cmsg_start = target_cmsg;
1570     if (!target_cmsg)
1571         return -TARGET_EFAULT;
1572 
1573     while (cmsg && target_cmsg) {
1574         void *data = CMSG_DATA(cmsg);
1575         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1576 
1577         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1578         int tgt_len, tgt_space;
1579 
1580         /* We never copy a half-header but may copy half-data;
1581          * this is Linux's behaviour in put_cmsg(). Note that
1582          * truncation here is a guest problem (which we report
1583          * to the guest via the CTRUNC bit), unlike truncation
1584          * in target_to_host_cmsg, which is a QEMU bug.
1585          */
1586         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1587             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1588             break;
1589         }
1590 
1591         if (cmsg->cmsg_level == SOL_SOCKET) {
1592             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1593         } else {
1594             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1595         }
1596         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1597 
1598         /* Payload types which need a different size of payload on
1599          * the target must adjust tgt_len here.
1600          */
1601         tgt_len = len;
1602         switch (cmsg->cmsg_level) {
1603         case SOL_SOCKET:
1604             switch (cmsg->cmsg_type) {
1605             case SO_TIMESTAMP:
1606                 tgt_len = sizeof(struct target_timeval);
1607                 break;
1608             default:
1609                 break;
1610             }
1611             break;
1612         default:
1613             break;
1614         }
1615 
1616         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1617             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1618             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1619         }
1620 
1621         /* We must now copy-and-convert len bytes of payload
1622          * into tgt_len bytes of destination space. Bear in mind
1623          * that in both source and destination we may be dealing
1624          * with a truncated value!
1625          */
1626         switch (cmsg->cmsg_level) {
1627         case SOL_SOCKET:
1628             switch (cmsg->cmsg_type) {
1629             case SCM_RIGHTS:
1630             {
1631                 int *fd = (int *)data;
1632                 int *target_fd = (int *)target_data;
1633                 int i, numfds = tgt_len / sizeof(int);
1634 
1635                 for (i = 0; i < numfds; i++) {
1636                     __put_user(fd[i], target_fd + i);
1637                 }
1638                 break;
1639             }
1640             case SO_TIMESTAMP:
1641             {
1642                 struct timeval *tv = (struct timeval *)data;
1643                 struct target_timeval *target_tv =
1644                     (struct target_timeval *)target_data;
1645 
1646                 if (len != sizeof(struct timeval) ||
1647                     tgt_len != sizeof(struct target_timeval)) {
1648                     goto unimplemented;
1649                 }
1650 
1651                 /* copy struct timeval to target */
1652                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1653                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1654                 break;
1655             }
1656             case SCM_CREDENTIALS:
1657             {
1658                 struct ucred *cred = (struct ucred *)data;
1659                 struct target_ucred *target_cred =
1660                     (struct target_ucred *)target_data;
1661 
1662                 __put_user(cred->pid, &target_cred->pid);
1663                 __put_user(cred->uid, &target_cred->uid);
1664                 __put_user(cred->gid, &target_cred->gid);
1665                 break;
1666             }
1667             default:
1668                 goto unimplemented;
1669             }
1670             break;
1671 
1672         case SOL_IP:
1673             switch (cmsg->cmsg_type) {
1674             case IP_TTL:
1675             {
1676                 uint32_t *v = (uint32_t *)data;
1677                 uint32_t *t_int = (uint32_t *)target_data;
1678 
1679                 if (len != sizeof(uint32_t) ||
1680                     tgt_len != sizeof(uint32_t)) {
1681                     goto unimplemented;
1682                 }
1683                 __put_user(*v, t_int);
1684                 break;
1685             }
1686             case IP_RECVERR:
1687             {
1688                 struct errhdr_t {
1689                    struct sock_extended_err ee;
1690                    struct sockaddr_in offender;
1691                 };
1692                 struct errhdr_t *errh = (struct errhdr_t *)data;
1693                 struct errhdr_t *target_errh =
1694                     (struct errhdr_t *)target_data;
1695 
1696                 if (len != sizeof(struct errhdr_t) ||
1697                     tgt_len != sizeof(struct errhdr_t)) {
1698                     goto unimplemented;
1699                 }
1700                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1701                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1702                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1703                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1704                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1705                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1706                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1707                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1708                     (void *) &errh->offender, sizeof(errh->offender));
1709                 break;
1710             }
1711             default:
1712                 goto unimplemented;
1713             }
1714             break;
1715 
1716         case SOL_IPV6:
1717             switch (cmsg->cmsg_type) {
1718             case IPV6_HOPLIMIT:
1719             {
1720                 uint32_t *v = (uint32_t *)data;
1721                 uint32_t *t_int = (uint32_t *)target_data;
1722 
1723                 if (len != sizeof(uint32_t) ||
1724                     tgt_len != sizeof(uint32_t)) {
1725                     goto unimplemented;
1726                 }
1727                 __put_user(*v, t_int);
1728                 break;
1729             }
1730             case IPV6_RECVERR:
1731             {
1732                 struct errhdr6_t {
1733                    struct sock_extended_err ee;
1734                    struct sockaddr_in6 offender;
1735                 };
1736                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1737                 struct errhdr6_t *target_errh =
1738                     (struct errhdr6_t *)target_data;
1739 
1740                 if (len != sizeof(struct errhdr6_t) ||
1741                     tgt_len != sizeof(struct errhdr6_t)) {
1742                     goto unimplemented;
1743                 }
1744                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1745                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1746                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1747                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1748                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1749                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1750                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1751                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1752                     (void *) &errh->offender, sizeof(errh->offender));
1753                 break;
1754             }
1755             default:
1756                 goto unimplemented;
1757             }
1758             break;
1759 
1760         default:
1761         unimplemented:
1762             gemu_log("Unsupported ancillary data: %d/%d\n",
1763                                         cmsg->cmsg_level, cmsg->cmsg_type);
1764             memcpy(target_data, data, MIN(len, tgt_len));
1765             if (tgt_len > len) {
1766                 memset(target_data + len, 0, tgt_len - len);
1767             }
1768         }
1769 
1770         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1771         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1772         if (msg_controllen < tgt_space) {
1773             tgt_space = msg_controllen;
1774         }
1775         msg_controllen -= tgt_space;
1776         space += tgt_space;
1777         cmsg = CMSG_NXTHDR(msgh, cmsg);
1778         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1779                                          target_cmsg_start);
1780     }
1781     unlock_user(target_cmsg, target_cmsg_addr, space);
1782  the_end:
1783     target_msgh->msg_controllen = tswapal(space);
1784     return 0;
1785 }
1786 
1787 /* do_setsockopt() must return target values and target errnos. */
1788 static abi_long do_setsockopt(int sockfd, int level, int optname,
1789                               abi_ulong optval_addr, socklen_t optlen)
1790 {
1791     abi_long ret;
1792     int val;
1793     struct ip_mreqn *ip_mreq;
1794     struct ip_mreq_source *ip_mreq_source;
1795 
1796     switch(level) {
1797     case SOL_TCP:
1798         /* TCP options all take an 'int' value.  */
1799         if (optlen < sizeof(uint32_t))
1800             return -TARGET_EINVAL;
1801 
1802         if (get_user_u32(val, optval_addr))
1803             return -TARGET_EFAULT;
1804         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1805         break;
1806     case SOL_IP:
1807         switch(optname) {
1808         case IP_TOS:
1809         case IP_TTL:
1810         case IP_HDRINCL:
1811         case IP_ROUTER_ALERT:
1812         case IP_RECVOPTS:
1813         case IP_RETOPTS:
1814         case IP_PKTINFO:
1815         case IP_MTU_DISCOVER:
1816         case IP_RECVERR:
1817         case IP_RECVTTL:
1818         case IP_RECVTOS:
1819 #ifdef IP_FREEBIND
1820         case IP_FREEBIND:
1821 #endif
1822         case IP_MULTICAST_TTL:
1823         case IP_MULTICAST_LOOP:
1824             val = 0;
1825             if (optlen >= sizeof(uint32_t)) {
1826                 if (get_user_u32(val, optval_addr))
1827                     return -TARGET_EFAULT;
1828             } else if (optlen >= 1) {
1829                 if (get_user_u8(val, optval_addr))
1830                     return -TARGET_EFAULT;
1831             }
1832             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1833             break;
1834         case IP_ADD_MEMBERSHIP:
1835         case IP_DROP_MEMBERSHIP:
1836             if (optlen < sizeof (struct target_ip_mreq) ||
1837                 optlen > sizeof (struct target_ip_mreqn))
1838                 return -TARGET_EINVAL;
1839 
1840             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1841             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1842             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1843             break;
1844 
1845         case IP_BLOCK_SOURCE:
1846         case IP_UNBLOCK_SOURCE:
1847         case IP_ADD_SOURCE_MEMBERSHIP:
1848         case IP_DROP_SOURCE_MEMBERSHIP:
1849             if (optlen != sizeof (struct target_ip_mreq_source))
1850                 return -TARGET_EINVAL;
1851 
1852             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1853             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1854             unlock_user (ip_mreq_source, optval_addr, 0);
1855             break;
1856 
1857         default:
1858             goto unimplemented;
1859         }
1860         break;
1861     case SOL_IPV6:
1862         switch (optname) {
1863         case IPV6_MTU_DISCOVER:
1864         case IPV6_MTU:
1865         case IPV6_V6ONLY:
1866         case IPV6_RECVPKTINFO:
1867         case IPV6_UNICAST_HOPS:
1868         case IPV6_MULTICAST_HOPS:
1869         case IPV6_MULTICAST_LOOP:
1870         case IPV6_RECVERR:
1871         case IPV6_RECVHOPLIMIT:
1872         case IPV6_2292HOPLIMIT:
1873         case IPV6_CHECKSUM:
1874             val = 0;
1875             if (optlen < sizeof(uint32_t)) {
1876                 return -TARGET_EINVAL;
1877             }
1878             if (get_user_u32(val, optval_addr)) {
1879                 return -TARGET_EFAULT;
1880             }
1881             ret = get_errno(setsockopt(sockfd, level, optname,
1882                                        &val, sizeof(val)));
1883             break;
1884         case IPV6_PKTINFO:
1885         {
1886             struct in6_pktinfo pki;
1887 
1888             if (optlen < sizeof(pki)) {
1889                 return -TARGET_EINVAL;
1890             }
1891 
1892             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1893                 return -TARGET_EFAULT;
1894             }
1895 
1896             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1897 
1898             ret = get_errno(setsockopt(sockfd, level, optname,
1899                                        &pki, sizeof(pki)));
1900             break;
1901         }
1902         default:
1903             goto unimplemented;
1904         }
1905         break;
1906     case SOL_ICMPV6:
1907         switch (optname) {
1908         case ICMPV6_FILTER:
1909         {
1910             struct icmp6_filter icmp6f;
1911 
1912             if (optlen > sizeof(icmp6f)) {
1913                 optlen = sizeof(icmp6f);
1914             }
1915 
1916             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
1917                 return -TARGET_EFAULT;
1918             }
1919 
1920             for (val = 0; val < 8; val++) {
1921                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
1922             }
1923 
1924             ret = get_errno(setsockopt(sockfd, level, optname,
1925                                        &icmp6f, optlen));
1926             break;
1927         }
1928         default:
1929             goto unimplemented;
1930         }
1931         break;
1932     case SOL_RAW:
1933         switch (optname) {
1934         case ICMP_FILTER:
1935         case IPV6_CHECKSUM:
1936             /* These take a u32 value */
1937             if (optlen < sizeof(uint32_t)) {
1938                 return -TARGET_EINVAL;
1939             }
1940 
1941             if (get_user_u32(val, optval_addr)) {
1942                 return -TARGET_EFAULT;
1943             }
1944             ret = get_errno(setsockopt(sockfd, level, optname,
1945                                        &val, sizeof(val)));
1946             break;
1947 
1948         default:
1949             goto unimplemented;
1950         }
1951         break;
1952     case TARGET_SOL_SOCKET:
1953         switch (optname) {
1954         case TARGET_SO_RCVTIMEO:
1955         {
1956                 struct timeval tv;
1957 
1958                 optname = SO_RCVTIMEO;
1959 
1960 set_timeout:
1961                 if (optlen != sizeof(struct target_timeval)) {
1962                     return -TARGET_EINVAL;
1963                 }
1964 
1965                 if (copy_from_user_timeval(&tv, optval_addr)) {
1966                     return -TARGET_EFAULT;
1967                 }
1968 
1969                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1970                                 &tv, sizeof(tv)));
1971                 return ret;
1972         }
1973         case TARGET_SO_SNDTIMEO:
1974                 optname = SO_SNDTIMEO;
1975                 goto set_timeout;
1976         case TARGET_SO_ATTACH_FILTER:
1977         {
1978                 struct target_sock_fprog *tfprog;
1979                 struct target_sock_filter *tfilter;
1980                 struct sock_fprog fprog;
1981                 struct sock_filter *filter;
1982                 int i;
1983 
1984                 if (optlen != sizeof(*tfprog)) {
1985                     return -TARGET_EINVAL;
1986                 }
1987                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1988                     return -TARGET_EFAULT;
1989                 }
1990                 if (!lock_user_struct(VERIFY_READ, tfilter,
1991                                       tswapal(tfprog->filter), 0)) {
1992                     unlock_user_struct(tfprog, optval_addr, 1);
1993                     return -TARGET_EFAULT;
1994                 }
1995 
1996                 fprog.len = tswap16(tfprog->len);
1997                 filter = g_try_new(struct sock_filter, fprog.len);
1998                 if (filter == NULL) {
1999                     unlock_user_struct(tfilter, tfprog->filter, 1);
2000                     unlock_user_struct(tfprog, optval_addr, 1);
2001                     return -TARGET_ENOMEM;
2002                 }
2003                 for (i = 0; i < fprog.len; i++) {
2004                     filter[i].code = tswap16(tfilter[i].code);
2005                     filter[i].jt = tfilter[i].jt;
2006                     filter[i].jf = tfilter[i].jf;
2007                     filter[i].k = tswap32(tfilter[i].k);
2008                 }
2009                 fprog.filter = filter;
2010 
2011                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2012                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2013                 g_free(filter);
2014 
2015                 unlock_user_struct(tfilter, tfprog->filter, 1);
2016                 unlock_user_struct(tfprog, optval_addr, 1);
2017                 return ret;
2018         }
2019         case TARGET_SO_BINDTODEVICE:
2020         {
2021                 char *dev_ifname, *addr_ifname;
2022 
2023                 if (optlen > IFNAMSIZ - 1) {
2024                     optlen = IFNAMSIZ - 1;
2025                 }
2026                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2027                 if (!dev_ifname) {
2028                     return -TARGET_EFAULT;
2029                 }
2030                 optname = SO_BINDTODEVICE;
2031                 addr_ifname = alloca(IFNAMSIZ);
2032                 memcpy(addr_ifname, dev_ifname, optlen);
2033                 addr_ifname[optlen] = 0;
2034                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2035                                            addr_ifname, optlen));
2036                 unlock_user(dev_ifname, optval_addr, 0);
2037                 return ret;
2038         }
2039         case TARGET_SO_LINGER:
2040         {
2041                 struct linger lg;
2042                 struct target_linger *tlg;
2043 
2044                 if (optlen != sizeof(struct target_linger)) {
2045                     return -TARGET_EINVAL;
2046                 }
2047                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2048                     return -TARGET_EFAULT;
2049                 }
2050                 __get_user(lg.l_onoff, &tlg->l_onoff);
2051                 __get_user(lg.l_linger, &tlg->l_linger);
2052                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2053                                 &lg, sizeof(lg)));
2054                 unlock_user_struct(tlg, optval_addr, 0);
2055                 return ret;
2056         }
2057         /* Options with 'int' argument.  */
2058         case TARGET_SO_DEBUG:
2059                 optname = SO_DEBUG;
2060                 break;
2061         case TARGET_SO_REUSEADDR:
2062                 optname = SO_REUSEADDR;
2063                 break;
2064 #ifdef SO_REUSEPORT
2065         case TARGET_SO_REUSEPORT:
2066                 optname = SO_REUSEPORT;
2067                 break;
2068 #endif
2069         case TARGET_SO_TYPE:
2070                 optname = SO_TYPE;
2071                 break;
2072         case TARGET_SO_ERROR:
2073                 optname = SO_ERROR;
2074                 break;
2075         case TARGET_SO_DONTROUTE:
2076                 optname = SO_DONTROUTE;
2077                 break;
2078         case TARGET_SO_BROADCAST:
2079                 optname = SO_BROADCAST;
2080                 break;
2081         case TARGET_SO_SNDBUF:
2082                 optname = SO_SNDBUF;
2083                 break;
2084         case TARGET_SO_SNDBUFFORCE:
2085                 optname = SO_SNDBUFFORCE;
2086                 break;
2087         case TARGET_SO_RCVBUF:
2088                 optname = SO_RCVBUF;
2089                 break;
2090         case TARGET_SO_RCVBUFFORCE:
2091                 optname = SO_RCVBUFFORCE;
2092                 break;
2093         case TARGET_SO_KEEPALIVE:
2094                 optname = SO_KEEPALIVE;
2095                 break;
2096         case TARGET_SO_OOBINLINE:
2097                 optname = SO_OOBINLINE;
2098                 break;
2099         case TARGET_SO_NO_CHECK:
2100                 optname = SO_NO_CHECK;
2101                 break;
2102         case TARGET_SO_PRIORITY:
2103                 optname = SO_PRIORITY;
2104                 break;
2105 #ifdef SO_BSDCOMPAT
2106         case TARGET_SO_BSDCOMPAT:
2107                 optname = SO_BSDCOMPAT;
2108                 break;
2109 #endif
2110         case TARGET_SO_PASSCRED:
2111                 optname = SO_PASSCRED;
2112                 break;
2113         case TARGET_SO_PASSSEC:
2114                 optname = SO_PASSSEC;
2115                 break;
2116         case TARGET_SO_TIMESTAMP:
2117                 optname = SO_TIMESTAMP;
2118                 break;
2119         case TARGET_SO_RCVLOWAT:
2120                 optname = SO_RCVLOWAT;
2121                 break;
2122         default:
2123             goto unimplemented;
2124         }
2125         if (optlen < sizeof(uint32_t))
2126             return -TARGET_EINVAL;
2127 
2128         if (get_user_u32(val, optval_addr))
2129             return -TARGET_EFAULT;
2130         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2131         break;
2132     default:
2133     unimplemented:
2134         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2135         ret = -TARGET_ENOPROTOOPT;
2136     }
2137     return ret;
2138 }
2139 
2140 /* do_getsockopt() must return target values and target errnos. */
2141 static abi_long do_getsockopt(int sockfd, int level, int optname,
2142                               abi_ulong optval_addr, abi_ulong optlen)
2143 {
2144     abi_long ret;
2145     int len, val;
2146     socklen_t lv;
2147 
2148     switch(level) {
2149     case TARGET_SOL_SOCKET:
2150         level = SOL_SOCKET;
2151         switch (optname) {
2152         /* These don't just return a single integer */
2153         case TARGET_SO_RCVTIMEO:
2154         case TARGET_SO_SNDTIMEO:
2155         case TARGET_SO_PEERNAME:
2156             goto unimplemented;
2157         case TARGET_SO_PEERCRED: {
2158             struct ucred cr;
2159             socklen_t crlen;
2160             struct target_ucred *tcr;
2161 
2162             if (get_user_u32(len, optlen)) {
2163                 return -TARGET_EFAULT;
2164             }
2165             if (len < 0) {
2166                 return -TARGET_EINVAL;
2167             }
2168 
2169             crlen = sizeof(cr);
2170             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2171                                        &cr, &crlen));
2172             if (ret < 0) {
2173                 return ret;
2174             }
2175             if (len > crlen) {
2176                 len = crlen;
2177             }
2178             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2179                 return -TARGET_EFAULT;
2180             }
2181             __put_user(cr.pid, &tcr->pid);
2182             __put_user(cr.uid, &tcr->uid);
2183             __put_user(cr.gid, &tcr->gid);
2184             unlock_user_struct(tcr, optval_addr, 1);
2185             if (put_user_u32(len, optlen)) {
2186                 return -TARGET_EFAULT;
2187             }
2188             break;
2189         }
2190         case TARGET_SO_LINGER:
2191         {
2192             struct linger lg;
2193             socklen_t lglen;
2194             struct target_linger *tlg;
2195 
2196             if (get_user_u32(len, optlen)) {
2197                 return -TARGET_EFAULT;
2198             }
2199             if (len < 0) {
2200                 return -TARGET_EINVAL;
2201             }
2202 
2203             lglen = sizeof(lg);
2204             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2205                                        &lg, &lglen));
2206             if (ret < 0) {
2207                 return ret;
2208             }
2209             if (len > lglen) {
2210                 len = lglen;
2211             }
2212             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2213                 return -TARGET_EFAULT;
2214             }
2215             __put_user(lg.l_onoff, &tlg->l_onoff);
2216             __put_user(lg.l_linger, &tlg->l_linger);
2217             unlock_user_struct(tlg, optval_addr, 1);
2218             if (put_user_u32(len, optlen)) {
2219                 return -TARGET_EFAULT;
2220             }
2221             break;
2222         }
2223         /* Options with 'int' argument.  */
2224         case TARGET_SO_DEBUG:
2225             optname = SO_DEBUG;
2226             goto int_case;
2227         case TARGET_SO_REUSEADDR:
2228             optname = SO_REUSEADDR;
2229             goto int_case;
2230 #ifdef SO_REUSEPORT
2231         case TARGET_SO_REUSEPORT:
2232             optname = SO_REUSEPORT;
2233             goto int_case;
2234 #endif
2235         case TARGET_SO_TYPE:
2236             optname = SO_TYPE;
2237             goto int_case;
2238         case TARGET_SO_ERROR:
2239             optname = SO_ERROR;
2240             goto int_case;
2241         case TARGET_SO_DONTROUTE:
2242             optname = SO_DONTROUTE;
2243             goto int_case;
2244         case TARGET_SO_BROADCAST:
2245             optname = SO_BROADCAST;
2246             goto int_case;
2247         case TARGET_SO_SNDBUF:
2248             optname = SO_SNDBUF;
2249             goto int_case;
2250         case TARGET_SO_RCVBUF:
2251             optname = SO_RCVBUF;
2252             goto int_case;
2253         case TARGET_SO_KEEPALIVE:
2254             optname = SO_KEEPALIVE;
2255             goto int_case;
2256         case TARGET_SO_OOBINLINE:
2257             optname = SO_OOBINLINE;
2258             goto int_case;
2259         case TARGET_SO_NO_CHECK:
2260             optname = SO_NO_CHECK;
2261             goto int_case;
2262         case TARGET_SO_PRIORITY:
2263             optname = SO_PRIORITY;
2264             goto int_case;
2265 #ifdef SO_BSDCOMPAT
2266         case TARGET_SO_BSDCOMPAT:
2267             optname = SO_BSDCOMPAT;
2268             goto int_case;
2269 #endif
2270         case TARGET_SO_PASSCRED:
2271             optname = SO_PASSCRED;
2272             goto int_case;
2273         case TARGET_SO_TIMESTAMP:
2274             optname = SO_TIMESTAMP;
2275             goto int_case;
2276         case TARGET_SO_RCVLOWAT:
2277             optname = SO_RCVLOWAT;
2278             goto int_case;
2279         case TARGET_SO_ACCEPTCONN:
2280             optname = SO_ACCEPTCONN;
2281             goto int_case;
2282         default:
2283             goto int_case;
2284         }
2285         break;
2286     case SOL_TCP:
2287         /* TCP options all take an 'int' value.  */
2288     int_case:
2289         if (get_user_u32(len, optlen))
2290             return -TARGET_EFAULT;
2291         if (len < 0)
2292             return -TARGET_EINVAL;
2293         lv = sizeof(lv);
2294         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2295         if (ret < 0)
2296             return ret;
2297         if (optname == SO_TYPE) {
2298             val = host_to_target_sock_type(val);
2299         }
2300         if (len > lv)
2301             len = lv;
2302         if (len == 4) {
2303             if (put_user_u32(val, optval_addr))
2304                 return -TARGET_EFAULT;
2305         } else {
2306             if (put_user_u8(val, optval_addr))
2307                 return -TARGET_EFAULT;
2308         }
2309         if (put_user_u32(len, optlen))
2310             return -TARGET_EFAULT;
2311         break;
2312     case SOL_IP:
2313         switch(optname) {
2314         case IP_TOS:
2315         case IP_TTL:
2316         case IP_HDRINCL:
2317         case IP_ROUTER_ALERT:
2318         case IP_RECVOPTS:
2319         case IP_RETOPTS:
2320         case IP_PKTINFO:
2321         case IP_MTU_DISCOVER:
2322         case IP_RECVERR:
2323         case IP_RECVTOS:
2324 #ifdef IP_FREEBIND
2325         case IP_FREEBIND:
2326 #endif
2327         case IP_MULTICAST_TTL:
2328         case IP_MULTICAST_LOOP:
2329             if (get_user_u32(len, optlen))
2330                 return -TARGET_EFAULT;
2331             if (len < 0)
2332                 return -TARGET_EINVAL;
2333             lv = sizeof(lv);
2334             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2335             if (ret < 0)
2336                 return ret;
2337             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2338                 len = 1;
2339                 if (put_user_u32(len, optlen)
2340                     || put_user_u8(val, optval_addr))
2341                     return -TARGET_EFAULT;
2342             } else {
2343                 if (len > sizeof(int))
2344                     len = sizeof(int);
2345                 if (put_user_u32(len, optlen)
2346                     || put_user_u32(val, optval_addr))
2347                     return -TARGET_EFAULT;
2348             }
2349             break;
2350         default:
2351             ret = -TARGET_ENOPROTOOPT;
2352             break;
2353         }
2354         break;
2355     case SOL_IPV6:
2356         switch (optname) {
2357         case IPV6_MTU_DISCOVER:
2358         case IPV6_MTU:
2359         case IPV6_V6ONLY:
2360         case IPV6_RECVPKTINFO:
2361         case IPV6_UNICAST_HOPS:
2362         case IPV6_MULTICAST_HOPS:
2363         case IPV6_MULTICAST_LOOP:
2364         case IPV6_RECVERR:
2365         case IPV6_RECVHOPLIMIT:
2366         case IPV6_2292HOPLIMIT:
2367         case IPV6_CHECKSUM:
2368             if (get_user_u32(len, optlen))
2369                 return -TARGET_EFAULT;
2370             if (len < 0)
2371                 return -TARGET_EINVAL;
2372             lv = sizeof(lv);
2373             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2374             if (ret < 0)
2375                 return ret;
2376             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2377                 len = 1;
2378                 if (put_user_u32(len, optlen)
2379                     || put_user_u8(val, optval_addr))
2380                     return -TARGET_EFAULT;
2381             } else {
2382                 if (len > sizeof(int))
2383                     len = sizeof(int);
2384                 if (put_user_u32(len, optlen)
2385                     || put_user_u32(val, optval_addr))
2386                     return -TARGET_EFAULT;
2387             }
2388             break;
2389         default:
2390             ret = -TARGET_ENOPROTOOPT;
2391             break;
2392         }
2393         break;
2394     default:
2395     unimplemented:
2396         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2397                  level, optname);
2398         ret = -TARGET_EOPNOTSUPP;
2399         break;
2400     }
2401     return ret;
2402 }
2403 
2404 /* Convert target low/high pair representing file offset into the host
2405  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2406  * as the kernel doesn't handle them either.
2407  */
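/* For example, for a 32-bit guest on a 64-bit host, tlow = 0x89abcdef and
 * thigh = 0x01234567 combine into off = 0x0123456789abcdef, so that
 * *hlow = 0x0123456789abcdef and *hhigh = 0.
 */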
2408 static void target_to_host_low_high(abi_ulong tlow,
2409                                     abi_ulong thigh,
2410                                     unsigned long *hlow,
2411                                     unsigned long *hhigh)
2412 {
2413     uint64_t off = tlow |
2414         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2415         TARGET_LONG_BITS / 2;
2416 
2417     *hlow = off;
2418     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2419 }
2420 
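/* Lock a guest iovec array of 'count' entries at target_addr and build a
 * host struct iovec array pointing at the locked guest buffers.  On success
 * the result must be released with unlock_iovec(); on failure NULL is
 * returned with errno set to EINVAL, ENOMEM or EFAULT, and for count == 0
 * NULL is returned with errno cleared to 0.
 */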
2421 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2422                                 abi_ulong count, int copy)
2423 {
2424     struct target_iovec *target_vec;
2425     struct iovec *vec;
2426     abi_ulong total_len, max_len;
2427     int i;
2428     int err = 0;
2429     bool bad_address = false;
2430 
2431     if (count == 0) {
2432         errno = 0;
2433         return NULL;
2434     }
2435     if (count > IOV_MAX) {
2436         errno = EINVAL;
2437         return NULL;
2438     }
2439 
2440     vec = g_try_new0(struct iovec, count);
2441     if (vec == NULL) {
2442         errno = ENOMEM;
2443         return NULL;
2444     }
2445 
2446     target_vec = lock_user(VERIFY_READ, target_addr,
2447                            count * sizeof(struct target_iovec), 1);
2448     if (target_vec == NULL) {
2449         err = EFAULT;
2450         goto fail2;
2451     }
2452 
2453     /* ??? If host page size > target page size, this will result in a
2454        value larger than what we can actually support.  */
2455     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2456     total_len = 0;
2457 
2458     for (i = 0; i < count; i++) {
2459         abi_ulong base = tswapal(target_vec[i].iov_base);
2460         abi_long len = tswapal(target_vec[i].iov_len);
2461 
2462         if (len < 0) {
2463             err = EINVAL;
2464             goto fail;
2465         } else if (len == 0) {
2466             /* Zero length pointer is ignored.  */
2467             vec[i].iov_base = 0;
2468         } else {
2469             vec[i].iov_base = lock_user(type, base, len, copy);
2470             /* If the first buffer pointer is bad, this is a fault.  But
2471              * subsequent bad buffers will result in a partial write; this
2472              * is realized by filling the vector with null pointers and
2473              * zero lengths. */
2474             if (!vec[i].iov_base) {
2475                 if (i == 0) {
2476                     err = EFAULT;
2477                     goto fail;
2478                 } else {
2479                     bad_address = true;
2480                 }
2481             }
2482             if (bad_address) {
2483                 len = 0;
2484             }
2485             if (len > max_len - total_len) {
2486                 len = max_len - total_len;
2487             }
2488         }
2489         vec[i].iov_len = len;
2490         total_len += len;
2491     }
2492 
2493     unlock_user(target_vec, target_addr, 0);
2494     return vec;
2495 
2496  fail:
2497     while (--i >= 0) {
2498         if (tswapal(target_vec[i].iov_len) > 0) {
2499             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2500         }
2501     }
2502     unlock_user(target_vec, target_addr, 0);
2503  fail2:
2504     g_free(vec);
2505     errno = err;
2506     return NULL;
2507 }
2508 
2509 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2510                          abi_ulong count, int copy)
2511 {
2512     struct target_iovec *target_vec;
2513     int i;
2514 
2515     target_vec = lock_user(VERIFY_READ, target_addr,
2516                            count * sizeof(struct target_iovec), 1);
2517     if (target_vec) {
2518         for (i = 0; i < count; i++) {
2519             abi_ulong base = tswapal(target_vec[i].iov_base);
2520             abi_long len = tswapal(target_vec[i].iov_len);
2521             if (len < 0) {
2522                 break;
2523             }
2524             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2525         }
2526         unlock_user(target_vec, target_addr, 0);
2527     }
2528 
2529     g_free(vec);
2530 }
2531 
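/* Translate a guest socket type and its TARGET_SOCK_CLOEXEC/NONBLOCK flags
 * into host values, modifying *type in place.  Returns 0 on success or
 * -TARGET_EINVAL when the host cannot express a requested flag.
 */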
2532 static inline int target_to_host_sock_type(int *type)
2533 {
2534     int host_type = 0;
2535     int target_type = *type;
2536 
2537     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2538     case TARGET_SOCK_DGRAM:
2539         host_type = SOCK_DGRAM;
2540         break;
2541     case TARGET_SOCK_STREAM:
2542         host_type = SOCK_STREAM;
2543         break;
2544     default:
2545         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2546         break;
2547     }
2548     if (target_type & TARGET_SOCK_CLOEXEC) {
2549 #if defined(SOCK_CLOEXEC)
2550         host_type |= SOCK_CLOEXEC;
2551 #else
2552         return -TARGET_EINVAL;
2553 #endif
2554     }
2555     if (target_type & TARGET_SOCK_NONBLOCK) {
2556 #if defined(SOCK_NONBLOCK)
2557         host_type |= SOCK_NONBLOCK;
2558 #elif !defined(O_NONBLOCK)
2559         return -TARGET_EINVAL;
2560 #endif
2561     }
2562     *type = host_type;
2563     return 0;
2564 }
2565 
2566 /* Try to emulate socket type flags after socket creation.  */
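/* On hosts without SOCK_NONBLOCK the flag is applied after the fact with
 * fcntl(F_SETFL, O_NONBLOCK); if that fails the new socket is closed and
 * -TARGET_EINVAL returned, otherwise the fd is returned unchanged.
 */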
2567 static int sock_flags_fixup(int fd, int target_type)
2568 {
2569 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2570     if (target_type & TARGET_SOCK_NONBLOCK) {
2571         int flags = fcntl(fd, F_GETFL);
2572         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2573             close(fd);
2574             return -TARGET_EINVAL;
2575         }
2576     }
2577 #endif
2578     return fd;
2579 }
2580 
2581 /* do_socket() must return target values and target errnos. */
2582 static abi_long do_socket(int domain, int type, int protocol)
2583 {
2584     int target_type = type;
2585     int ret;
2586 
2587     ret = target_to_host_sock_type(&type);
2588     if (ret) {
2589         return ret;
2590     }
2591 
2592     if (domain == PF_NETLINK && !(
2593 #ifdef CONFIG_RTNETLINK
2594          protocol == NETLINK_ROUTE ||
2595 #endif
2596          protocol == NETLINK_KOBJECT_UEVENT ||
2597          protocol == NETLINK_AUDIT)) {
2598         return -TARGET_EPFNOSUPPORT;
2599     }
2600 
2601     if (domain == AF_PACKET ||
2602         (domain == AF_INET && type == SOCK_PACKET)) {
2603         protocol = tswap16(protocol);
2604     }
2605 
2606     ret = get_errno(socket(domain, type, protocol));
2607     if (ret >= 0) {
2608         ret = sock_flags_fixup(ret, target_type);
2609         if (type == SOCK_PACKET) {
2610             /* Handle an obsolete case: if the socket type is
2611              * SOCK_PACKET, bind by name.
2612              */
2613             fd_trans_register(ret, &target_packet_trans);
2614         } else if (domain == PF_NETLINK) {
2615             switch (protocol) {
2616 #ifdef CONFIG_RTNETLINK
2617             case NETLINK_ROUTE:
2618                 fd_trans_register(ret, &target_netlink_route_trans);
2619                 break;
2620 #endif
2621             case NETLINK_KOBJECT_UEVENT:
2622                 /* nothing to do: messages are strings */
2623                 break;
2624             case NETLINK_AUDIT:
2625                 fd_trans_register(ret, &target_netlink_audit_trans);
2626                 break;
2627             default:
2628                 g_assert_not_reached();
2629             }
2630         }
2631     }
2632     return ret;
2633 }
2634 
2635 /* do_bind() must return target values and target errnos. */
2636 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2637                         socklen_t addrlen)
2638 {
2639     void *addr;
2640     abi_long ret;
2641 
2642     if ((int)addrlen < 0) {
2643         return -TARGET_EINVAL;
2644     }
2645 
2646     addr = alloca(addrlen+1);
2647 
2648     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2649     if (ret)
2650         return ret;
2651 
2652     return get_errno(bind(sockfd, addr, addrlen));
2653 }
2654 
2655 /* do_connect() must return target values and target errnos. */
2656 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2657                            socklen_t addrlen)
2658 {
2659     void *addr;
2660     abi_long ret;
2661 
2662     if ((int)addrlen < 0) {
2663         return -TARGET_EINVAL;
2664     }
2665 
2666     addr = alloca(addrlen+1);
2667 
2668     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2669     if (ret)
2670         return ret;
2671 
2672     return get_errno(safe_connect(sockfd, addr, addrlen));
2673 }
2674 
2675 /* do_sendrecvmsg_locked() must return target values and target errnos. */
2676 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2677                                       int flags, int send)
2678 {
2679     abi_long ret, len;
2680     struct msghdr msg;
2681     abi_ulong count;
2682     struct iovec *vec;
2683     abi_ulong target_vec;
2684 
2685     if (msgp->msg_name) {
2686         msg.msg_namelen = tswap32(msgp->msg_namelen);
2687         msg.msg_name = alloca(msg.msg_namelen+1);
2688         ret = target_to_host_sockaddr(fd, msg.msg_name,
2689                                       tswapal(msgp->msg_name),
2690                                       msg.msg_namelen);
2691         if (ret == -TARGET_EFAULT) {
2692             /* For connected sockets msg_name and msg_namelen must
2693              * be ignored, so returning EFAULT immediately is wrong.
2694              * Instead, pass a bad msg_name to the host kernel, and
2695              * let it decide whether to return EFAULT or not.
2696              */
2697             msg.msg_name = (void *)-1;
2698         } else if (ret) {
2699             goto out2;
2700         }
2701     } else {
2702         msg.msg_name = NULL;
2703         msg.msg_namelen = 0;
2704     }
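    /* Allocate twice the guest's control buffer so that host-format cmsgs,
     * whose headers and alignment may be larger than the target's, fit for
     * all currently supported payload types (see the overflow note in
     * target_to_host_cmsg()).
     */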
2705     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2706     msg.msg_control = alloca(msg.msg_controllen);
2707     memset(msg.msg_control, 0, msg.msg_controllen);
2708 
2709     msg.msg_flags = tswap32(msgp->msg_flags);
2710 
2711     count = tswapal(msgp->msg_iovlen);
2712     target_vec = tswapal(msgp->msg_iov);
2713 
2714     if (count > IOV_MAX) {
2715         /* sendmsg/recvmsg return a different errno for this condition than
2716          * readv/writev, so we must catch it here before lock_iovec() does.
2717          */
2718         ret = -TARGET_EMSGSIZE;
2719         goto out2;
2720     }
2721 
2722     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2723                      target_vec, count, send);
2724     if (vec == NULL) {
2725         ret = -host_to_target_errno(errno);
2726         goto out2;
2727     }
2728     msg.msg_iovlen = count;
2729     msg.msg_iov = vec;
2730 
2731     if (send) {
2732         if (fd_trans_target_to_host_data(fd)) {
2733             void *host_msg;
2734 
2735             host_msg = g_malloc(msg.msg_iov->iov_len);
2736             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2737             ret = fd_trans_target_to_host_data(fd)(host_msg,
2738                                                    msg.msg_iov->iov_len);
2739             if (ret >= 0) {
2740                 msg.msg_iov->iov_base = host_msg;
2741                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2742             }
2743             g_free(host_msg);
2744         } else {
2745             ret = target_to_host_cmsg(&msg, msgp);
2746             if (ret == 0) {
2747                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2748             }
2749         }
2750     } else {
2751         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2752         if (!is_error(ret)) {
2753             len = ret;
2754             if (fd_trans_host_to_target_data(fd)) {
2755                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2756                                                MIN(msg.msg_iov->iov_len, len));
2757             } else {
2758                 ret = host_to_target_cmsg(msgp, &msg);
2759             }
2760             if (!is_error(ret)) {
2761                 msgp->msg_namelen = tswap32(msg.msg_namelen);
2762                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2763                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2764                                     msg.msg_name, msg.msg_namelen);
2765                     if (ret) {
2766                         goto out;
2767                     }
2768                 }
2769 
2770                 ret = len;
2771             }
2772         }
2773     }
2774 
2775 out:
2776     unlock_iovec(vec, target_vec, count, !send);
2777 out2:
2778     return ret;
2779 }
2780 
2781 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2782                                int flags, int send)
2783 {
2784     abi_long ret;
2785     struct target_msghdr *msgp;
2786 
2787     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2788                           msgp,
2789                           target_msg,
2790                           send ? 1 : 0)) {
2791         return -TARGET_EFAULT;
2792     }
2793     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2794     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2795     return ret;
2796 }
2797 
2798 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2799  * so it might not have this *mmsg-specific flag either.
2800  */
2801 #ifndef MSG_WAITFORONE
2802 #define MSG_WAITFORONE 0x10000
2803 #endif
2804 
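/* Emulate sendmmsg()/recvmmsg() by calling do_sendrecvmsg_locked() once per
 * entry of the guest's mmsghdr vector (capped at UIO_MAXIOV).  Returns the
 * number of datagrams handled if any succeeded, otherwise the error from the
 * first attempt.
 */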
2805 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2806                                 unsigned int vlen, unsigned int flags,
2807                                 int send)
2808 {
2809     struct target_mmsghdr *mmsgp;
2810     abi_long ret = 0;
2811     int i;
2812 
2813     if (vlen > UIO_MAXIOV) {
2814         vlen = UIO_MAXIOV;
2815     }
2816 
2817     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2818     if (!mmsgp) {
2819         return -TARGET_EFAULT;
2820     }
2821 
2822     for (i = 0; i < vlen; i++) {
2823         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2824         if (is_error(ret)) {
2825             break;
2826         }
2827         mmsgp[i].msg_len = tswap32(ret);
2828         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2829         if (flags & MSG_WAITFORONE) {
2830             flags |= MSG_DONTWAIT;
2831         }
2832     }
2833 
2834     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2835 
2836     /* Return number of datagrams sent if we sent any at all;
2837      * otherwise return the error.
2838      */
2839     if (i) {
2840         return i;
2841     }
2842     return ret;
2843 }
2844 
2845 /* do_accept4() must return target values and target errnos. */
2846 static abi_long do_accept4(int fd, abi_ulong target_addr,
2847                            abi_ulong target_addrlen_addr, int flags)
2848 {
2849     socklen_t addrlen;
2850     void *addr;
2851     abi_long ret;
2852     int host_flags;
2853 
2854     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2855 
2856     if (target_addr == 0) {
2857         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2858     }
2859 
2860     /* Linux returns EINVAL if the addrlen pointer is invalid */
2861     if (get_user_u32(addrlen, target_addrlen_addr))
2862         return -TARGET_EINVAL;
2863 
2864     if ((int)addrlen < 0) {
2865         return -TARGET_EINVAL;
2866     }
2867 
2868     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2869         return -TARGET_EINVAL;
2870 
2871     addr = alloca(addrlen);
2872 
2873     ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
2874     if (!is_error(ret)) {
2875         host_to_target_sockaddr(target_addr, addr, addrlen);
2876         if (put_user_u32(addrlen, target_addrlen_addr))
2877             ret = -TARGET_EFAULT;
2878     }
2879     return ret;
2880 }
2881 
2882 /* do_getpeername() must return target values and target errnos. */
2883 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2884                                abi_ulong target_addrlen_addr)
2885 {
2886     socklen_t addrlen;
2887     void *addr;
2888     abi_long ret;
2889 
2890     if (get_user_u32(addrlen, target_addrlen_addr))
2891         return -TARGET_EFAULT;
2892 
2893     if ((int)addrlen < 0) {
2894         return -TARGET_EINVAL;
2895     }
2896 
2897     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2898         return -TARGET_EFAULT;
2899 
2900     addr = alloca(addrlen);
2901 
2902     ret = get_errno(getpeername(fd, addr, &addrlen));
2903     if (!is_error(ret)) {
2904         host_to_target_sockaddr(target_addr, addr, addrlen);
2905         if (put_user_u32(addrlen, target_addrlen_addr))
2906             ret = -TARGET_EFAULT;
2907     }
2908     return ret;
2909 }
2910 
2911 /* do_getsockname() must return target values and target errnos. */
2912 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2913                                abi_ulong target_addrlen_addr)
2914 {
2915     socklen_t addrlen;
2916     void *addr;
2917     abi_long ret;
2918 
2919     if (get_user_u32(addrlen, target_addrlen_addr))
2920         return -TARGET_EFAULT;
2921 
2922     if ((int)addrlen < 0) {
2923         return -TARGET_EINVAL;
2924     }
2925 
2926     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2927         return -TARGET_EFAULT;
2928 
2929     addr = alloca(addrlen);
2930 
2931     ret = get_errno(getsockname(fd, addr, &addrlen));
2932     if (!is_error(ret)) {
2933         host_to_target_sockaddr(target_addr, addr, addrlen);
2934         if (put_user_u32(addrlen, target_addrlen_addr))
2935             ret = -TARGET_EFAULT;
2936     }
2937     return ret;
2938 }
2939 
2940 /* do_socketpair() must return target values and target errnos. */
2941 static abi_long do_socketpair(int domain, int type, int protocol,
2942                               abi_ulong target_tab_addr)
2943 {
2944     int tab[2];
2945     abi_long ret;
2946 
2947     target_to_host_sock_type(&type);
2948 
2949     ret = get_errno(socketpair(domain, type, protocol, tab));
2950     if (!is_error(ret)) {
2951         if (put_user_s32(tab[0], target_tab_addr)
2952             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2953             ret = -TARGET_EFAULT;
2954     }
2955     return ret;
2956 }
2957 
2958 /* do_sendto() must return target values and target errnos. */
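/* If a data translator is registered for this fd (see the fd_trans_register()
 * calls in do_socket(), e.g. for netlink sockets), the payload is copied and
 * converted before being passed to sendto().
 */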
2959 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2960                           abi_ulong target_addr, socklen_t addrlen)
2961 {
2962     void *addr;
2963     void *host_msg;
2964     void *copy_msg = NULL;
2965     abi_long ret;
2966 
2967     if ((int)addrlen < 0) {
2968         return -TARGET_EINVAL;
2969     }
2970 
2971     host_msg = lock_user(VERIFY_READ, msg, len, 1);
2972     if (!host_msg)
2973         return -TARGET_EFAULT;
2974     if (fd_trans_target_to_host_data(fd)) {
2975         copy_msg = host_msg;
2976         host_msg = g_malloc(len);
2977         memcpy(host_msg, copy_msg, len);
2978         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
2979         if (ret < 0) {
2980             goto fail;
2981         }
2982     }
2983     if (target_addr) {
2984         addr = alloca(addrlen+1);
2985         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
2986         if (ret) {
2987             goto fail;
2988         }
2989         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
2990     } else {
2991         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
2992     }
2993 fail:
2994     if (copy_msg) {
2995         g_free(host_msg);
2996         host_msg = copy_msg;
2997     }
2998     unlock_user(host_msg, msg, 0);
2999     return ret;
3000 }
3001 
3002 /* do_recvfrom() must return target values and target errnos. */
3003 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3004                             abi_ulong target_addr,
3005                             abi_ulong target_addrlen)
3006 {
3007     socklen_t addrlen;
3008     void *addr;
3009     void *host_msg;
3010     abi_long ret;
3011 
3012     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3013     if (!host_msg)
3014         return -TARGET_EFAULT;
3015     if (target_addr) {
3016         if (get_user_u32(addrlen, target_addrlen)) {
3017             ret = -TARGET_EFAULT;
3018             goto fail;
3019         }
3020         if ((int)addrlen < 0) {
3021             ret = -TARGET_EINVAL;
3022             goto fail;
3023         }
3024         addr = alloca(addrlen);
3025         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3026                                       addr, &addrlen));
3027     } else {
3028         addr = NULL; /* To keep compiler quiet.  */
3029         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3030     }
3031     if (!is_error(ret)) {
3032         if (fd_trans_host_to_target_data(fd)) {
3033             abi_long trans;
3034             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3035             if (is_error(trans)) {
3036                 ret = trans;
3037                 goto fail;
3038             }
3039         }
3040         if (target_addr) {
3041             host_to_target_sockaddr(target_addr, addr, addrlen);
3042             if (put_user_u32(addrlen, target_addrlen)) {
3043                 ret = -TARGET_EFAULT;
3044                 goto fail;
3045             }
3046         }
3047         unlock_user(host_msg, msg, len);
3048     } else {
3049 fail:
3050         unlock_user(host_msg, msg, 0);
3051     }
3052     return ret;
3053 }
3054 
3055 #ifdef TARGET_NR_socketcall
3056 /* do_socketcall() must return target values and target errnos. */
3057 static abi_long do_socketcall(int num, abi_ulong vptr)
3058 {
3059     static const unsigned nargs[] = { /* number of arguments per operation */
3060         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3061         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3062         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3063         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3064         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3065         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3066         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3067         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3068         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3069         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3070         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3071         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3072         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3073         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3074         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3075         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3076         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3077         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3078         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3079         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3080     };
3081     abi_long a[6]; /* max 6 args */
3082     unsigned i;
3083 
3084     /* check the range of the first argument num */
3085     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3086     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3087         return -TARGET_EINVAL;
3088     }
3089     /* ensure we have space for args */
3090     if (nargs[num] > ARRAY_SIZE(a)) {
3091         return -TARGET_EINVAL;
3092     }
3093     /* collect the arguments in a[] according to nargs[] */
3094     for (i = 0; i < nargs[num]; ++i) {
3095         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3096             return -TARGET_EFAULT;
3097         }
3098     }
3099     /* now that we have the args, invoke the appropriate underlying function */
3100     switch (num) {
3101     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3102         return do_socket(a[0], a[1], a[2]);
3103     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3104         return do_bind(a[0], a[1], a[2]);
3105     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3106         return do_connect(a[0], a[1], a[2]);
3107     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3108         return get_errno(listen(a[0], a[1]));
3109     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3110         return do_accept4(a[0], a[1], a[2], 0);
3111     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3112         return do_getsockname(a[0], a[1], a[2]);
3113     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3114         return do_getpeername(a[0], a[1], a[2]);
3115     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3116         return do_socketpair(a[0], a[1], a[2], a[3]);
3117     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3118         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3119     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3120         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3121     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3122         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3123     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3124         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3125     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3126         return get_errno(shutdown(a[0], a[1]));
3127     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3128         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3129     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3130         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3131     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3132         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3133     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3134         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3135     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3136         return do_accept4(a[0], a[1], a[2], a[3]);
3137     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3138         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3139     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3140         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3141     default:
3142         gemu_log("Unsupported socketcall: %d\n", num);
3143         return -TARGET_EINVAL;
3144     }
3145 }
3146 #endif
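/*
 * Added commentary (illustrative, not part of the original source): the
 * multiplexed socketcall(2) entry point receives its real arguments as an
 * array of abi_longs in guest memory.  For example, a guest
 * connect(fd, addr, addrlen) issued as socketcall(TARGET_SYS_CONNECT, args)
 * is handled above by reading nargs[TARGET_SYS_CONNECT] == 3 words from
 * "args" with get_user_ual() and forwarding them as
 * do_connect(a[0], a[1], a[2]).
 */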
3147 
3148 #define N_SHM_REGIONS	32
3149 
3150 static struct shm_region {
3151     abi_ulong start;
3152     abi_ulong size;
3153     bool in_use;
3154 } shm_regions[N_SHM_REGIONS];
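/*
 * Added commentary: this fixed-size table records the guest address and size
 * of every segment attached by do_shmat() below, so that do_shmdt() can
 * clear the corresponding page flags again when the segment is detached.
 */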
3155 
3156 #ifndef TARGET_SEMID64_DS
3157 /* asm-generic version of this struct */
3158 struct target_semid64_ds
3159 {
3160     struct target_ipc_perm sem_perm;
3161     abi_ulong sem_otime;
3162 #if TARGET_ABI_BITS == 32
3163     abi_ulong __unused1;
3164 #endif
3165     abi_ulong sem_ctime;
3166 #if TARGET_ABI_BITS == 32
3167     abi_ulong __unused2;
3168 #endif
3169     abi_ulong sem_nsems;
3170     abi_ulong __unused3;
3171     abi_ulong __unused4;
3172 };
3173 #endif
3174 
3175 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3176                                                abi_ulong target_addr)
3177 {
3178     struct target_ipc_perm *target_ip;
3179     struct target_semid64_ds *target_sd;
3180 
3181     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3182         return -TARGET_EFAULT;
3183     target_ip = &(target_sd->sem_perm);
3184     host_ip->__key = tswap32(target_ip->__key);
3185     host_ip->uid = tswap32(target_ip->uid);
3186     host_ip->gid = tswap32(target_ip->gid);
3187     host_ip->cuid = tswap32(target_ip->cuid);
3188     host_ip->cgid = tswap32(target_ip->cgid);
3189 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3190     host_ip->mode = tswap32(target_ip->mode);
3191 #else
3192     host_ip->mode = tswap16(target_ip->mode);
3193 #endif
3194 #if defined(TARGET_PPC)
3195     host_ip->__seq = tswap32(target_ip->__seq);
3196 #else
3197     host_ip->__seq = tswap16(target_ip->__seq);
3198 #endif
3199     unlock_user_struct(target_sd, target_addr, 0);
3200     return 0;
3201 }
3202 
3203 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3204                                                struct ipc_perm *host_ip)
3205 {
3206     struct target_ipc_perm *target_ip;
3207     struct target_semid64_ds *target_sd;
3208 
3209     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3210         return -TARGET_EFAULT;
3211     target_ip = &(target_sd->sem_perm);
3212     target_ip->__key = tswap32(host_ip->__key);
3213     target_ip->uid = tswap32(host_ip->uid);
3214     target_ip->gid = tswap32(host_ip->gid);
3215     target_ip->cuid = tswap32(host_ip->cuid);
3216     target_ip->cgid = tswap32(host_ip->cgid);
3217 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3218     target_ip->mode = tswap32(host_ip->mode);
3219 #else
3220     target_ip->mode = tswap16(host_ip->mode);
3221 #endif
3222 #if defined(TARGET_PPC)
3223     target_ip->__seq = tswap32(host_ip->__seq);
3224 #else
3225     target_ip->__seq = tswap16(host_ip->__seq);
3226 #endif
3227     unlock_user_struct(target_sd, target_addr, 1);
3228     return 0;
3229 }
3230 
3231 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3232                                                abi_ulong target_addr)
3233 {
3234     struct target_semid64_ds *target_sd;
3235 
3236     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3237         return -TARGET_EFAULT;
3238     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3239         return -TARGET_EFAULT;
3240     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3241     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3242     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3243     unlock_user_struct(target_sd, target_addr, 0);
3244     return 0;
3245 }
3246 
3247 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3248                                                struct semid_ds *host_sd)
3249 {
3250     struct target_semid64_ds *target_sd;
3251 
3252     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3253         return -TARGET_EFAULT;
3254     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3255         return -TARGET_EFAULT;
3256     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3257     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3258     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3259     unlock_user_struct(target_sd, target_addr, 1);
3260     return 0;
3261 }
3262 
3263 struct target_seminfo {
3264     int semmap;
3265     int semmni;
3266     int semmns;
3267     int semmnu;
3268     int semmsl;
3269     int semopm;
3270     int semume;
3271     int semusz;
3272     int semvmx;
3273     int semaem;
3274 };
3275 
3276 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3277                                               struct seminfo *host_seminfo)
3278 {
3279     struct target_seminfo *target_seminfo;
3280     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3281         return -TARGET_EFAULT;
3282     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3283     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3284     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3285     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3286     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3287     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3288     __put_user(host_seminfo->semume, &target_seminfo->semume);
3289     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3290     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3291     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3292     unlock_user_struct(target_seminfo, target_addr, 1);
3293     return 0;
3294 }
3295 
3296 union semun {
3297     int val;
3298     struct semid_ds *buf;
3299     unsigned short *array;
3300     struct seminfo *__buf;
3301 };
3302 
3303 union target_semun {
3304     int val;
3305     abi_ulong buf;
3306     abi_ulong array;
3307     abi_ulong __buf;
3308 };
3309 
3310 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3311                                                abi_ulong target_addr)
3312 {
3313     int nsems;
3314     unsigned short *array;
3315     union semun semun;
3316     struct semid_ds semid_ds;
3317     int i, ret;
3318 
3319     semun.buf = &semid_ds;
3320 
3321     ret = semctl(semid, 0, IPC_STAT, semun);
3322     if (ret == -1)
3323         return get_errno(ret);
3324 
3325     nsems = semid_ds.sem_nsems;
3326 
3327     *host_array = g_try_new(unsigned short, nsems);
3328     if (!*host_array) {
3329         return -TARGET_ENOMEM;
3330     }
3331     array = lock_user(VERIFY_READ, target_addr,
3332                       nsems*sizeof(unsigned short), 1);
3333     if (!array) {
3334         g_free(*host_array);
3335         return -TARGET_EFAULT;
3336     }
3337 
3338     for (i = 0; i < nsems; i++) {
3339         __get_user((*host_array)[i], &array[i]);
3340     }
3341     unlock_user(array, target_addr, 0);
3342 
3343     return 0;
3344 }
3345 
3346 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3347                                                unsigned short **host_array)
3348 {
3349     int nsems;
3350     unsigned short *array;
3351     union semun semun;
3352     struct semid_ds semid_ds;
3353     int i, ret;
3354 
3355     semun.buf = &semid_ds;
3356 
3357     ret = semctl(semid, 0, IPC_STAT, semun);
3358     if (ret == -1)
3359         return get_errno(ret);
3360 
3361     nsems = semid_ds.sem_nsems;
3362 
3363     array = lock_user(VERIFY_WRITE, target_addr,
3364                       nsems*sizeof(unsigned short), 0);
3365     if (!array)
3366         return -TARGET_EFAULT;
3367 
3368     for (i = 0; i < nsems; i++) {
3369         __put_user((*host_array)[i], &array[i]);
3370     }
3371     g_free(*host_array);
3372     unlock_user(array, target_addr, 1);
3373 
3374     return 0;
3375 }
3376 
3377 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3378                                  abi_ulong target_arg)
3379 {
3380     union target_semun target_su = { .buf = target_arg };
3381     union semun arg;
3382     struct semid_ds dsarg;
3383     unsigned short *array = NULL;
3384     struct seminfo seminfo;
3385     abi_long ret = -TARGET_EINVAL;
3386     abi_long err;
3387     cmd &= 0xff;
3388 
3389     switch (cmd) {
3390     case GETVAL:
3391     case SETVAL:
3392         /* In 64 bit cross-endian situations, we will erroneously pick up
3393          * the wrong half of the union for the "val" element.  To rectify
3394          * this, the entire 8-byte structure is byteswapped, followed by
3395          * a swap of the 4 byte val field. In other cases, the data is
3396          * already in proper host byte order. */
3397         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3398             target_su.buf = tswapal(target_su.buf);
3399             arg.val = tswap32(target_su.val);
3400         } else {
3401             arg.val = target_su.val;
3402         }
3403         ret = get_errno(semctl(semid, semnum, cmd, arg));
3404         break;
3405     case GETALL:
3406     case SETALL:
3407         err = target_to_host_semarray(semid, &array, target_su.array);
3408         if (err)
3409             return err;
3410         arg.array = array;
3411         ret = get_errno(semctl(semid, semnum, cmd, arg));
3412         err = host_to_target_semarray(semid, target_su.array, &array);
3413         if (err)
3414             return err;
3415         break;
3416     case IPC_STAT:
3417     case IPC_SET:
3418     case SEM_STAT:
3419         err = target_to_host_semid_ds(&dsarg, target_su.buf);
3420         if (err)
3421             return err;
3422         arg.buf = &dsarg;
3423         ret = get_errno(semctl(semid, semnum, cmd, arg));
3424         err = host_to_target_semid_ds(target_su.buf, &dsarg);
3425         if (err)
3426             return err;
3427         break;
3428     case IPC_INFO:
3429     case SEM_INFO:
3430         arg.__buf = &seminfo;
3431         ret = get_errno(semctl(semid, semnum, cmd, arg));
3432         err = host_to_target_seminfo(target_su.__buf, &seminfo);
3433         if (err)
3434             return err;
3435         break;
3436     case IPC_RMID:
3437     case GETPID:
3438     case GETNCNT:
3439     case GETZCNT:
3440         ret = get_errno(semctl(semid, semnum, cmd, NULL));
3441         break;
3442     }
3443 
3444     return ret;
3445 }
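/*
 * Worked example (added commentary, not part of the original source):
 * consider a 64-bit big-endian guest on a little-endian host doing SETVAL
 * with val == 0x01020304.  The guest's 8-byte union semun holds the bytes
 * 01 02 03 04 00 00 00 00, which arrive here as the abi_ulong value
 * 0x0102030400000000; reading the host union's 4-byte "val" member directly
 * would pick up the all-zero half.  tswapal() restores the guest byte
 * layout, after which tswap32() of the val member recovers 0x01020304.
 */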
3446 
3447 struct target_sembuf {
3448     unsigned short sem_num;
3449     short sem_op;
3450     short sem_flg;
3451 };
3452 
3453 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3454                                              abi_ulong target_addr,
3455                                              unsigned nsops)
3456 {
3457     struct target_sembuf *target_sembuf;
3458     int i;
3459 
3460     target_sembuf = lock_user(VERIFY_READ, target_addr,
3461                               nsops*sizeof(struct target_sembuf), 1);
3462     if (!target_sembuf)
3463         return -TARGET_EFAULT;
3464 
3465     for (i = 0; i < nsops; i++) {
3466         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3467         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3468         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3469     }
3470 
3471     unlock_user(target_sembuf, target_addr, 0);
3472 
3473     return 0;
3474 }
3475 
3476 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3477 {
3478     struct sembuf sops[nsops];
3479 
3480     if (target_to_host_sembuf(sops, ptr, nsops))
3481         return -TARGET_EFAULT;
3482 
3483     return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3484 }
3485 
3486 struct target_msqid_ds
3487 {
3488     struct target_ipc_perm msg_perm;
3489     abi_ulong msg_stime;
3490 #if TARGET_ABI_BITS == 32
3491     abi_ulong __unused1;
3492 #endif
3493     abi_ulong msg_rtime;
3494 #if TARGET_ABI_BITS == 32
3495     abi_ulong __unused2;
3496 #endif
3497     abi_ulong msg_ctime;
3498 #if TARGET_ABI_BITS == 32
3499     abi_ulong __unused3;
3500 #endif
3501     abi_ulong __msg_cbytes;
3502     abi_ulong msg_qnum;
3503     abi_ulong msg_qbytes;
3504     abi_ulong msg_lspid;
3505     abi_ulong msg_lrpid;
3506     abi_ulong __unused4;
3507     abi_ulong __unused5;
3508 };
3509 
3510 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3511                                                abi_ulong target_addr)
3512 {
3513     struct target_msqid_ds *target_md;
3514 
3515     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3516         return -TARGET_EFAULT;
3517     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3518         return -TARGET_EFAULT;
3519     host_md->msg_stime = tswapal(target_md->msg_stime);
3520     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3521     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3522     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3523     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3524     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3525     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3526     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3527     unlock_user_struct(target_md, target_addr, 0);
3528     return 0;
3529 }
3530 
3531 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3532                                                struct msqid_ds *host_md)
3533 {
3534     struct target_msqid_ds *target_md;
3535 
3536     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3537         return -TARGET_EFAULT;
3538     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3539         return -TARGET_EFAULT;
3540     target_md->msg_stime = tswapal(host_md->msg_stime);
3541     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3542     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3543     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3544     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3545     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3546     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3547     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3548     unlock_user_struct(target_md, target_addr, 1);
3549     return 0;
3550 }
3551 
3552 struct target_msginfo {
3553     int msgpool;
3554     int msgmap;
3555     int msgmax;
3556     int msgmnb;
3557     int msgmni;
3558     int msgssz;
3559     int msgtql;
3560     unsigned short int msgseg;
3561 };
3562 
3563 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3564                                               struct msginfo *host_msginfo)
3565 {
3566     struct target_msginfo *target_msginfo;
3567     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3568         return -TARGET_EFAULT;
3569     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3570     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3571     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3572     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3573     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3574     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3575     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3576     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3577     unlock_user_struct(target_msginfo, target_addr, 1);
3578     return 0;
3579 }
3580 
3581 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3582 {
3583     struct msqid_ds dsarg;
3584     struct msginfo msginfo;
3585     abi_long ret = -TARGET_EINVAL;
3586 
3587     cmd &= 0xff;
3588 
3589     switch (cmd) {
3590     case IPC_STAT:
3591     case IPC_SET:
3592     case MSG_STAT:
3593         if (target_to_host_msqid_ds(&dsarg,ptr))
3594             return -TARGET_EFAULT;
3595         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3596         if (host_to_target_msqid_ds(ptr,&dsarg))
3597             return -TARGET_EFAULT;
3598         break;
3599     case IPC_RMID:
3600         ret = get_errno(msgctl(msgid, cmd, NULL));
3601         break;
3602     case IPC_INFO:
3603     case MSG_INFO:
3604         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3605         if (host_to_target_msginfo(ptr, &msginfo))
3606             return -TARGET_EFAULT;
3607         break;
3608     }
3609 
3610     return ret;
3611 }
3612 
3613 struct target_msgbuf {
3614     abi_long mtype;
3615     char mtext[1];
3616 };
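/*
 * Added commentary: target_msgbuf mirrors the kernel's struct msgbuf, with
 * mtext[1] acting as a variable-length trailer.  A message of msgsz bytes
 * therefore occupies sizeof(abi_long) + msgsz bytes on the guest side,
 * which is why do_msgsnd()/do_msgrcv() below allocate a host buffer of
 * msgsz + sizeof(long) and convert mtype and mtext separately.
 */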
3617 
3618 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3619                                  ssize_t msgsz, int msgflg)
3620 {
3621     struct target_msgbuf *target_mb;
3622     struct msgbuf *host_mb;
3623     abi_long ret = 0;
3624 
3625     if (msgsz < 0) {
3626         return -TARGET_EINVAL;
3627     }
3628 
3629     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3630         return -TARGET_EFAULT;
3631     host_mb = g_try_malloc(msgsz + sizeof(long));
3632     if (!host_mb) {
3633         unlock_user_struct(target_mb, msgp, 0);
3634         return -TARGET_ENOMEM;
3635     }
3636     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3637     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3638     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3639     g_free(host_mb);
3640     unlock_user_struct(target_mb, msgp, 0);
3641 
3642     return ret;
3643 }
3644 
3645 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3646                                  ssize_t msgsz, abi_long msgtyp,
3647                                  int msgflg)
3648 {
3649     struct target_msgbuf *target_mb;
3650     char *target_mtext;
3651     struct msgbuf *host_mb;
3652     abi_long ret = 0;
3653 
3654     if (msgsz < 0) {
3655         return -TARGET_EINVAL;
3656     }
3657 
3658     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3659         return -TARGET_EFAULT;
3660 
3661     host_mb = g_try_malloc(msgsz + sizeof(long));
3662     if (!host_mb) {
3663         ret = -TARGET_ENOMEM;
3664         goto end;
3665     }
3666     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3667 
3668     if (ret > 0) {
3669         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3670         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3671         if (!target_mtext) {
3672             ret = -TARGET_EFAULT;
3673             goto end;
3674         }
3675         memcpy(target_mb->mtext, host_mb->mtext, ret);
3676         unlock_user(target_mtext, target_mtext_addr, ret);
3677     }
3678 
3679     target_mb->mtype = tswapal(host_mb->mtype);
3680 
3681 end:
3682     if (target_mb)
3683         unlock_user_struct(target_mb, msgp, 1);
3684     g_free(host_mb);
3685     return ret;
3686 }
3687 
3688 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3689                                                abi_ulong target_addr)
3690 {
3691     struct target_shmid_ds *target_sd;
3692 
3693     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3694         return -TARGET_EFAULT;
3695     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3696         return -TARGET_EFAULT;
3697     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3698     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3699     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3700     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3701     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3702     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3703     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3704     unlock_user_struct(target_sd, target_addr, 0);
3705     return 0;
3706 }
3707 
3708 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3709                                                struct shmid_ds *host_sd)
3710 {
3711     struct target_shmid_ds *target_sd;
3712 
3713     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3714         return -TARGET_EFAULT;
3715     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3716         return -TARGET_EFAULT;
3717     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3718     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3719     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3720     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3721     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3722     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3723     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3724     unlock_user_struct(target_sd, target_addr, 1);
3725     return 0;
3726 }
3727 
3728 struct target_shminfo {
3729     abi_ulong shmmax;
3730     abi_ulong shmmin;
3731     abi_ulong shmmni;
3732     abi_ulong shmseg;
3733     abi_ulong shmall;
3734 };
3735 
3736 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3737                                               struct shminfo *host_shminfo)
3738 {
3739     struct target_shminfo *target_shminfo;
3740     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3741         return -TARGET_EFAULT;
3742     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3743     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3744     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3745     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3746     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3747     unlock_user_struct(target_shminfo, target_addr, 1);
3748     return 0;
3749 }
3750 
3751 struct target_shm_info {
3752     int used_ids;
3753     abi_ulong shm_tot;
3754     abi_ulong shm_rss;
3755     abi_ulong shm_swp;
3756     abi_ulong swap_attempts;
3757     abi_ulong swap_successes;
3758 };
3759 
3760 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3761                                                struct shm_info *host_shm_info)
3762 {
3763     struct target_shm_info *target_shm_info;
3764     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3765         return -TARGET_EFAULT;
3766     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3767     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3768     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3769     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3770     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3771     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3772     unlock_user_struct(target_shm_info, target_addr, 1);
3773     return 0;
3774 }
3775 
3776 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3777 {
3778     struct shmid_ds dsarg;
3779     struct shminfo shminfo;
3780     struct shm_info shm_info;
3781     abi_long ret = -TARGET_EINVAL;
3782 
3783     cmd &= 0xff;
3784 
3785     switch(cmd) {
3786     case IPC_STAT:
3787     case IPC_SET:
3788     case SHM_STAT:
3789         if (target_to_host_shmid_ds(&dsarg, buf))
3790             return -TARGET_EFAULT;
3791         ret = get_errno(shmctl(shmid, cmd, &dsarg));
3792         if (host_to_target_shmid_ds(buf, &dsarg))
3793             return -TARGET_EFAULT;
3794         break;
3795     case IPC_INFO:
3796         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3797         if (host_to_target_shminfo(buf, &shminfo))
3798             return -TARGET_EFAULT;
3799         break;
3800     case SHM_INFO:
3801         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3802         if (host_to_target_shm_info(buf, &shm_info))
3803             return -TARGET_EFAULT;
3804         break;
3805     case IPC_RMID:
3806     case SHM_LOCK:
3807     case SHM_UNLOCK:
3808         ret = get_errno(shmctl(shmid, cmd, NULL));
3809         break;
3810     }
3811 
3812     return ret;
3813 }
3814 
3815 #ifndef TARGET_FORCE_SHMLBA
3816 /* For most architectures, SHMLBA is the same as the page size;
3817  * some architectures have larger values, in which case they should
3818  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3819  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3820  * and defining its own value for SHMLBA.
3821  *
3822  * The kernel also permits SHMLBA to be set by the architecture to a
3823  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3824  * this means that addresses are rounded to the large size if
3825  * SHM_RND is set but addresses not aligned to that size are not rejected
3826  * as long as they are at least page-aligned. Since the only architecture
3827  * which uses this is ia64 this code doesn't provide for that oddity.
3828  */
3829 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
3830 {
3831     return TARGET_PAGE_SIZE;
3832 }
3833 #endif
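/*
 * Illustrative sketch (hypothetical, not taken from this file): an
 * architecture with a stricter alignment requirement would define
 * TARGET_FORCE_SHMLBA in its target headers and provide its own helper,
 * along the lines of:
 *
 *     #define TARGET_FORCE_SHMLBA 1
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 4 * TARGET_PAGE_SIZE;   // example value only
 *     }
 */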
3834 
3835 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
3836                                  int shmid, abi_ulong shmaddr, int shmflg)
3837 {
3838     abi_long raddr;
3839     void *host_raddr;
3840     struct shmid_ds shm_info;
3841     int i, ret;
3842     abi_ulong shmlba;
3843 
3844     /* find out the length of the shared memory segment */
3845     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3846     if (is_error(ret)) {
3847         /* can't get length, bail out */
3848         return ret;
3849     }
3850 
3851     shmlba = target_shmlba(cpu_env);
3852 
3853     if (shmaddr & (shmlba - 1)) {
3854         if (shmflg & SHM_RND) {
3855             shmaddr &= ~(shmlba - 1);
3856         } else {
3857             return -TARGET_EINVAL;
3858         }
3859     }
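    /*
     * Worked example (added commentary): with shmlba == 0x1000, a request
     * for shmaddr == 0x12345 with SHM_RND set is rounded down to 0x12000
     * above, while without SHM_RND the same address is rejected with
     * -TARGET_EINVAL.
     */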
3860     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
3861         return -TARGET_EINVAL;
3862     }
3863 
3864     mmap_lock();
3865 
3866     if (shmaddr)
3867         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3868     else {
3869         abi_ulong mmap_start;
3870 
3871         mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3872 
3873         if (mmap_start == -1) {
3874             errno = ENOMEM;
3875             host_raddr = (void *)-1;
3876         } else
3877             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3878     }
3879 
3880     if (host_raddr == (void *)-1) {
3881         mmap_unlock();
3882         return get_errno((long)host_raddr);
3883     }
3884     raddr = h2g((unsigned long)host_raddr);
3885 
3886     page_set_flags(raddr, raddr + shm_info.shm_segsz,
3887                    PAGE_VALID | PAGE_READ |
3888                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3889 
3890     for (i = 0; i < N_SHM_REGIONS; i++) {
3891         if (!shm_regions[i].in_use) {
3892             shm_regions[i].in_use = true;
3893             shm_regions[i].start = raddr;
3894             shm_regions[i].size = shm_info.shm_segsz;
3895             break;
3896         }
3897     }
3898 
3899     mmap_unlock();
3900     return raddr;
3901 
3902 }
3903 
3904 static inline abi_long do_shmdt(abi_ulong shmaddr)
3905 {
3906     int i;
3907     abi_long rv;
3908 
3909     mmap_lock();
3910 
3911     for (i = 0; i < N_SHM_REGIONS; ++i) {
3912         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3913             shm_regions[i].in_use = false;
3914             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3915             break;
3916         }
3917     }
3918     rv = get_errno(shmdt(g2h(shmaddr)));
3919 
3920     mmap_unlock();
3921 
3922     return rv;
3923 }
3924 
3925 #ifdef TARGET_NR_ipc
3926 /* ??? This only works with linear mappings.  */
3927 /* do_ipc() must return target values and target errnos. */
3928 static abi_long do_ipc(CPUArchState *cpu_env,
3929                        unsigned int call, abi_long first,
3930                        abi_long second, abi_long third,
3931                        abi_long ptr, abi_long fifth)
3932 {
3933     int version;
3934     abi_long ret = 0;
3935 
3936     version = call >> 16;
3937     call &= 0xffff;
3938 
3939     switch (call) {
3940     case IPCOP_semop:
3941         ret = do_semop(first, ptr, second);
3942         break;
3943 
3944     case IPCOP_semget:
3945         ret = get_errno(semget(first, second, third));
3946         break;
3947 
3948     case IPCOP_semctl: {
3949         /* The semun argument to semctl is passed by value, so dereference the
3950          * ptr argument. */
3951         abi_ulong atptr;
3952         get_user_ual(atptr, ptr);
3953         ret = do_semctl(first, second, third, atptr);
3954         break;
3955     }
3956 
3957     case IPCOP_msgget:
3958         ret = get_errno(msgget(first, second));
3959         break;
3960 
3961     case IPCOP_msgsnd:
3962         ret = do_msgsnd(first, ptr, second, third);
3963         break;
3964 
3965     case IPCOP_msgctl:
3966         ret = do_msgctl(first, second, ptr);
3967         break;
3968 
3969     case IPCOP_msgrcv:
3970         switch (version) {
3971         case 0:
3972             {
3973                 struct target_ipc_kludge {
3974                     abi_long msgp;
3975                     abi_long msgtyp;
3976                 } *tmp;
3977 
3978                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3979                     ret = -TARGET_EFAULT;
3980                     break;
3981                 }
3982 
3983                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3984 
3985                 unlock_user_struct(tmp, ptr, 0);
3986                 break;
3987             }
3988         default:
3989             ret = do_msgrcv(first, ptr, second, fifth, third);
3990         }
3991         break;
3992 
3993     case IPCOP_shmat:
3994         switch (version) {
3995         default:
3996         {
3997             abi_ulong raddr;
3998             raddr = do_shmat(cpu_env, first, ptr, second);
3999             if (is_error(raddr))
4000                 return get_errno(raddr);
4001             if (put_user_ual(raddr, third))
4002                 return -TARGET_EFAULT;
4003             break;
4004         }
4005         case 1:
4006             ret = -TARGET_EINVAL;
4007             break;
4008         }
4009         break;
4010     case IPCOP_shmdt:
4011         ret = do_shmdt(ptr);
4012         break;
4013 
4014     case IPCOP_shmget:
4015         /* IPC_* flag values are the same on all linux platforms */
4016         ret = get_errno(shmget(first, second, third));
4017         break;
4018 
4019         /* IPC_* and SHM_* command values are the same on all linux platforms */
4020     case IPCOP_shmctl:
4021         ret = do_shmctl(first, second, ptr);
4022         break;
4023     default:
4024         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4025         ret = -TARGET_ENOSYS;
4026         break;
4027     }
4028     return ret;
4029 }
4030 #endif
4031 
4032 /* kernel structure types definitions */
4033 
4034 #define STRUCT(name, ...) STRUCT_ ## name,
4035 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4036 enum {
4037 #include "syscall_types.h"
4038 STRUCT_MAX
4039 };
4040 #undef STRUCT
4041 #undef STRUCT_SPECIAL
4042 
4043 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4044 #define STRUCT_SPECIAL(name)
4045 #include "syscall_types.h"
4046 #undef STRUCT
4047 #undef STRUCT_SPECIAL
4048 
4049 typedef struct IOCTLEntry IOCTLEntry;
4050 
4051 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4052                              int fd, int cmd, abi_long arg);
4053 
4054 struct IOCTLEntry {
4055     int target_cmd;
4056     unsigned int host_cmd;
4057     const char *name;
4058     int access;
4059     do_ioctl_fn *do_ioctl;
4060     const argtype arg_type[5];
4061 };
4062 
4063 #define IOC_R 0x0001
4064 #define IOC_W 0x0002
4065 #define IOC_RW (IOC_R | IOC_W)
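/*
 * Added commentary (illustrative): each entry pairs a target ioctl number
 * with its host counterpart plus either a thunk description of the argument
 * or a special-case do_ioctl_fn handler such as do_ioctl_fs_ioc_fiemap()
 * below.  Written out by hand, an entry would look roughly like:
 *
 *     { TARGET_FS_IOC_FIEMAP, FS_IOC_FIEMAP, "FS_IOC_FIEMAP", IOC_RW,
 *       do_ioctl_fs_ioc_fiemap, { MK_PTR(MK_STRUCT(STRUCT_fiemap)) } },
 *
 * (the actual table is built elsewhere in this file from ioctls.h rather
 * than written out by hand).
 */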
4066 
4067 #define MAX_STRUCT_SIZE 4096
4068 
4069 #ifdef CONFIG_FIEMAP
4070 /* So fiemap access checks don't overflow on 32 bit systems.
4071  * This is very slightly smaller than the limit imposed by
4072  * the underlying kernel.
4073  */
4074 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4075                             / sizeof(struct fiemap_extent))
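/*
 * Added commentary: do_ioctl_fs_ioc_fiemap() below computes
 *
 *     outbufsz = sizeof(*fm) + sizeof(struct fiemap_extent) * fm->fm_extent_count;
 *
 * in a uint32_t, so without this cap a guest-supplied fm_extent_count close
 * to UINT_MAX could wrap the multiplication and undersize the buffer; such
 * requests are rejected with -TARGET_EINVAL instead.
 */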
4076 
4077 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4078                                        int fd, int cmd, abi_long arg)
4079 {
4080     /* The parameter for this ioctl is a struct fiemap followed
4081      * by an array of struct fiemap_extent whose size is set
4082      * in fiemap->fm_extent_count. The array is filled in by the
4083      * ioctl.
4084      */
4085     int target_size_in, target_size_out;
4086     struct fiemap *fm;
4087     const argtype *arg_type = ie->arg_type;
4088     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4089     void *argptr, *p;
4090     abi_long ret;
4091     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4092     uint32_t outbufsz;
4093     int free_fm = 0;
4094 
4095     assert(arg_type[0] == TYPE_PTR);
4096     assert(ie->access == IOC_RW);
4097     arg_type++;
4098     target_size_in = thunk_type_size(arg_type, 0);
4099     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4100     if (!argptr) {
4101         return -TARGET_EFAULT;
4102     }
4103     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4104     unlock_user(argptr, arg, 0);
4105     fm = (struct fiemap *)buf_temp;
4106     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4107         return -TARGET_EINVAL;
4108     }
4109 
4110     outbufsz = sizeof (*fm) +
4111         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4112 
4113     if (outbufsz > MAX_STRUCT_SIZE) {
4114         /* We can't fit all the extents into the fixed size buffer.
4115          * Allocate one that is large enough and use it instead.
4116          */
4117         fm = g_try_malloc(outbufsz);
4118         if (!fm) {
4119             return -TARGET_ENOMEM;
4120         }
4121         memcpy(fm, buf_temp, sizeof(struct fiemap));
4122         free_fm = 1;
4123     }
4124     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4125     if (!is_error(ret)) {
4126         target_size_out = target_size_in;
4127         /* An extent_count of 0 means we were only counting the extents
4128          * so there are no structs to copy
4129          */
4130         if (fm->fm_extent_count != 0) {
4131             target_size_out += fm->fm_mapped_extents * extent_size;
4132         }
4133         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4134         if (!argptr) {
4135             ret = -TARGET_EFAULT;
4136         } else {
4137             /* Convert the struct fiemap */
4138             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4139             if (fm->fm_extent_count != 0) {
4140                 p = argptr + target_size_in;
4141                 /* ...and then all the struct fiemap_extents */
4142                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4143                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4144                                   THUNK_TARGET);
4145                     p += extent_size;
4146                 }
4147             }
4148             unlock_user(argptr, arg, target_size_out);
4149         }
4150     }
4151     if (free_fm) {
4152         g_free(fm);
4153     }
4154     return ret;
4155 }
4156 #endif
4157 
4158 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4159                                 int fd, int cmd, abi_long arg)
4160 {
4161     const argtype *arg_type = ie->arg_type;
4162     int target_size;
4163     void *argptr;
4164     int ret;
4165     struct ifconf *host_ifconf;
4166     uint32_t outbufsz;
4167     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4168     int target_ifreq_size;
4169     int nb_ifreq;
4170     int free_buf = 0;
4171     int i;
4172     int target_ifc_len;
4173     abi_long target_ifc_buf;
4174     int host_ifc_len;
4175     char *host_ifc_buf;
4176 
4177     assert(arg_type[0] == TYPE_PTR);
4178     assert(ie->access == IOC_RW);
4179 
4180     arg_type++;
4181     target_size = thunk_type_size(arg_type, 0);
4182 
4183     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4184     if (!argptr)
4185         return -TARGET_EFAULT;
4186     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4187     unlock_user(argptr, arg, 0);
4188 
4189     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4190     target_ifc_len = host_ifconf->ifc_len;
4191     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4192 
4193     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4194     nb_ifreq = target_ifc_len / target_ifreq_size;
4195     host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4196 
4197     outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4198     if (outbufsz > MAX_STRUCT_SIZE) {
4199         /* We can't fit all the ifreq entries into the fixed size buffer.
4200          * Allocate one that is large enough and use it instead.
4201          */
4202         host_ifconf = malloc(outbufsz);
4203         if (!host_ifconf) {
4204             return -TARGET_ENOMEM;
4205         }
4206         memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4207         free_buf = 1;
4208     }
4209     host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
4210 
4211     host_ifconf->ifc_len = host_ifc_len;
4212     host_ifconf->ifc_buf = host_ifc_buf;
4213 
4214     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4215     if (!is_error(ret)) {
4216         /* convert host ifc_len to target ifc_len */
4217 
4218         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4219         target_ifc_len = nb_ifreq * target_ifreq_size;
4220         host_ifconf->ifc_len = target_ifc_len;
4221 
4222         /* restore target ifc_buf */
4223 
4224         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4225 
4226         /* copy struct ifconf to target user */
4227 
4228         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4229         if (!argptr)
4230             return -TARGET_EFAULT;
4231         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4232         unlock_user(argptr, arg, target_size);
4233 
4234         /* copy ifreq[] to target user */
4235 
4236         argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4237         for (i = 0; i < nb_ifreq ; i++) {
4238             thunk_convert(argptr + i * target_ifreq_size,
4239                           host_ifc_buf + i * sizeof(struct ifreq),
4240                           ifreq_arg_type, THUNK_TARGET);
4241         }
4242         unlock_user(argptr, target_ifc_buf, target_ifc_len);
4243     }
4244 
4245     if (free_buf) {
4246         free(host_ifconf);
4247     }
4248 
4249     return ret;
4250 }
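/*
 * Added commentary (illustrative): guest and host "struct ifreq" may differ
 * in size, so the conversion above is done entry by entry.  If the guest
 * passes ifc_len == 3 * target_ifreq_size, three host ifreq slots are
 * allocated, and on return ifc_len is rewritten as
 * nb_ifreq * target_ifreq_size so the guest sees a length in its own units.
 */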
4251 
4252 #if defined(CONFIG_USBFS)
4253 #if HOST_LONG_BITS > 64
4254 #error USBDEVFS thunks do not support >64 bit hosts yet.
4255 #endif
4256 struct live_urb {
4257     uint64_t target_urb_adr;
4258     uint64_t target_buf_adr;
4259     char *target_buf_ptr;
4260     struct usbdevfs_urb host_urb;
4261 };
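/*
 * Added commentary (summarising the code below): each USBDEVFS_SUBMITURB
 * allocates one live_urb, which keeps the guest URB address, the guest
 * buffer address and the locked host buffer pointer next to the host
 * usbdevfs_urb that is actually handed to the kernel.  The struct is
 * inserted into a hash table keyed on target_urb_adr so that
 * USBDEVFS_DISCARDURB can find the host URB again, while USBDEVFS_REAPURB
 * recovers the live_urb from the kernel-returned usbdevfs_urb pointer via
 * offsetof(struct live_urb, host_urb) and then frees it.
 */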
4262 
4263 static GHashTable *usbdevfs_urb_hashtable(void)
4264 {
4265     static GHashTable *urb_hashtable;
4266 
4267     if (!urb_hashtable) {
4268         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4269     }
4270     return urb_hashtable;
4271 }
4272 
4273 static void urb_hashtable_insert(struct live_urb *urb)
4274 {
4275     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4276     g_hash_table_insert(urb_hashtable, urb, urb);
4277 }
4278 
4279 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4280 {
4281     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4282     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4283 }
4284 
4285 static void urb_hashtable_remove(struct live_urb *urb)
4286 {
4287     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4288     g_hash_table_remove(urb_hashtable, urb);
4289 }
4290 
4291 static abi_long
4292 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4293                           int fd, int cmd, abi_long arg)
4294 {
4295     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4296     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4297     struct live_urb *lurb;
4298     void *argptr;
4299     uint64_t hurb;
4300     int target_size;
4301     uintptr_t target_urb_adr;
4302     abi_long ret;
4303 
4304     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4305 
4306     memset(buf_temp, 0, sizeof(uint64_t));
4307     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4308     if (is_error(ret)) {
4309         return ret;
4310     }
4311 
4312     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4313     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4314     if (!lurb->target_urb_adr) {
4315         return -TARGET_EFAULT;
4316     }
4317     urb_hashtable_remove(lurb);
4318     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4319         lurb->host_urb.buffer_length);
4320     lurb->target_buf_ptr = NULL;
4321 
4322     /* restore the guest buffer pointer */
4323     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4324 
4325     /* update the guest urb struct */
4326     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4327     if (!argptr) {
4328         g_free(lurb);
4329         return -TARGET_EFAULT;
4330     }
4331     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4332     unlock_user(argptr, lurb->target_urb_adr, target_size);
4333 
4334     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4335     /* write back the urb handle */
4336     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4337     if (!argptr) {
4338         g_free(lurb);
4339         return -TARGET_EFAULT;
4340     }
4341 
4342     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4343     target_urb_adr = lurb->target_urb_adr;
4344     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4345     unlock_user(argptr, arg, target_size);
4346 
4347     g_free(lurb);
4348     return ret;
4349 }
4350 
4351 static abi_long
4352 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4353                              uint8_t *buf_temp __attribute__((unused)),
4354                              int fd, int cmd, abi_long arg)
4355 {
4356     struct live_urb *lurb;
4357 
4358     /* map target address back to host URB with metadata. */
4359     lurb = urb_hashtable_lookup(arg);
4360     if (!lurb) {
4361         return -TARGET_EFAULT;
4362     }
4363     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4364 }
4365 
4366 static abi_long
4367 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4368                             int fd, int cmd, abi_long arg)
4369 {
4370     const argtype *arg_type = ie->arg_type;
4371     int target_size;
4372     abi_long ret;
4373     void *argptr;
4374     int rw_dir;
4375     struct live_urb *lurb;
4376 
4377     /*
4378      * Each submitted URB needs to map to a unique ID for the
4379      * kernel, and that unique ID needs to be a pointer to
4380      * host memory.  Hence, we need to malloc for each URB.
4381      * Isochronous transfers have a variable length struct.
4382      */
4383     arg_type++;
4384     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4385 
4386     /* construct host copy of urb and metadata */
4387     lurb = g_try_malloc0(sizeof(struct live_urb));
4388     if (!lurb) {
4389         return -TARGET_ENOMEM;
4390     }
4391 
4392     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4393     if (!argptr) {
4394         g_free(lurb);
4395         return -TARGET_EFAULT;
4396     }
4397     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4398     unlock_user(argptr, arg, 0);
4399 
4400     lurb->target_urb_adr = arg;
4401     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4402 
4403     /* buffer space used depends on endpoint type so lock the entire buffer */
4404     /* control type urbs should check the buffer contents for true direction */
4405     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4406     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4407         lurb->host_urb.buffer_length, 1);
4408     if (lurb->target_buf_ptr == NULL) {
4409         g_free(lurb);
4410         return -TARGET_EFAULT;
4411     }
4412 
4413     /* update buffer pointer in host copy */
4414     lurb->host_urb.buffer = lurb->target_buf_ptr;
4415 
4416     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4417     if (is_error(ret)) {
4418         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4419         g_free(lurb);
4420     } else {
4421         urb_hashtable_insert(lurb);
4422     }
4423 
4424     return ret;
4425 }
4426 #endif /* CONFIG_USBFS */
4427 
4428 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4429                             int cmd, abi_long arg)
4430 {
4431     void *argptr;
4432     struct dm_ioctl *host_dm;
4433     abi_long guest_data;
4434     uint32_t guest_data_size;
4435     int target_size;
4436     const argtype *arg_type = ie->arg_type;
4437     abi_long ret;
4438     void *big_buf = NULL;
4439     char *host_data;
4440 
4441     arg_type++;
4442     target_size = thunk_type_size(arg_type, 0);
4443     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4444     if (!argptr) {
4445         ret = -TARGET_EFAULT;
4446         goto out;
4447     }
4448     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4449     unlock_user(argptr, arg, 0);
4450 
4451     /* buf_temp is too small, so fetch things into a bigger buffer */
4452     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4453     memcpy(big_buf, buf_temp, target_size);
4454     buf_temp = big_buf;
4455     host_dm = big_buf;
4456 
4457     guest_data = arg + host_dm->data_start;
4458     if ((guest_data - arg) < 0) {
4459         ret = -TARGET_EINVAL;
4460         goto out;
4461     }
4462     guest_data_size = host_dm->data_size - host_dm->data_start;
4463     host_data = (char*)host_dm + host_dm->data_start;
4464 
4465     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4466     if (!argptr) {
4467         ret = -TARGET_EFAULT;
4468         goto out;
4469     }
4470 
4471     switch (ie->host_cmd) {
4472     case DM_REMOVE_ALL:
4473     case DM_LIST_DEVICES:
4474     case DM_DEV_CREATE:
4475     case DM_DEV_REMOVE:
4476     case DM_DEV_SUSPEND:
4477     case DM_DEV_STATUS:
4478     case DM_DEV_WAIT:
4479     case DM_TABLE_STATUS:
4480     case DM_TABLE_CLEAR:
4481     case DM_TABLE_DEPS:
4482     case DM_LIST_VERSIONS:
4483         /* no input data */
4484         break;
4485     case DM_DEV_RENAME:
4486     case DM_DEV_SET_GEOMETRY:
4487         /* data contains only strings */
4488         memcpy(host_data, argptr, guest_data_size);
4489         break;
4490     case DM_TARGET_MSG:
4491         memcpy(host_data, argptr, guest_data_size);
4492         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4493         break;
4494     case DM_TABLE_LOAD:
4495     {
4496         void *gspec = argptr;
4497         void *cur_data = host_data;
4498         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4499         int spec_size = thunk_type_size(arg_type, 0);
4500         int i;
4501 
4502         for (i = 0; i < host_dm->target_count; i++) {
4503             struct dm_target_spec *spec = cur_data;
4504             uint32_t next;
4505             int slen;
4506 
4507             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4508             slen = strlen((char*)gspec + spec_size) + 1;
4509             next = spec->next;
4510             spec->next = sizeof(*spec) + slen;
4511             strcpy((char*)&spec[1], gspec + spec_size);
4512             gspec += next;
4513             cur_data += spec->next;
4514         }
4515         break;
4516     }
4517     default:
4518         ret = -TARGET_EINVAL;
4519         unlock_user(argptr, guest_data, 0);
4520         goto out;
4521     }
4522     unlock_user(argptr, guest_data, 0);
4523 
4524     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4525     if (!is_error(ret)) {
4526         guest_data = arg + host_dm->data_start;
4527         guest_data_size = host_dm->data_size - host_dm->data_start;
4528         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4529         switch (ie->host_cmd) {
4530         case DM_REMOVE_ALL:
4531         case DM_DEV_CREATE:
4532         case DM_DEV_REMOVE:
4533         case DM_DEV_RENAME:
4534         case DM_DEV_SUSPEND:
4535         case DM_DEV_STATUS:
4536         case DM_TABLE_LOAD:
4537         case DM_TABLE_CLEAR:
4538         case DM_TARGET_MSG:
4539         case DM_DEV_SET_GEOMETRY:
4540             /* no return data */
4541             break;
4542         case DM_LIST_DEVICES:
4543         {
4544             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4545             uint32_t remaining_data = guest_data_size;
4546             void *cur_data = argptr;
4547             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4548             int nl_size = 12; /* can't use thunk_size due to alignment */
4549 
4550             while (1) {
4551                 uint32_t next = nl->next;
4552                 if (next) {
4553                     nl->next = nl_size + (strlen(nl->name) + 1);
4554                 }
4555                 if (remaining_data < nl->next) {
4556                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4557                     break;
4558                 }
4559                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4560                 strcpy(cur_data + nl_size, nl->name);
4561                 cur_data += nl->next;
4562                 remaining_data -= nl->next;
4563                 if (!next) {
4564                     break;
4565                 }
4566                 nl = (void*)nl + next;
4567             }
4568             break;
4569         }
4570         case DM_DEV_WAIT:
4571         case DM_TABLE_STATUS:
4572         {
4573             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4574             void *cur_data = argptr;
4575             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4576             int spec_size = thunk_type_size(arg_type, 0);
4577             int i;
4578 
4579             for (i = 0; i < host_dm->target_count; i++) {
4580                 uint32_t next = spec->next;
4581                 int slen = strlen((char*)&spec[1]) + 1;
4582                 spec->next = (cur_data - argptr) + spec_size + slen;
4583                 if (guest_data_size < spec->next) {
4584                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4585                     break;
4586                 }
4587                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4588                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4589                 cur_data = argptr + spec->next;
4590                 spec = (void*)host_dm + host_dm->data_start + next;
4591             }
4592             break;
4593         }
4594         case DM_TABLE_DEPS:
4595         {
4596             void *hdata = (void*)host_dm + host_dm->data_start;
4597             int count = *(uint32_t*)hdata;
4598             uint64_t *hdev = hdata + 8;
4599             uint64_t *gdev = argptr + 8;
4600             int i;
4601 
4602             *(uint32_t*)argptr = tswap32(count);
4603             for (i = 0; i < count; i++) {
4604                 *gdev = tswap64(*hdev);
4605                 gdev++;
4606                 hdev++;
4607             }
4608             break;
4609         }
4610         case DM_LIST_VERSIONS:
4611         {
4612             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4613             uint32_t remaining_data = guest_data_size;
4614             void *cur_data = argptr;
4615             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4616             int vers_size = thunk_type_size(arg_type, 0);
4617 
4618             while (1) {
4619                 uint32_t next = vers->next;
4620                 if (next) {
4621                     vers->next = vers_size + (strlen(vers->name) + 1);
4622                 }
4623                 if (remaining_data < vers->next) {
4624                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4625                     break;
4626                 }
4627                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4628                 strcpy(cur_data + vers_size, vers->name);
4629                 cur_data += vers->next;
4630                 remaining_data -= vers->next;
4631                 if (!next) {
4632                     break;
4633                 }
4634                 vers = (void*)vers + next;
4635             }
4636             break;
4637         }
4638         default:
4639             unlock_user(argptr, guest_data, 0);
4640             ret = -TARGET_EINVAL;
4641             goto out;
4642         }
4643         unlock_user(argptr, guest_data, guest_data_size);
4644 
4645         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4646         if (!argptr) {
4647             ret = -TARGET_EFAULT;
4648             goto out;
4649         }
4650         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4651         unlock_user(argptr, arg, target_size);
4652     }
4653 out:
4654     g_free(big_buf);
4655     return ret;
4656 }
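/*
 * Note on the DM_* result conversion above: these ioctls return
 * variable-length payloads after the dm_ioctl header (dm_name_list,
 * dm_target_spec and dm_target_versions records chained via 'next'
 * offsets, plus the raw device array for DM_TABLE_DEPS).  Each record is
 * thunk-converted into the guest buffer, its trailing name string is
 * copied verbatim and its 'next' offset is recomputed for the guest
 * layout; if the guest buffer is too small, DM_BUFFER_FULL_FLAG is set
 * and the walk stops.
 */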
4657 
4658 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4659                                int cmd, abi_long arg)
4660 {
4661     void *argptr;
4662     int target_size;
4663     const argtype *arg_type = ie->arg_type;
4664     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4665     abi_long ret;
4666 
4667     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4668     struct blkpg_partition host_part;
4669 
4670     /* Read and convert blkpg */
4671     arg_type++;
4672     target_size = thunk_type_size(arg_type, 0);
4673     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4674     if (!argptr) {
4675         ret = -TARGET_EFAULT;
4676         goto out;
4677     }
4678     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4679     unlock_user(argptr, arg, 0);
4680 
4681     switch (host_blkpg->op) {
4682     case BLKPG_ADD_PARTITION:
4683     case BLKPG_DEL_PARTITION:
4684         /* payload is struct blkpg_partition */
4685         break;
4686     default:
4687         /* Unknown opcode */
4688         ret = -TARGET_EINVAL;
4689         goto out;
4690     }
4691 
4692     /* Read and convert blkpg->data */
4693     arg = (abi_long)(uintptr_t)host_blkpg->data;
4694     target_size = thunk_type_size(part_arg_type, 0);
4695     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4696     if (!argptr) {
4697         ret = -TARGET_EFAULT;
4698         goto out;
4699     }
4700     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4701     unlock_user(argptr, arg, 0);
4702 
4703     /* Swizzle the data pointer to our local copy and call! */
4704     host_blkpg->data = &host_part;
4705     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4706 
4707 out:
4708     return ret;
4709 }
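/*
 * Illustrative guest-side call (hypothetical values) that exercises
 * do_ioctl_blkpg() above.  Note the double indirection being handled:
 * the struct blkpg_ioctl_arg itself and the struct blkpg_partition it
 * points to are converted separately.
 *
 *     struct blkpg_partition part = { .start = 0, .length = SIZE, .pno = 1 };
 *     struct blkpg_ioctl_arg arg = {
 *         .op = BLKPG_ADD_PARTITION,
 *         .datalen = sizeof(part),
 *         .data = &part,
 *     };
 *     ioctl(fd, BLKPG, &arg);
 */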
4710 
4711 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4712                                 int fd, int cmd, abi_long arg)
4713 {
4714     const argtype *arg_type = ie->arg_type;
4715     const StructEntry *se;
4716     const argtype *field_types;
4717     const int *dst_offsets, *src_offsets;
4718     int target_size;
4719     void *argptr;
4720     abi_ulong *target_rt_dev_ptr;
4721     unsigned long *host_rt_dev_ptr;
4722     abi_long ret;
4723     int i;
4724 
4725     assert(ie->access == IOC_W);
4726     assert(*arg_type == TYPE_PTR);
4727     arg_type++;
4728     assert(*arg_type == TYPE_STRUCT);
4729     target_size = thunk_type_size(arg_type, 0);
4730     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4731     if (!argptr) {
4732         return -TARGET_EFAULT;
4733     }
4734     arg_type++;
4735     assert(*arg_type == (int)STRUCT_rtentry);
4736     se = struct_entries + *arg_type++;
4737     assert(se->convert[0] == NULL);
4738     /* Convert the struct here so that we can catch the rt_dev string. */
4739     field_types = se->field_types;
4740     dst_offsets = se->field_offsets[THUNK_HOST];
4741     src_offsets = se->field_offsets[THUNK_TARGET];
4742     for (i = 0; i < se->nb_fields; i++) {
4743         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4744             assert(*field_types == TYPE_PTRVOID);
4745             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4746             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4747             if (*target_rt_dev_ptr != 0) {
4748                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4749                                                   tswapal(*target_rt_dev_ptr));
4750                 if (!*host_rt_dev_ptr) {
4751                     unlock_user(argptr, arg, 0);
4752                     return -TARGET_EFAULT;
4753                 }
4754             } else {
4755                 *host_rt_dev_ptr = 0;
4756             }
4757             field_types++;
4758             continue;
4759         }
4760         field_types = thunk_convert(buf_temp + dst_offsets[i],
4761                                     argptr + src_offsets[i],
4762                                     field_types, THUNK_HOST);
4763     }
4764     unlock_user(argptr, arg, 0);
4765 
4766     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4767     if (*host_rt_dev_ptr != 0) {
4768         unlock_user((void *)*host_rt_dev_ptr,
4769                     *target_rt_dev_ptr, 0);
4770     }
4771     return ret;
4772 }
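/*
 * Note on do_ioctl_rt() above: SIOCADDRT/SIOCDELRT take a struct rtentry
 * whose rt_dev member is a pointer to a device-name string rather than
 * inline data, so the generic thunk conversion cannot handle it.  The
 * loop converts every other field with thunk_convert() but, for rt_dev,
 * locks the guest string and substitutes the resulting host pointer,
 * unlocking it again once the ioctl has been issued.
 */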
4773 
4774 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4775                                      int fd, int cmd, abi_long arg)
4776 {
4777     int sig = target_to_host_signal(arg);
4778     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4779 }
4780 
4781 #ifdef TIOCGPTPEER
4782 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
4783                                      int fd, int cmd, abi_long arg)
4784 {
4785     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
4786     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
4787 }
4788 #endif
4789 
4790 static IOCTLEntry ioctl_entries[] = {
4791 #define IOCTL(cmd, access, ...) \
4792     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
4793 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
4794     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
4795 #define IOCTL_IGNORE(cmd) \
4796     { TARGET_ ## cmd, 0, #cmd },
4797 #include "ioctls.h"
4798     { 0, 0, },
4799 };
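/*
 * Example expansion (illustrative, assuming the usual entry for
 * BLKGETSIZE64 in ioctls.h):
 *
 *     IOCTL(BLKGETSIZE64, IOC_R, MK_PTR(TYPE_ULONGLONG))
 *
 * becomes
 *
 *     { TARGET_BLKGETSIZE64, BLKGETSIZE64, "BLKGETSIZE64",
 *       IOC_R, 0, { MK_PTR(TYPE_ULONGLONG) } },
 *
 * i.e. target command number, host command number, name, access mode,
 * optional do_ioctl callback (0 here) and the argument type description.
 */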
4800 
4801 /* ??? Implement proper locking for ioctls.  */
4802 /* do_ioctl() must return target values and target errnos. */
4803 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4804 {
4805     const IOCTLEntry *ie;
4806     const argtype *arg_type;
4807     abi_long ret;
4808     uint8_t buf_temp[MAX_STRUCT_SIZE];
4809     int target_size;
4810     void *argptr;
4811 
4812     ie = ioctl_entries;
4813     for(;;) {
4814         if (ie->target_cmd == 0) {
4815             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4816             return -TARGET_ENOSYS;
4817         }
4818         if (ie->target_cmd == cmd)
4819             break;
4820         ie++;
4821     }
4822     arg_type = ie->arg_type;
4823     if (ie->do_ioctl) {
4824         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4825     } else if (!ie->host_cmd) {
4826         /* Some architectures define BSD ioctls in their headers
4827            that are not implemented in Linux.  */
4828         return -TARGET_ENOSYS;
4829     }
4830 
4831     switch(arg_type[0]) {
4832     case TYPE_NULL:
4833         /* no argument */
4834         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
4835         break;
4836     case TYPE_PTRVOID:
4837     case TYPE_INT:
4838         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
4839         break;
4840     case TYPE_PTR:
4841         arg_type++;
4842         target_size = thunk_type_size(arg_type, 0);
4843         switch(ie->access) {
4844         case IOC_R:
4845             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4846             if (!is_error(ret)) {
4847                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4848                 if (!argptr)
4849                     return -TARGET_EFAULT;
4850                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4851                 unlock_user(argptr, arg, target_size);
4852             }
4853             break;
4854         case IOC_W:
4855             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4856             if (!argptr)
4857                 return -TARGET_EFAULT;
4858             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4859             unlock_user(argptr, arg, 0);
4860             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4861             break;
4862         default:
4863         case IOC_RW:
4864             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4865             if (!argptr)
4866                 return -TARGET_EFAULT;
4867             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4868             unlock_user(argptr, arg, 0);
4869             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4870             if (!is_error(ret)) {
4871                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4872                 if (!argptr)
4873                     return -TARGET_EFAULT;
4874                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4875                 unlock_user(argptr, arg, target_size);
4876             }
4877             break;
4878         }
4879         break;
4880     default:
4881         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4882                  (long)cmd, arg_type[0]);
4883         ret = -TARGET_ENOSYS;
4884         break;
4885     }
4886     return ret;
4887 }
4888 
4889 static const bitmask_transtbl iflag_tbl[] = {
4890         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4891         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4892         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4893         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4894         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4895         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4896         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4897         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4898         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4899         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4900         { TARGET_IXON, TARGET_IXON, IXON, IXON },
4901         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4902         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4903         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4904         { 0, 0, 0, 0 }
4905 };
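/*
 * Each bitmask_transtbl row is { target_mask, target_bits, host_mask,
 * host_bits }: target_to_host_bitmask() ORs in host_bits whenever
 * (value & target_mask) == target_bits, and host_to_target_bitmask()
 * applies the same rule in the opposite direction.  Single-bit flags
 * such as IGNBRK use the same value in both columns of a pair, while
 * multi-bit fields such as the delay and baud-rate selections below
 * list one row per possible value.
 */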
4906 
4907 static const bitmask_transtbl oflag_tbl[] = {
4908 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4909 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4910 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4911 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4912 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4913 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4914 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4915 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4916 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4917 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4918 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4919 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4920 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4921 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4922 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4923 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4924 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4925 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4926 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4927 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4928 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4929 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4930 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4931 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4932 	{ 0, 0, 0, 0 }
4933 };
4934 
4935 static const bitmask_transtbl cflag_tbl[] = {
4936 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4937 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4938 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4939 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4940 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4941 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4942 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4943 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4944 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4945 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4946 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
4947 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
4948 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
4949 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4950 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4951 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4952 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4953 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4954 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4955 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4956 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4957 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4958 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4959 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4960 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4961 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4962 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4963 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4964 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4965 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4966 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
4967 	{ 0, 0, 0, 0 }
4968 };
4969 
4970 static const bitmask_transtbl lflag_tbl[] = {
4971 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4972 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4973 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4974 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4975 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4976 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4977 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4978 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4979 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4980 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4981 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4982 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4983 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
4984 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
4985 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
4986 	{ 0, 0, 0, 0 }
4987 };
4988 
4989 static void target_to_host_termios (void *dst, const void *src)
4990 {
4991     struct host_termios *host = dst;
4992     const struct target_termios *target = src;
4993 
4994     host->c_iflag =
4995         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
4996     host->c_oflag =
4997         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
4998     host->c_cflag =
4999         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5000     host->c_lflag =
5001         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5002     host->c_line = target->c_line;
5003 
5004     memset(host->c_cc, 0, sizeof(host->c_cc));
5005     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5006     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5007     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5008     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5009     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5010     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5011     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5012     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5013     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5014     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5015     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5016     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5017     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5018     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5019     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5020     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5021     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5022 }
5023 
5024 static void host_to_target_termios (void *dst, const void *src)
5025 {
5026     struct target_termios *target = dst;
5027     const struct host_termios *host = src;
5028 
5029     target->c_iflag =
5030         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5031     target->c_oflag =
5032         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5033     target->c_cflag =
5034         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5035     target->c_lflag =
5036         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5037     target->c_line = host->c_line;
5038 
5039     memset(target->c_cc, 0, sizeof(target->c_cc));
5040     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5041     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5042     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5043     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5044     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5045     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5046     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5047     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5048     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5049     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5050     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5051     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5052     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5053     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5054     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5055     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5056     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5057 }
5058 
5059 static const StructEntry struct_termios_def = {
5060     .convert = { host_to_target_termios, target_to_host_termios },
5061     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5062     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5063 };
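/*
 * Registering struct_termios_def with explicit convert callbacks
 * (as STRUCT_SPECIAL in syscall_types.h) makes the thunk layer call
 * host_to_target_termios()/target_to_host_termios() for termios
 * arguments instead of doing a field-by-field copy, which is what keeps
 * the c_cc[] index remapping (TARGET_VINTR vs VINTR, etc.) correct for
 * TCGETS/TCSETS-style ioctls.
 */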
5064 
5065 static bitmask_transtbl mmap_flags_tbl[] = {
5066     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5067     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5068     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5069     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5070       MAP_ANONYMOUS, MAP_ANONYMOUS },
5071     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5072       MAP_GROWSDOWN, MAP_GROWSDOWN },
5073     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5074       MAP_DENYWRITE, MAP_DENYWRITE },
5075     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5076       MAP_EXECUTABLE, MAP_EXECUTABLE },
5077     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5078     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5079       MAP_NORESERVE, MAP_NORESERVE },
5080     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5081     /* MAP_STACK has been ignored by the kernel for quite some time.
5082        Recognize it for the target insofar as we do not want to pass
5083        it through to the host.  */
5084     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5085     { 0, 0, 0, 0 }
5086 };
5087 
5088 #if defined(TARGET_I386)
5089 
5090 /* NOTE: there is really only one LDT shared by all threads */
5091 static uint8_t *ldt_table;
5092 
5093 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5094 {
5095     int size;
5096     void *p;
5097 
5098     if (!ldt_table)
5099         return 0;
5100     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5101     if (size > bytecount)
5102         size = bytecount;
5103     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5104     if (!p)
5105         return -TARGET_EFAULT;
5106     /* ??? Should this be byteswapped?  */
5107     memcpy(p, ldt_table, size);
5108     unlock_user(p, ptr, size);
5109     return size;
5110 }
5111 
5112 /* XXX: add locking support */
5113 static abi_long write_ldt(CPUX86State *env,
5114                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5115 {
5116     struct target_modify_ldt_ldt_s ldt_info;
5117     struct target_modify_ldt_ldt_s *target_ldt_info;
5118     int seg_32bit, contents, read_exec_only, limit_in_pages;
5119     int seg_not_present, useable, lm;
5120     uint32_t *lp, entry_1, entry_2;
5121 
5122     if (bytecount != sizeof(ldt_info))
5123         return -TARGET_EINVAL;
5124     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5125         return -TARGET_EFAULT;
5126     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5127     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5128     ldt_info.limit = tswap32(target_ldt_info->limit);
5129     ldt_info.flags = tswap32(target_ldt_info->flags);
5130     unlock_user_struct(target_ldt_info, ptr, 0);
5131 
5132     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5133         return -TARGET_EINVAL;
5134     seg_32bit = ldt_info.flags & 1;
5135     contents = (ldt_info.flags >> 1) & 3;
5136     read_exec_only = (ldt_info.flags >> 3) & 1;
5137     limit_in_pages = (ldt_info.flags >> 4) & 1;
5138     seg_not_present = (ldt_info.flags >> 5) & 1;
5139     useable = (ldt_info.flags >> 6) & 1;
5140 #ifdef TARGET_ABI32
5141     lm = 0;
5142 #else
5143     lm = (ldt_info.flags >> 7) & 1;
5144 #endif
5145     if (contents == 3) {
5146         if (oldmode)
5147             return -TARGET_EINVAL;
5148         if (seg_not_present == 0)
5149             return -TARGET_EINVAL;
5150     }
5151     /* allocate the LDT */
5152     if (!ldt_table) {
5153         env->ldt.base = target_mmap(0,
5154                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5155                                     PROT_READ|PROT_WRITE,
5156                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5157         if (env->ldt.base == -1)
5158             return -TARGET_ENOMEM;
5159         memset(g2h(env->ldt.base), 0,
5160                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5161         env->ldt.limit = 0xffff;
5162         ldt_table = g2h(env->ldt.base);
5163     }
5164 
5165     /* NOTE: same code as Linux kernel */
5166     /* Allow LDTs to be cleared by the user. */
5167     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5168         if (oldmode ||
5169             (contents == 0		&&
5170              read_exec_only == 1	&&
5171              seg_32bit == 0		&&
5172              limit_in_pages == 0	&&
5173              seg_not_present == 1	&&
5174              useable == 0 )) {
5175             entry_1 = 0;
5176             entry_2 = 0;
5177             goto install;
5178         }
5179     }
5180 
5181     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5182         (ldt_info.limit & 0x0ffff);
5183     entry_2 = (ldt_info.base_addr & 0xff000000) |
5184         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5185         (ldt_info.limit & 0xf0000) |
5186         ((read_exec_only ^ 1) << 9) |
5187         (contents << 10) |
5188         ((seg_not_present ^ 1) << 15) |
5189         (seg_32bit << 22) |
5190         (limit_in_pages << 23) |
5191         (lm << 21) |
5192         0x7000;
5193     if (!oldmode)
5194         entry_2 |= (useable << 20);
5195 
5196     /* Install the new entry ...  */
5197 install:
5198     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5199     lp[0] = tswap32(entry_1);
5200     lp[1] = tswap32(entry_2);
5201     return 0;
5202 }
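/*
 * Layout note for the descriptor built above (a standard x86 segment
 * descriptor split into two 32-bit words): entry_1 holds base[15:0] and
 * limit[15:0]; entry_2 holds base[31:24] and base[23:16], limit[19:16],
 * the type bits, the present bit (bit 15), the D/B bit (bit 22),
 * granularity (bit 23), AVL (bit 20) and the 64-bit L bit (bit 21).
 * The constant 0x7000 sets S=1 (code/data descriptor) and DPL=3 so that
 * guest user code can load the selector.
 */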
5203 
5204 /* specific and weird i386 syscalls */
5205 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5206                               unsigned long bytecount)
5207 {
5208     abi_long ret;
5209 
5210     switch (func) {
5211     case 0:
5212         ret = read_ldt(ptr, bytecount);
5213         break;
5214     case 1:
5215         ret = write_ldt(env, ptr, bytecount, 1);
5216         break;
5217     case 0x11:
5218         ret = write_ldt(env, ptr, bytecount, 0);
5219         break;
5220     default:
5221         ret = -TARGET_ENOSYS;
5222         break;
5223     }
5224     return ret;
5225 }
5226 
5227 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5228 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5229 {
5230     uint64_t *gdt_table = g2h(env->gdt.base);
5231     struct target_modify_ldt_ldt_s ldt_info;
5232     struct target_modify_ldt_ldt_s *target_ldt_info;
5233     int seg_32bit, contents, read_exec_only, limit_in_pages;
5234     int seg_not_present, useable, lm;
5235     uint32_t *lp, entry_1, entry_2;
5236     int i;
5237 
5238     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5239     if (!target_ldt_info)
5240         return -TARGET_EFAULT;
5241     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5242     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5243     ldt_info.limit = tswap32(target_ldt_info->limit);
5244     ldt_info.flags = tswap32(target_ldt_info->flags);
5245     if (ldt_info.entry_number == -1) {
5246         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5247             if (gdt_table[i] == 0) {
5248                 ldt_info.entry_number = i;
5249                 target_ldt_info->entry_number = tswap32(i);
5250                 break;
5251             }
5252         }
5253     }
5254     unlock_user_struct(target_ldt_info, ptr, 1);
5255 
5256     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5257         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5258            return -TARGET_EINVAL;
5259     seg_32bit = ldt_info.flags & 1;
5260     contents = (ldt_info.flags >> 1) & 3;
5261     read_exec_only = (ldt_info.flags >> 3) & 1;
5262     limit_in_pages = (ldt_info.flags >> 4) & 1;
5263     seg_not_present = (ldt_info.flags >> 5) & 1;
5264     useable = (ldt_info.flags >> 6) & 1;
5265 #ifdef TARGET_ABI32
5266     lm = 0;
5267 #else
5268     lm = (ldt_info.flags >> 7) & 1;
5269 #endif
5270 
5271     if (contents == 3) {
5272         if (seg_not_present == 0)
5273             return -TARGET_EINVAL;
5274     }
5275 
5276     /* NOTE: same code as Linux kernel */
5277     /* Allow LDTs to be cleared by the user. */
5278     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5279         if ((contents == 0             &&
5280              read_exec_only == 1       &&
5281              seg_32bit == 0            &&
5282              limit_in_pages == 0       &&
5283              seg_not_present == 1      &&
5284              useable == 0 )) {
5285             entry_1 = 0;
5286             entry_2 = 0;
5287             goto install;
5288         }
5289     }
5290 
5291     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5292         (ldt_info.limit & 0x0ffff);
5293     entry_2 = (ldt_info.base_addr & 0xff000000) |
5294         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5295         (ldt_info.limit & 0xf0000) |
5296         ((read_exec_only ^ 1) << 9) |
5297         (contents << 10) |
5298         ((seg_not_present ^ 1) << 15) |
5299         (seg_32bit << 22) |
5300         (limit_in_pages << 23) |
5301         (useable << 20) |
5302         (lm << 21) |
5303         0x7000;
5304 
5305     /* Install the new entry ...  */
5306 install:
5307     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5308     lp[0] = tswap32(entry_1);
5309     lp[1] = tswap32(entry_2);
5310     return 0;
5311 }
5312 
5313 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5314 {
5315     struct target_modify_ldt_ldt_s *target_ldt_info;
5316     uint64_t *gdt_table = g2h(env->gdt.base);
5317     uint32_t base_addr, limit, flags;
5318     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5319     int seg_not_present, useable, lm;
5320     uint32_t *lp, entry_1, entry_2;
5321 
5322     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5323     if (!target_ldt_info)
5324         return -TARGET_EFAULT;
5325     idx = tswap32(target_ldt_info->entry_number);
5326     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5327         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5328         unlock_user_struct(target_ldt_info, ptr, 1);
5329         return -TARGET_EINVAL;
5330     }
5331     lp = (uint32_t *)(gdt_table + idx);
5332     entry_1 = tswap32(lp[0]);
5333     entry_2 = tswap32(lp[1]);
5334 
5335     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5336     contents = (entry_2 >> 10) & 3;
5337     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5338     seg_32bit = (entry_2 >> 22) & 1;
5339     limit_in_pages = (entry_2 >> 23) & 1;
5340     useable = (entry_2 >> 20) & 1;
5341 #ifdef TARGET_ABI32
5342     lm = 0;
5343 #else
5344     lm = (entry_2 >> 21) & 1;
5345 #endif
5346     flags = (seg_32bit << 0) | (contents << 1) |
5347         (read_exec_only << 3) | (limit_in_pages << 4) |
5348         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5349     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5350     base_addr = (entry_1 >> 16) |
5351         (entry_2 & 0xff000000) |
5352         ((entry_2 & 0xff) << 16);
5353     target_ldt_info->base_addr = tswapal(base_addr);
5354     target_ldt_info->limit = tswap32(limit);
5355     target_ldt_info->flags = tswap32(flags);
5356     unlock_user_struct(target_ldt_info, ptr, 1);
5357     return 0;
5358 }
5359 #endif /* TARGET_I386 && TARGET_ABI32 */
5360 
5361 #ifndef TARGET_ABI32
5362 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5363 {
5364     abi_long ret = 0;
5365     abi_ulong val;
5366     int idx;
5367 
5368     switch(code) {
5369     case TARGET_ARCH_SET_GS:
5370     case TARGET_ARCH_SET_FS:
5371         if (code == TARGET_ARCH_SET_GS)
5372             idx = R_GS;
5373         else
5374             idx = R_FS;
5375         cpu_x86_load_seg(env, idx, 0);
5376         env->segs[idx].base = addr;
5377         break;
5378     case TARGET_ARCH_GET_GS:
5379     case TARGET_ARCH_GET_FS:
5380         if (code == TARGET_ARCH_GET_GS)
5381             idx = R_GS;
5382         else
5383             idx = R_FS;
5384         val = env->segs[idx].base;
5385         if (put_user(val, addr, abi_ulong))
5386             ret = -TARGET_EFAULT;
5387         break;
5388     default:
5389         ret = -TARGET_EINVAL;
5390         break;
5391     }
5392     return ret;
5393 }
5394 #endif
5395 
5396 #endif /* defined(TARGET_I386) */
5397 
5398 #define NEW_STACK_SIZE 0x40000
5399 
5400 
5401 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5402 typedef struct {
5403     CPUArchState *env;
5404     pthread_mutex_t mutex;
5405     pthread_cond_t cond;
5406     pthread_t thread;
5407     uint32_t tid;
5408     abi_ulong child_tidptr;
5409     abi_ulong parent_tidptr;
5410     sigset_t sigmask;
5411 } new_thread_info;
5412 
5413 static void *clone_func(void *arg)
5414 {
5415     new_thread_info *info = arg;
5416     CPUArchState *env;
5417     CPUState *cpu;
5418     TaskState *ts;
5419 
5420     rcu_register_thread();
5421     tcg_register_thread();
5422     env = info->env;
5423     cpu = ENV_GET_CPU(env);
5424     thread_cpu = cpu;
5425     ts = (TaskState *)cpu->opaque;
5426     info->tid = gettid();
5427     task_settid(ts);
5428     if (info->child_tidptr)
5429         put_user_u32(info->tid, info->child_tidptr);
5430     if (info->parent_tidptr)
5431         put_user_u32(info->tid, info->parent_tidptr);
5432     /* Enable signals.  */
5433     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5434     /* Signal to the parent that we're ready.  */
5435     pthread_mutex_lock(&info->mutex);
5436     pthread_cond_broadcast(&info->cond);
5437     pthread_mutex_unlock(&info->mutex);
5438     /* Wait until the parent has finished initializing the tls state.  */
5439     pthread_mutex_lock(&clone_lock);
5440     pthread_mutex_unlock(&clone_lock);
5441     cpu_loop(env);
5442     /* never exits */
5443     return NULL;
5444 }
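/*
 * Startup handshake used by clone_func(): the child publishes its TID,
 * restores the signal mask saved by the parent, signals info->cond to
 * tell the parent it is running, and then briefly takes clone_lock so it
 * cannot enter cpu_loop() before do_fork() has finished setting up the
 * TLS state and released the lock.
 */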
5445 
5446 /* do_fork() must return host values and target errnos (unlike most
5447    do_*() functions). */
5448 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5449                    abi_ulong parent_tidptr, target_ulong newtls,
5450                    abi_ulong child_tidptr)
5451 {
5452     CPUState *cpu = ENV_GET_CPU(env);
5453     int ret;
5454     TaskState *ts;
5455     CPUState *new_cpu;
5456     CPUArchState *new_env;
5457     sigset_t sigmask;
5458 
5459     flags &= ~CLONE_IGNORED_FLAGS;
5460 
5461     /* Emulate vfork() with fork() */
5462     if (flags & CLONE_VFORK)
5463         flags &= ~(CLONE_VFORK | CLONE_VM);
5464 
5465     if (flags & CLONE_VM) {
5466         TaskState *parent_ts = (TaskState *)cpu->opaque;
5467         new_thread_info info;
5468         pthread_attr_t attr;
5469 
5470         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5471             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5472             return -TARGET_EINVAL;
5473         }
5474 
5475         ts = g_new0(TaskState, 1);
5476         init_task_state(ts);
5477 
5478         /* Grab a mutex so that thread setup appears atomic.  */
5479         pthread_mutex_lock(&clone_lock);
5480 
5481         /* we create a new CPU instance. */
5482         new_env = cpu_copy(env);
5483         /* Init regs that differ from the parent.  */
5484         cpu_clone_regs(new_env, newsp);
5485         new_cpu = ENV_GET_CPU(new_env);
5486         new_cpu->opaque = ts;
5487         ts->bprm = parent_ts->bprm;
5488         ts->info = parent_ts->info;
5489         ts->signal_mask = parent_ts->signal_mask;
5490 
5491         if (flags & CLONE_CHILD_CLEARTID) {
5492             ts->child_tidptr = child_tidptr;
5493         }
5494 
5495         if (flags & CLONE_SETTLS) {
5496             cpu_set_tls (new_env, newtls);
5497         }
5498 
5499         memset(&info, 0, sizeof(info));
5500         pthread_mutex_init(&info.mutex, NULL);
5501         pthread_mutex_lock(&info.mutex);
5502         pthread_cond_init(&info.cond, NULL);
5503         info.env = new_env;
5504         if (flags & CLONE_CHILD_SETTID) {
5505             info.child_tidptr = child_tidptr;
5506         }
5507         if (flags & CLONE_PARENT_SETTID) {
5508             info.parent_tidptr = parent_tidptr;
5509         }
5510 
5511         ret = pthread_attr_init(&attr);
5512         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5513         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5514         /* It is not safe to deliver signals until the child has finished
5515            initializing, so temporarily block all signals.  */
5516         sigfillset(&sigmask);
5517         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5518 
5519         /* If this is our first additional thread, we need to ensure we
5520          * generate code for parallel execution and flush old translations.
5521          */
5522         if (!parallel_cpus) {
5523             parallel_cpus = true;
5524             tb_flush(cpu);
5525         }
5526 
5527         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5528         /* TODO: Free new CPU state if thread creation failed.  */
5529 
5530         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5531         pthread_attr_destroy(&attr);
5532         if (ret == 0) {
5533             /* Wait for the child to initialize.  */
5534             pthread_cond_wait(&info.cond, &info.mutex);
5535             ret = info.tid;
5536         } else {
5537             ret = -1;
5538         }
5539         pthread_mutex_unlock(&info.mutex);
5540         pthread_cond_destroy(&info.cond);
5541         pthread_mutex_destroy(&info.mutex);
5542         pthread_mutex_unlock(&clone_lock);
5543     } else {
5544         /* if CLONE_VM is not set, we consider it a fork */
5545         if (flags & CLONE_INVALID_FORK_FLAGS) {
5546             return -TARGET_EINVAL;
5547         }
5548 
5549         /* We can't support custom termination signals */
5550         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5551             return -TARGET_EINVAL;
5552         }
5553 
5554         if (block_signals()) {
5555             return -TARGET_ERESTARTSYS;
5556         }
5557 
5558         fork_start();
5559         ret = fork();
5560         if (ret == 0) {
5561             /* Child Process.  */
5562             cpu_clone_regs(env, newsp);
5563             fork_end(1);
5564             /* There is a race condition here.  The parent process could
5565                theoretically read the TID in the child process before the child
5566                TID is set.  This would require using either ptrace
5567                (not implemented) or having *_tidptr point at a shared memory
5568                mapping.  We can't repeat the spinlock hack used above because
5569                the child process gets its own copy of the lock.  */
5570             if (flags & CLONE_CHILD_SETTID)
5571                 put_user_u32(gettid(), child_tidptr);
5572             if (flags & CLONE_PARENT_SETTID)
5573                 put_user_u32(gettid(), parent_tidptr);
5574             ts = (TaskState *)cpu->opaque;
5575             if (flags & CLONE_SETTLS)
5576                 cpu_set_tls (env, newtls);
5577             if (flags & CLONE_CHILD_CLEARTID)
5578                 ts->child_tidptr = child_tidptr;
5579         } else {
5580             fork_end(0);
5581         }
5582     }
5583     return ret;
5584 }
5585 
5586 /* warning: doesn't handle Linux-specific flags... */
5587 static int target_to_host_fcntl_cmd(int cmd)
5588 {
5589     int ret;
5590 
5591     switch(cmd) {
5592     case TARGET_F_DUPFD:
5593     case TARGET_F_GETFD:
5594     case TARGET_F_SETFD:
5595     case TARGET_F_GETFL:
5596     case TARGET_F_SETFL:
5597         ret = cmd;
5598         break;
5599     case TARGET_F_GETLK:
5600         ret = F_GETLK64;
5601         break;
5602     case TARGET_F_SETLK:
5603         ret = F_SETLK64;
5604         break;
5605     case TARGET_F_SETLKW:
5606         ret = F_SETLKW64;
5607         break;
5608     case TARGET_F_GETOWN:
5609         ret = F_GETOWN;
5610         break;
5611     case TARGET_F_SETOWN:
5612         ret = F_SETOWN;
5613         break;
5614     case TARGET_F_GETSIG:
5615         ret = F_GETSIG;
5616         break;
5617     case TARGET_F_SETSIG:
5618         ret = F_SETSIG;
5619         break;
5620 #if TARGET_ABI_BITS == 32
5621     case TARGET_F_GETLK64:
5622         ret = F_GETLK64;
5623         break;
5624     case TARGET_F_SETLK64:
5625         ret = F_SETLK64;
5626         break;
5627     case TARGET_F_SETLKW64:
5628         ret = F_SETLKW64;
5629         break;
5630 #endif
5631     case TARGET_F_SETLEASE:
5632         ret = F_SETLEASE;
5633         break;
5634     case TARGET_F_GETLEASE:
5635         ret = F_GETLEASE;
5636         break;
5637 #ifdef F_DUPFD_CLOEXEC
5638     case TARGET_F_DUPFD_CLOEXEC:
5639         ret = F_DUPFD_CLOEXEC;
5640         break;
5641 #endif
5642     case TARGET_F_NOTIFY:
5643         ret = F_NOTIFY;
5644         break;
5645 #ifdef F_GETOWN_EX
5646     case TARGET_F_GETOWN_EX:
5647         ret = F_GETOWN_EX;
5648         break;
5649 #endif
5650 #ifdef F_SETOWN_EX
5651     case TARGET_F_SETOWN_EX:
5652         ret = F_SETOWN_EX;
5653         break;
5654 #endif
5655 #ifdef F_SETPIPE_SZ
5656     case TARGET_F_SETPIPE_SZ:
5657         ret = F_SETPIPE_SZ;
5658         break;
5659     case TARGET_F_GETPIPE_SZ:
5660         ret = F_GETPIPE_SZ;
5661         break;
5662 #endif
5663     default:
5664         ret = -TARGET_EINVAL;
5665         break;
5666     }
5667 
5668 #if defined(__powerpc64__)
5669     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and 14,
5670      * which are not supported by the kernel. The glibc fcntl call adjusts
5671      * them to 5, 6 and 7 before making the syscall(). Since we make the
5672      * syscall directly, adjust to what is supported by the kernel.
5673      */
5674     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5675         ret -= F_GETLK64 - 5;
5676     }
5677 #endif
5678 
5679     return ret;
5680 }
5681 
5682 #define FLOCK_TRANSTBL \
5683     switch (type) { \
5684     TRANSTBL_CONVERT(F_RDLCK); \
5685     TRANSTBL_CONVERT(F_WRLCK); \
5686     TRANSTBL_CONVERT(F_UNLCK); \
5687     TRANSTBL_CONVERT(F_EXLCK); \
5688     TRANSTBL_CONVERT(F_SHLCK); \
5689     }
5690 
5691 static int target_to_host_flock(int type)
5692 {
5693 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5694     FLOCK_TRANSTBL
5695 #undef  TRANSTBL_CONVERT
5696     return -TARGET_EINVAL;
5697 }
5698 
5699 static int host_to_target_flock(int type)
5700 {
5701 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5702     FLOCK_TRANSTBL
5703 #undef  TRANSTBL_CONVERT
5704     /* if we don't know how to convert the value coming
5705      * from the host, copy it to the target field as-is
5706      */
5707     return type;
5708 }
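/*
 * FLOCK_TRANSTBL is an X-macro style switch: instantiating it with the
 * two different TRANSTBL_CONVERT definitions above yields both
 * conversion directions from a single list of lock types, so adding a
 * new F_*LCK value only needs one new line in the table.
 */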
5709 
5710 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5711                                             abi_ulong target_flock_addr)
5712 {
5713     struct target_flock *target_fl;
5714     int l_type;
5715 
5716     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5717         return -TARGET_EFAULT;
5718     }
5719 
5720     __get_user(l_type, &target_fl->l_type);
5721     l_type = target_to_host_flock(l_type);
5722     if (l_type < 0) {
5723         return l_type;
5724     }
5725     fl->l_type = l_type;
5726     __get_user(fl->l_whence, &target_fl->l_whence);
5727     __get_user(fl->l_start, &target_fl->l_start);
5728     __get_user(fl->l_len, &target_fl->l_len);
5729     __get_user(fl->l_pid, &target_fl->l_pid);
5730     unlock_user_struct(target_fl, target_flock_addr, 0);
5731     return 0;
5732 }
5733 
5734 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5735                                           const struct flock64 *fl)
5736 {
5737     struct target_flock *target_fl;
5738     short l_type;
5739 
5740     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5741         return -TARGET_EFAULT;
5742     }
5743 
5744     l_type = host_to_target_flock(fl->l_type);
5745     __put_user(l_type, &target_fl->l_type);
5746     __put_user(fl->l_whence, &target_fl->l_whence);
5747     __put_user(fl->l_start, &target_fl->l_start);
5748     __put_user(fl->l_len, &target_fl->l_len);
5749     __put_user(fl->l_pid, &target_fl->l_pid);
5750     unlock_user_struct(target_fl, target_flock_addr, 1);
5751     return 0;
5752 }
5753 
5754 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5755 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5756 
5757 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5758 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5759                                                    abi_ulong target_flock_addr)
5760 {
5761     struct target_oabi_flock64 *target_fl;
5762     int l_type;
5763 
5764     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5765         return -TARGET_EFAULT;
5766     }
5767 
5768     __get_user(l_type, &target_fl->l_type);
5769     l_type = target_to_host_flock(l_type);
5770     if (l_type < 0) {
5771         return l_type;
5772     }
5773     fl->l_type = l_type;
5774     __get_user(fl->l_whence, &target_fl->l_whence);
5775     __get_user(fl->l_start, &target_fl->l_start);
5776     __get_user(fl->l_len, &target_fl->l_len);
5777     __get_user(fl->l_pid, &target_fl->l_pid);
5778     unlock_user_struct(target_fl, target_flock_addr, 0);
5779     return 0;
5780 }
5781 
5782 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
5783                                                  const struct flock64 *fl)
5784 {
5785     struct target_oabi_flock64 *target_fl;
5786     short l_type;
5787 
5788     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5789         return -TARGET_EFAULT;
5790     }
5791 
5792     l_type = host_to_target_flock(fl->l_type);
5793     __put_user(l_type, &target_fl->l_type);
5794     __put_user(fl->l_whence, &target_fl->l_whence);
5795     __put_user(fl->l_start, &target_fl->l_start);
5796     __put_user(fl->l_len, &target_fl->l_len);
5797     __put_user(fl->l_pid, &target_fl->l_pid);
5798     unlock_user_struct(target_fl, target_flock_addr, 1);
5799     return 0;
5800 }
5801 #endif
5802 
5803 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
5804                                               abi_ulong target_flock_addr)
5805 {
5806     struct target_flock64 *target_fl;
5807     int l_type;
5808 
5809     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5810         return -TARGET_EFAULT;
5811     }
5812 
5813     __get_user(l_type, &target_fl->l_type);
5814     l_type = target_to_host_flock(l_type);
5815     if (l_type < 0) {
5816         return l_type;
5817     }
5818     fl->l_type = l_type;
5819     __get_user(fl->l_whence, &target_fl->l_whence);
5820     __get_user(fl->l_start, &target_fl->l_start);
5821     __get_user(fl->l_len, &target_fl->l_len);
5822     __get_user(fl->l_pid, &target_fl->l_pid);
5823     unlock_user_struct(target_fl, target_flock_addr, 0);
5824     return 0;
5825 }
5826 
5827 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
5828                                             const struct flock64 *fl)
5829 {
5830     struct target_flock64 *target_fl;
5831     short l_type;
5832 
5833     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5834         return -TARGET_EFAULT;
5835     }
5836 
5837     l_type = host_to_target_flock(fl->l_type);
5838     __put_user(l_type, &target_fl->l_type);
5839     __put_user(fl->l_whence, &target_fl->l_whence);
5840     __put_user(fl->l_start, &target_fl->l_start);
5841     __put_user(fl->l_len, &target_fl->l_len);
5842     __put_user(fl->l_pid, &target_fl->l_pid);
5843     unlock_user_struct(target_fl, target_flock_addr, 1);
5844     return 0;
5845 }
5846 
5847 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
5848 {
5849     struct flock64 fl64;
5850 #ifdef F_GETOWN_EX
5851     struct f_owner_ex fox;
5852     struct target_f_owner_ex *target_fox;
5853 #endif
5854     abi_long ret;
5855     int host_cmd = target_to_host_fcntl_cmd(cmd);
5856 
5857     if (host_cmd == -TARGET_EINVAL)
5858         return host_cmd;
5859 
5860     switch(cmd) {
5861     case TARGET_F_GETLK:
5862         ret = copy_from_user_flock(&fl64, arg);
5863         if (ret) {
5864             return ret;
5865         }
5866         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5867         if (ret == 0) {
5868             ret = copy_to_user_flock(arg, &fl64);
5869         }
5870         break;
5871 
5872     case TARGET_F_SETLK:
5873     case TARGET_F_SETLKW:
5874         ret = copy_from_user_flock(&fl64, arg);
5875         if (ret) {
5876             return ret;
5877         }
5878         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5879         break;
5880 
5881     case TARGET_F_GETLK64:
5882         ret = copy_from_user_flock64(&fl64, arg);
5883         if (ret) {
5884             return ret;
5885         }
5886         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5887         if (ret == 0) {
5888             ret = copy_to_user_flock64(arg, &fl64);
5889         }
5890         break;
5891     case TARGET_F_SETLK64:
5892     case TARGET_F_SETLKW64:
5893         ret = copy_from_user_flock64(&fl64, arg);
5894         if (ret) {
5895             return ret;
5896         }
5897         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5898         break;
5899 
5900     case TARGET_F_GETFL:
5901         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5902         if (ret >= 0) {
5903             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
5904         }
5905         break;
5906 
5907     case TARGET_F_SETFL:
5908         ret = get_errno(safe_fcntl(fd, host_cmd,
5909                                    target_to_host_bitmask(arg,
5910                                                           fcntl_flags_tbl)));
5911         break;
5912 
5913 #ifdef F_GETOWN_EX
5914     case TARGET_F_GETOWN_EX:
5915         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5916         if (ret >= 0) {
5917             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
5918                 return -TARGET_EFAULT;
5919             target_fox->type = tswap32(fox.type);
5920             target_fox->pid = tswap32(fox.pid);
5921             unlock_user_struct(target_fox, arg, 1);
5922         }
5923         break;
5924 #endif
5925 
5926 #ifdef F_SETOWN_EX
5927     case TARGET_F_SETOWN_EX:
5928         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
5929             return -TARGET_EFAULT;
5930         fox.type = tswap32(target_fox->type);
5931         fox.pid = tswap32(target_fox->pid);
5932         unlock_user_struct(target_fox, arg, 0);
5933         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5934         break;
5935 #endif
5936 
5937     case TARGET_F_SETOWN:
5938     case TARGET_F_GETOWN:
5939     case TARGET_F_SETSIG:
5940     case TARGET_F_GETSIG:
5941     case TARGET_F_SETLEASE:
5942     case TARGET_F_GETLEASE:
5943     case TARGET_F_SETPIPE_SZ:
5944     case TARGET_F_GETPIPE_SZ:
5945         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5946         break;
5947 
5948     default:
5949         ret = get_errno(safe_fcntl(fd, cmd, arg));
5950         break;
5951     }
5952     return ret;
5953 }
5954 
5955 #ifdef USE_UID16
5956 
5957 static inline int high2lowuid(int uid)
5958 {
5959     if (uid > 65535)
5960         return 65534;
5961     else
5962         return uid;
5963 }
5964 
5965 static inline int high2lowgid(int gid)
5966 {
5967     if (gid > 65535)
5968         return 65534;
5969     else
5970         return gid;
5971 }
5972 
5973 static inline int low2highuid(int uid)
5974 {
5975     if ((int16_t)uid == -1)
5976         return -1;
5977     else
5978         return uid;
5979 }
5980 
5981 static inline int low2highgid(int gid)
5982 {
5983     if ((int16_t)gid == -1)
5984         return -1;
5985     else
5986         return gid;
5987 }
5988 static inline int tswapid(int id)
5989 {
5990     return tswap16(id);
5991 }
5992 
5993 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5994 
5995 #else /* !USE_UID16 */
5996 static inline int high2lowuid(int uid)
5997 {
5998     return uid;
5999 }
6000 static inline int high2lowgid(int gid)
6001 {
6002     return gid;
6003 }
6004 static inline int low2highuid(int uid)
6005 {
6006     return uid;
6007 }
6008 static inline int low2highgid(int gid)
6009 {
6010     return gid;
6011 }
6012 static inline int tswapid(int id)
6013 {
6014     return tswap32(id);
6015 }
6016 
6017 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6018 
6019 #endif /* USE_UID16 */
6020 
6021 /* We must do direct syscalls for setting UID/GID, because we want to
6022  * implement the Linux system call semantics of "change only for this thread",
6023  * not the libc/POSIX semantics of "change for all threads in process".
6024  * (See http://ewontfix.com/17/ for more details.)
6025  * We use the 32-bit version of the syscalls if present; if it is not
6026  * then either the host architecture supports 32-bit UIDs natively with
6027  * the standard syscall, or the 16-bit UID is the best we can do.
6028  */
6029 #ifdef __NR_setuid32
6030 #define __NR_sys_setuid __NR_setuid32
6031 #else
6032 #define __NR_sys_setuid __NR_setuid
6033 #endif
6034 #ifdef __NR_setgid32
6035 #define __NR_sys_setgid __NR_setgid32
6036 #else
6037 #define __NR_sys_setgid __NR_setgid
6038 #endif
6039 #ifdef __NR_setresuid32
6040 #define __NR_sys_setresuid __NR_setresuid32
6041 #else
6042 #define __NR_sys_setresuid __NR_setresuid
6043 #endif
6044 #ifdef __NR_setresgid32
6045 #define __NR_sys_setresgid __NR_setresgid32
6046 #else
6047 #define __NR_sys_setresgid __NR_setresgid
6048 #endif
6049 
6050 _syscall1(int, sys_setuid, uid_t, uid)
6051 _syscall1(int, sys_setgid, gid_t, gid)
6052 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6053 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6054 
6055 void syscall_init(void)
6056 {
6057     IOCTLEntry *ie;
6058     const argtype *arg_type;
6059     int size;
6060     int i;
6061 
6062     thunk_init(STRUCT_MAX);
6063 
6064 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6065 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6066 #include "syscall_types.h"
6067 #undef STRUCT
6068 #undef STRUCT_SPECIAL
6069 
6070     /* Build the target_to_host_errno_table[] from
6071      * host_to_target_errno_table[]. */
6072     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6073         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6074     }
6075 
6076     /* We patch the ioctl size if necessary. We rely on the fact that
6077        no ioctl has all bits set to '1' in the size field. */
6078     ie = ioctl_entries;
6079     while (ie->target_cmd != 0) {
6080         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6081             TARGET_IOC_SIZEMASK) {
6082             arg_type = ie->arg_type;
6083             if (arg_type[0] != TYPE_PTR) {
6084                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6085                         ie->target_cmd);
6086                 exit(1);
6087             }
6088             arg_type++;
6089             size = thunk_type_size(arg_type, 0);
6090             ie->target_cmd = (ie->target_cmd &
6091                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6092                 (size << TARGET_IOC_SIZESHIFT);
6093         }
6094 
6095         /* automatic consistency check if same arch */
6096 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6097     (defined(__x86_64__) && defined(TARGET_X86_64))
6098         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6099             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6100                     ie->name, ie->target_cmd, ie->host_cmd);
6101         }
6102 #endif
6103         ie++;
6104     }
6105 }
6106 
6107 #if TARGET_ABI_BITS == 32
6108 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6109 {
6110 #ifdef TARGET_WORDS_BIGENDIAN
6111     return ((uint64_t)word0 << 32) | word1;
6112 #else
6113     return ((uint64_t)word1 << 32) | word0;
6114 #endif
6115 }
6116 #else /* TARGET_ABI_BITS == 32 */
6117 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6118 {
6119     return word0;
6120 }
6121 #endif /* TARGET_ABI_BITS != 32 */
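/*
 * Worked example for the 32-bit case: target_offset64(0x00000001,
 * 0x00000002) yields 0x0000000100000002 on a big-endian target (word0 is
 * the high half) and 0x0000000200000001 on a little-endian one (word1 is
 * the high half); on 64-bit ABIs the offset already fits in word0 and
 * word1 is ignored.
 */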
6122 
6123 #ifdef TARGET_NR_truncate64
6124 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6125                                          abi_long arg2,
6126                                          abi_long arg3,
6127                                          abi_long arg4)
6128 {
6129     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6130         arg2 = arg3;
6131         arg3 = arg4;
6132     }
6133     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6134 }
6135 #endif
6136 
6137 #ifdef TARGET_NR_ftruncate64
6138 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6139                                           abi_long arg2,
6140                                           abi_long arg3,
6141                                           abi_long arg4)
6142 {
6143     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6144         arg2 = arg3;
6145         arg3 = arg4;
6146     }
6147     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6148 }
6149 #endif
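/*
 * regpairs_aligned() is true for targets whose 32-bit ABI passes 64-bit
 * syscall arguments in aligned (even/odd) register pairs; such ABIs
 * insert a padding argument before the low/high pair, which is why
 * arg2/arg3 are shifted down from arg3/arg4 in the two helpers above
 * before being recombined with target_offset64().
 */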
6150 
6151 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6152                                                abi_ulong target_addr)
6153 {
6154     struct target_timespec *target_ts;
6155 
6156     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6157         return -TARGET_EFAULT;
6158     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6159     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6160     unlock_user_struct(target_ts, target_addr, 0);
6161     return 0;
6162 }
6163 
6164 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6165                                                struct timespec *host_ts)
6166 {
6167     struct target_timespec *target_ts;
6168 
6169     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6170         return -TARGET_EFAULT;
6171     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6172     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6173     unlock_user_struct(target_ts, target_addr, 1);
6174     return 0;
6175 }
6176 
6177 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6178                                                  abi_ulong target_addr)
6179 {
6180     struct target_itimerspec *target_itspec;
6181 
6182     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6183         return -TARGET_EFAULT;
6184     }
6185 
6186     host_itspec->it_interval.tv_sec =
6187                             tswapal(target_itspec->it_interval.tv_sec);
6188     host_itspec->it_interval.tv_nsec =
6189                             tswapal(target_itspec->it_interval.tv_nsec);
6190     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6191     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6192 
6193     unlock_user_struct(target_itspec, target_addr, 1);
6194     return 0;
6195 }
6196 
6197 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6198                                                struct itimerspec *host_its)
6199 {
6200     struct target_itimerspec *target_itspec;
6201 
6202     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6203         return -TARGET_EFAULT;
6204     }
6205 
6206     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6207     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6208 
6209     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6210     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6211 
6212     unlock_user_struct(target_itspec, target_addr, 0);
6213     return 0;
6214 }
6215 
6216 static inline abi_long target_to_host_timex(struct timex *host_tx,
6217                                             abi_long target_addr)
6218 {
6219     struct target_timex *target_tx;
6220 
6221     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6222         return -TARGET_EFAULT;
6223     }
6224 
6225     __get_user(host_tx->modes, &target_tx->modes);
6226     __get_user(host_tx->offset, &target_tx->offset);
6227     __get_user(host_tx->freq, &target_tx->freq);
6228     __get_user(host_tx->maxerror, &target_tx->maxerror);
6229     __get_user(host_tx->esterror, &target_tx->esterror);
6230     __get_user(host_tx->status, &target_tx->status);
6231     __get_user(host_tx->constant, &target_tx->constant);
6232     __get_user(host_tx->precision, &target_tx->precision);
6233     __get_user(host_tx->tolerance, &target_tx->tolerance);
6234     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6235     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6236     __get_user(host_tx->tick, &target_tx->tick);
6237     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6238     __get_user(host_tx->jitter, &target_tx->jitter);
6239     __get_user(host_tx->shift, &target_tx->shift);
6240     __get_user(host_tx->stabil, &target_tx->stabil);
6241     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6242     __get_user(host_tx->calcnt, &target_tx->calcnt);
6243     __get_user(host_tx->errcnt, &target_tx->errcnt);
6244     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6245     __get_user(host_tx->tai, &target_tx->tai);
6246 
6247     unlock_user_struct(target_tx, target_addr, 0);
6248     return 0;
6249 }
6250 
6251 static inline abi_long host_to_target_timex(abi_long target_addr,
6252                                             struct timex *host_tx)
6253 {
6254     struct target_timex *target_tx;
6255 
6256     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6257         return -TARGET_EFAULT;
6258     }
6259 
6260     __put_user(host_tx->modes, &target_tx->modes);
6261     __put_user(host_tx->offset, &target_tx->offset);
6262     __put_user(host_tx->freq, &target_tx->freq);
6263     __put_user(host_tx->maxerror, &target_tx->maxerror);
6264     __put_user(host_tx->esterror, &target_tx->esterror);
6265     __put_user(host_tx->status, &target_tx->status);
6266     __put_user(host_tx->constant, &target_tx->constant);
6267     __put_user(host_tx->precision, &target_tx->precision);
6268     __put_user(host_tx->tolerance, &target_tx->tolerance);
6269     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6270     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6271     __put_user(host_tx->tick, &target_tx->tick);
6272     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6273     __put_user(host_tx->jitter, &target_tx->jitter);
6274     __put_user(host_tx->shift, &target_tx->shift);
6275     __put_user(host_tx->stabil, &target_tx->stabil);
6276     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6277     __put_user(host_tx->calcnt, &target_tx->calcnt);
6278     __put_user(host_tx->errcnt, &target_tx->errcnt);
6279     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6280     __put_user(host_tx->tai, &target_tx->tai);
6281 
6282     unlock_user_struct(target_tx, target_addr, 1);
6283     return 0;
6284 }
6285 
6286 
6287 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6288                                                abi_ulong target_addr)
6289 {
6290     struct target_sigevent *target_sevp;
6291 
6292     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6293         return -TARGET_EFAULT;
6294     }
6295 
6296     /* This union is awkward on 64 bit systems because it has a 32 bit
6297      * integer and a pointer in it; we follow the conversion approach
6298      * used for handling sigval types in signal.c so the guest should get
6299      * the correct value back even if we did a 64 bit byteswap and it's
6300      * using the 32 bit integer.
6301      */
6302     host_sevp->sigev_value.sival_ptr =
6303         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6304     host_sevp->sigev_signo =
6305         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6306     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6307     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6308 
6309     unlock_user_struct(target_sevp, target_addr, 1);
6310     return 0;
6311 }
6312 
6313 #if defined(TARGET_NR_mlockall)
6314 static inline int target_to_host_mlockall_arg(int arg)
6315 {
6316     int result = 0;
6317 
6318     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6319         result |= MCL_CURRENT;
6320     }
6321     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6322         result |= MCL_FUTURE;
6323     }
6324     return result;
6325 }
6326 #endif
6327 
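/*
 * Copy a host struct stat into the guest's stat64 (or plain stat)
 * layout field by field.  32-bit ARM EABI guests use their own
 * target_eabi_stat64 structure, whose layout differs from the generic
 * one, hence the special case below.
 */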
6328 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6329      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6330      defined(TARGET_NR_newfstatat))
6331 static inline abi_long host_to_target_stat64(void *cpu_env,
6332                                              abi_ulong target_addr,
6333                                              struct stat *host_st)
6334 {
6335 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6336     if (((CPUARMState *)cpu_env)->eabi) {
6337         struct target_eabi_stat64 *target_st;
6338 
6339         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6340             return -TARGET_EFAULT;
6341         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6342         __put_user(host_st->st_dev, &target_st->st_dev);
6343         __put_user(host_st->st_ino, &target_st->st_ino);
6344 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6345         __put_user(host_st->st_ino, &target_st->__st_ino);
6346 #endif
6347         __put_user(host_st->st_mode, &target_st->st_mode);
6348         __put_user(host_st->st_nlink, &target_st->st_nlink);
6349         __put_user(host_st->st_uid, &target_st->st_uid);
6350         __put_user(host_st->st_gid, &target_st->st_gid);
6351         __put_user(host_st->st_rdev, &target_st->st_rdev);
6352         __put_user(host_st->st_size, &target_st->st_size);
6353         __put_user(host_st->st_blksize, &target_st->st_blksize);
6354         __put_user(host_st->st_blocks, &target_st->st_blocks);
6355         __put_user(host_st->st_atime, &target_st->target_st_atime);
6356         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6357         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6358         unlock_user_struct(target_st, target_addr, 1);
6359     } else
6360 #endif
6361     {
6362 #if defined(TARGET_HAS_STRUCT_STAT64)
6363         struct target_stat64 *target_st;
6364 #else
6365         struct target_stat *target_st;
6366 #endif
6367 
6368         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6369             return -TARGET_EFAULT;
6370         memset(target_st, 0, sizeof(*target_st));
6371         __put_user(host_st->st_dev, &target_st->st_dev);
6372         __put_user(host_st->st_ino, &target_st->st_ino);
6373 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6374         __put_user(host_st->st_ino, &target_st->__st_ino);
6375 #endif
6376         __put_user(host_st->st_mode, &target_st->st_mode);
6377         __put_user(host_st->st_nlink, &target_st->st_nlink);
6378         __put_user(host_st->st_uid, &target_st->st_uid);
6379         __put_user(host_st->st_gid, &target_st->st_gid);
6380         __put_user(host_st->st_rdev, &target_st->st_rdev);
6381         /* XXX: better use of kernel struct */
6382         __put_user(host_st->st_size, &target_st->st_size);
6383         __put_user(host_st->st_blksize, &target_st->st_blksize);
6384         __put_user(host_st->st_blocks, &target_st->st_blocks);
6385         __put_user(host_st->st_atime, &target_st->target_st_atime);
6386         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6387         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6388         unlock_user_struct(target_st, target_addr, 1);
6389     }
6390 
6391     return 0;
6392 }
6393 #endif
6394 
6395 /* ??? Using host futex calls even when target atomic operations
6396    are not really atomic probably breaks things.  However, implementing
6397    futexes locally would make futexes shared between multiple processes
6398    tricky.  In any case they're probably useless, because guest atomic
6399    operations won't work either.  */
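/*
 * Since the guest and QEMU share one address space, the futex word can
 * be handed straight to the host futex syscall via g2h(uaddr); only
 * values the kernel compares against guest memory need byte-swapping
 * (val for FUTEX_WAIT/FUTEX_WAIT_BITSET, val3 for FUTEX_CMP_REQUEUE),
 * and a real timeout is converted with target_to_host_timespec().
 */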
6400 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6401                     target_ulong uaddr2, int val3)
6402 {
6403     struct timespec ts, *pts;
6404     int base_op;
6405 
6406     /* ??? We assume FUTEX_* constants are the same on both host
6407        and target.  */
6408 #ifdef FUTEX_CMD_MASK
6409     base_op = op & FUTEX_CMD_MASK;
6410 #else
6411     base_op = op;
6412 #endif
6413     switch (base_op) {
6414     case FUTEX_WAIT:
6415     case FUTEX_WAIT_BITSET:
6416         if (timeout) {
6417             pts = &ts;
6418             if (target_to_host_timespec(pts, timeout)) {
                     return -TARGET_EFAULT;
                 }
6419         } else {
6420             pts = NULL;
6421         }
6422         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6423                          pts, NULL, val3));
6424     case FUTEX_WAKE:
6425         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6426     case FUTEX_FD:
6427         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6428     case FUTEX_REQUEUE:
6429     case FUTEX_CMP_REQUEUE:
6430     case FUTEX_WAKE_OP:
6431         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6432            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6433            But the prototype takes a `struct timespec *'; insert casts
6434            to satisfy the compiler.  We do not need to tswap TIMEOUT
6435            since it's not compared to guest memory.  */
6436         pts = (struct timespec *)(uintptr_t) timeout;
6437         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6438                                     g2h(uaddr2),
6439                                     (base_op == FUTEX_CMP_REQUEUE
6440                                      ? tswap32(val3)
6441                                      : val3)));
6442     default:
6443         return -TARGET_ENOSYS;
6444     }
6445 }
6446 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6447 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6448                                      abi_long handle, abi_long mount_id,
6449                                      abi_long flags)
6450 {
6451     struct file_handle *target_fh;
6452     struct file_handle *fh;
6453     int mid = 0;
6454     abi_long ret;
6455     char *name;
6456     unsigned int size, total_size;
6457 
6458     if (get_user_s32(size, handle)) {
6459         return -TARGET_EFAULT;
6460     }
6461 
6462     name = lock_user_string(pathname);
6463     if (!name) {
6464         return -TARGET_EFAULT;
6465     }
6466 
6467     total_size = sizeof(struct file_handle) + size;
6468     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6469     if (!target_fh) {
6470         unlock_user(name, pathname, 0);
6471         return -TARGET_EFAULT;
6472     }
6473 
6474     fh = g_malloc0(total_size);
6475     fh->handle_bytes = size;
6476 
6477     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6478     unlock_user(name, pathname, 0);
6479 
6480     /* man name_to_handle_at(2):
6481      * Other than the use of the handle_bytes field, the caller should treat
6482      * the file_handle structure as an opaque data type
6483      */
6484 
6485     memcpy(target_fh, fh, total_size);
6486     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6487     target_fh->handle_type = tswap32(fh->handle_type);
6488     g_free(fh);
6489     unlock_user(target_fh, handle, total_size);
6490 
6491     if (put_user_s32(mid, mount_id)) {
6492         return -TARGET_EFAULT;
6493     }
6494 
6495     return ret;
6496 
6497 }
6498 #endif
6499 
6500 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6501 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6502                                      abi_long flags)
6503 {
6504     struct file_handle *target_fh;
6505     struct file_handle *fh;
6506     unsigned int size, total_size;
6507     abi_long ret;
6508 
6509     if (get_user_s32(size, handle)) {
6510         return -TARGET_EFAULT;
6511     }
6512 
6513     total_size = sizeof(struct file_handle) + size;
6514     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6515     if (!target_fh) {
6516         return -TARGET_EFAULT;
6517     }
6518 
6519     fh = g_memdup(target_fh, total_size);
6520     fh->handle_bytes = size;
6521     fh->handle_type = tswap32(target_fh->handle_type);
6522 
6523     ret = get_errno(open_by_handle_at(mount_fd, fh,
6524                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6525 
6526     g_free(fh);
6527 
6528     unlock_user(target_fh, handle, total_size);
6529 
6530     return ret;
6531 }
6532 #endif
6533 
6534 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6535 
6536 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
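/*
 * signalfd()/signalfd4(): convert the guest signal mask and flags to
 * host format, create the host signalfd, and register
 * target_signalfd_trans on the new descriptor so that data read from
 * it is translated back to the target layout (see the fd_trans hook in
 * the TARGET_NR_read handling below).
 */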
6537 {
6538     int host_flags;
6539     target_sigset_t *target_mask;
6540     sigset_t host_mask;
6541     abi_long ret;
6542 
6543     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6544         return -TARGET_EINVAL;
6545     }
6546     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6547         return -TARGET_EFAULT;
6548     }
6549 
6550     target_to_host_sigset(&host_mask, target_mask);
6551 
6552     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6553 
6554     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6555     if (ret >= 0) {
6556         fd_trans_register(ret, &target_signalfd_trans);
6557     }
6558 
6559     unlock_user_struct(target_mask, mask, 0);
6560 
6561     return ret;
6562 }
6563 #endif
6564 
6565 /* Map host to target signal numbers for the wait family of syscalls.
6566    Assume all other status bits are the same.  */
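/*
 * For example, with the traditional wait(2) status encoding a child
 * killed by a signal reports the signal number in the low 7 bits (plus
 * 0x80 if it dumped core), while a stopped child reports the stop
 * signal in bits 8-15; only the signal number itself needs translating
 * to the target's numbering, the surrounding bits are passed through.
 */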
6567 int host_to_target_waitstatus(int status)
6568 {
6569     if (WIFSIGNALED(status)) {
6570         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6571     }
6572     if (WIFSTOPPED(status)) {
6573         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6574                | (status & 0xff);
6575     }
6576     return status;
6577 }
6578 
6579 static int open_self_cmdline(void *cpu_env, int fd)
6580 {
6581     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6582     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6583     int i;
6584 
6585     for (i = 0; i < bprm->argc; i++) {
6586         size_t len = strlen(bprm->argv[i]) + 1;
6587 
6588         if (write(fd, bprm->argv[i], len) != len) {
6589             return -1;
6590         }
6591     }
6592 
6593     return 0;
6594 }
6595 
6596 static int open_self_maps(void *cpu_env, int fd)
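/*
 * Fake /proc/self/maps: re-read the host's maps file and report only
 * those ranges that fall inside the guest address space, with addresses
 * translated back to the guest's view via h2g() and the guest stack
 * annotated with "[stack]".
 */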
6597 {
6598     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6599     TaskState *ts = cpu->opaque;
6600     FILE *fp;
6601     char *line = NULL;
6602     size_t len = 0;
6603     ssize_t read;
6604 
6605     fp = fopen("/proc/self/maps", "r");
6606     if (fp == NULL) {
6607         return -1;
6608     }
6609 
6610     while ((read = getline(&line, &len, fp)) != -1) {
6611         int fields, dev_maj, dev_min, inode;
6612         uint64_t min, max, offset;
6613         char flag_r, flag_w, flag_x, flag_p;
6614         char path[512] = "";
6615         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6616                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6617                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6618 
6619         if ((fields < 10) || (fields > 11)) {
6620             continue;
6621         }
6622         if (h2g_valid(min)) {
6623             int flags = page_get_flags(h2g(min));
6624             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6625             if (page_check_range(h2g(min), max - min, flags) == -1) {
6626                 continue;
6627             }
6628             if (h2g(min) == ts->info->stack_limit) {
6629                 pstrcpy(path, sizeof(path), "      [stack]");
6630             }
6631             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6632                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6633                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6634                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6635                     path[0] ? "         " : "", path);
6636         }
6637     }
6638 
6639     free(line);
6640     fclose(fp);
6641 
6642     return 0;
6643 }
6644 
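/*
 * Fake /proc/self/stat: of the 44 space-separated fields emitted below,
 * only the pid (field 1), the command name (field 2) and the start of
 * the stack (field 28) carry real values; every other field is reported
 * as 0.
 */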
6645 static int open_self_stat(void *cpu_env, int fd)
6646 {
6647     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6648     TaskState *ts = cpu->opaque;
6649     abi_ulong start_stack = ts->info->start_stack;
6650     int i;
6651 
6652     for (i = 0; i < 44; i++) {
6653       char buf[128];
6654       int len;
6655       uint64_t val = 0;
6656 
6657       if (i == 0) {
6658         /* pid */
6659         val = getpid();
6660         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6661       } else if (i == 1) {
6662         /* app name */
6663         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6664       } else if (i == 27) {
6665         /* stack bottom */
6666         val = start_stack;
6667         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6668       } else {
6669         /* for the rest, there is MasterCard */
6670         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6671       }
6672 
6673       len = strlen(buf);
6674       if (write(fd, buf, len) != len) {
6675           return -1;
6676       }
6677     }
6678 
6679     return 0;
6680 }
6681 
6682 static int open_self_auxv(void *cpu_env, int fd)
6683 {
6684     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6685     TaskState *ts = cpu->opaque;
6686     abi_ulong auxv = ts->info->saved_auxv;
6687     abi_ulong len = ts->info->auxv_len;
6688     char *ptr;
6689 
6690     /*
6691      * The auxiliary vector is stored on the target process's stack;
6692      * read the whole auxv vector and copy it out to the file.
6693      */
6694     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6695     if (ptr != NULL) {
6696         while (len > 0) {
6697             ssize_t r;
6698             r = write(fd, ptr, len);
6699             if (r <= 0) {
6700                 break;
6701             }
6702             len -= r;
6703             ptr += r;
6704         }
6705         lseek(fd, 0, SEEK_SET);
6706         unlock_user(ptr, auxv, len);
6707     }
6708 
6709     return 0;
6710 }
6711 
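/*
 * Return 1 if filename names the given /proc entry of the current
 * process, e.g. is_proc_myself("/proc/self/maps", "maps"), or the same
 * path spelled with our own pid such as "/proc/1234/maps" (illustrative
 * pid); any other path returns 0.
 */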
6712 static int is_proc_myself(const char *filename, const char *entry)
6713 {
6714     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6715         filename += strlen("/proc/");
6716         if (!strncmp(filename, "self/", strlen("self/"))) {
6717             filename += strlen("self/");
6718         } else if (*filename >= '1' && *filename <= '9') {
6719             char myself[80];
6720             snprintf(myself, sizeof(myself), "%d/", getpid());
6721             if (!strncmp(filename, myself, strlen(myself))) {
6722                 filename += strlen(myself);
6723             } else {
6724                 return 0;
6725             }
6726         } else {
6727             return 0;
6728         }
6729         if (!strcmp(filename, entry)) {
6730             return 1;
6731         }
6732     }
6733     return 0;
6734 }
6735 
6736 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6737 static int is_proc(const char *filename, const char *entry)
6738 {
6739     return strcmp(filename, entry) == 0;
6740 }
6741 
6742 static int open_net_route(void *cpu_env, int fd)
6743 {
6744     FILE *fp;
6745     char *line = NULL;
6746     size_t len = 0;
6747     ssize_t read;
6748 
6749     fp = fopen("/proc/net/route", "r");
6750     if (fp == NULL) {
6751         return -1;
6752     }
6753 
6754     /* read header */
6755 
6756     read = getline(&line, &len, fp);
6757     dprintf(fd, "%s", line);
6758 
6759     /* read routes */
6760 
6761     while ((read = getline(&line, &len, fp)) != -1) {
6762         char iface[16];
6763         uint32_t dest, gw, mask;
6764         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6765         sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6766                      iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6767                      &mask, &mtu, &window, &irtt);
6768         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6769                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6770                 metric, tswap32(mask), mtu, window, irtt);
6771     }
6772 
6773     free(line);
6774     fclose(fp);
6775 
6776     return 0;
6777 }
6778 #endif
6779 
6780 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
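/*
 * openat() with special handling for a few /proc paths: "/proc/self/exe"
 * is redirected to the guest executable (the AT_EXECFD descriptor when
 * available, otherwise exec_path), and the entries listed in fakes[]
 * below are synthesized into a temporary file (created with mkstemp()
 * and immediately unlinked) whose descriptor is returned to the guest.
 * Everything else is passed to the host via safe_openat().
 */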
6781 {
6782     struct fake_open {
6783         const char *filename;
6784         int (*fill)(void *cpu_env, int fd);
6785         int (*cmp)(const char *s1, const char *s2);
6786     };
6787     const struct fake_open *fake_open;
6788     static const struct fake_open fakes[] = {
6789         { "maps", open_self_maps, is_proc_myself },
6790         { "stat", open_self_stat, is_proc_myself },
6791         { "auxv", open_self_auxv, is_proc_myself },
6792         { "cmdline", open_self_cmdline, is_proc_myself },
6793 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6794         { "/proc/net/route", open_net_route, is_proc },
6795 #endif
6796         { NULL, NULL, NULL }
6797     };
6798 
6799     if (is_proc_myself(pathname, "exe")) {
6800         int execfd = qemu_getauxval(AT_EXECFD);
6801         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
6802     }
6803 
6804     for (fake_open = fakes; fake_open->filename; fake_open++) {
6805         if (fake_open->cmp(pathname, fake_open->filename)) {
6806             break;
6807         }
6808     }
6809 
6810     if (fake_open->filename) {
6811         const char *tmpdir;
6812         char filename[PATH_MAX];
6813         int fd, r;
6814 
6815         /* create a temporary file to hold the synthesized /proc contents */
6816         tmpdir = getenv("TMPDIR");
6817         if (!tmpdir)
6818             tmpdir = "/tmp";
6819         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
6820         fd = mkstemp(filename);
6821         if (fd < 0) {
6822             return fd;
6823         }
6824         unlink(filename);
6825 
6826         if ((r = fake_open->fill(cpu_env, fd))) {
6827             int e = errno;
6828             close(fd);
6829             errno = e;
6830             return r;
6831         }
6832         lseek(fd, 0, SEEK_SET);
6833 
6834         return fd;
6835     }
6836 
6837     return safe_openat(dirfd, path(pathname), flags, mode);
6838 }
6839 
6840 #define TIMER_MAGIC 0x0caf0000
6841 #define TIMER_MAGIC_MASK 0xffff0000
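/*
 * get_timer_id() below expects a guest-visible timer ID carrying
 * TIMER_MAGIC in its top 16 bits and the g_posix_timers index in its
 * low 16 bits: e.g. 0x0caf0002 (illustrative value) decodes to slot 2,
 * while anything without the magic is rejected with -TARGET_EINVAL.
 */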
6842 
6843 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
6844 static target_timer_t get_timer_id(abi_long arg)
6845 {
6846     target_timer_t timerid = arg;
6847 
6848     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6849         return -TARGET_EINVAL;
6850     }
6851 
6852     timerid &= 0xffff;
6853 
6854     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6855         return -TARGET_EINVAL;
6856     }
6857 
6858     return timerid;
6859 }
6860 
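/*
 * The two helpers below convert CPU affinity masks between the guest
 * layout (an array of abi_ulong words in guest byte order) and the host
 * layout (an array of unsigned long words): bit j of guest word i
 * corresponds to absolute CPU number i * sizeof(abi_ulong) * 8 + j,
 * which is then set or read in the matching host word and bit.
 */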
6861 static int target_to_host_cpu_mask(unsigned long *host_mask,
6862                                    size_t host_size,
6863                                    abi_ulong target_addr,
6864                                    size_t target_size)
6865 {
6866     unsigned target_bits = sizeof(abi_ulong) * 8;
6867     unsigned host_bits = sizeof(*host_mask) * 8;
6868     abi_ulong *target_mask;
6869     unsigned i, j;
6870 
6871     assert(host_size >= target_size);
6872 
6873     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
6874     if (!target_mask) {
6875         return -TARGET_EFAULT;
6876     }
6877     memset(host_mask, 0, host_size);
6878 
6879     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6880         unsigned bit = i * target_bits;
6881         abi_ulong val;
6882 
6883         __get_user(val, &target_mask[i]);
6884         for (j = 0; j < target_bits; j++, bit++) {
6885             if (val & (1UL << j)) {
6886                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
6887             }
6888         }
6889     }
6890 
6891     unlock_user(target_mask, target_addr, 0);
6892     return 0;
6893 }
6894 
6895 static int host_to_target_cpu_mask(const unsigned long *host_mask,
6896                                    size_t host_size,
6897                                    abi_ulong target_addr,
6898                                    size_t target_size)
6899 {
6900     unsigned target_bits = sizeof(abi_ulong) * 8;
6901     unsigned host_bits = sizeof(*host_mask) * 8;
6902     abi_ulong *target_mask;
6903     unsigned i, j;
6904 
6905     assert(host_size >= target_size);
6906 
6907     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
6908     if (!target_mask) {
6909         return -TARGET_EFAULT;
6910     }
6911 
6912     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6913         unsigned bit = i * target_bits;
6914         abi_ulong val = 0;
6915 
6916         for (j = 0; j < target_bits; j++, bit++) {
6917             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
6918                 val |= 1UL << j;
6919             }
6920         }
6921         __put_user(val, &target_mask[i]);
6922     }
6923 
6924     unlock_user(target_mask, target_addr, target_size);
6925     return 0;
6926 }
6927 
6928 /* This is an internal helper for do_syscall, giving it a single
6929  * return point so that actions such as logging of syscall results
6930  * can be performed in one place.
6931  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
6932  */
6933 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
6934                             abi_long arg2, abi_long arg3, abi_long arg4,
6935                             abi_long arg5, abi_long arg6, abi_long arg7,
6936                             abi_long arg8)
6937 {
6938     CPUState *cpu = ENV_GET_CPU(cpu_env);
6939     abi_long ret;
6940 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
6941     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
6942     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
6943     struct stat st;
6944 #endif
6945 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
6946     || defined(TARGET_NR_fstatfs)
6947     struct statfs stfs;
6948 #endif
6949     void *p;
6950 
6951     switch(num) {
6952     case TARGET_NR_exit:
6953         /* In old applications this may be used to implement _exit(2).
6954            However, in threaded applications it is used for thread termination,
6955            and _exit_group is used for application termination.
6956            Do thread termination if we have more than one thread.  */
6957 
6958         if (block_signals()) {
6959             return -TARGET_ERESTARTSYS;
6960         }
6961 
6962         cpu_list_lock();
6963 
6964         if (CPU_NEXT(first_cpu)) {
6965             TaskState *ts;
6966 
6967             /* Remove the CPU from the list.  */
6968             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
6969 
6970             cpu_list_unlock();
6971 
6972             ts = cpu->opaque;
6973             if (ts->child_tidptr) {
6974                 put_user_u32(0, ts->child_tidptr);
6975                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
6976                           NULL, NULL, 0);
6977             }
6978             thread_cpu = NULL;
6979             object_unref(OBJECT(cpu));
6980             g_free(ts);
6981             rcu_unregister_thread();
6982             pthread_exit(NULL);
6983         }
6984 
6985         cpu_list_unlock();
6986         preexit_cleanup(cpu_env, arg1);
6987         _exit(arg1);
6988         return 0; /* avoid warning */
6989     case TARGET_NR_read:
6990         if (arg3 == 0) {
6991             return 0;
6992         } else {
6993             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6994                 return -TARGET_EFAULT;
6995             ret = get_errno(safe_read(arg1, p, arg3));
6996             if (ret >= 0 &&
6997                 fd_trans_host_to_target_data(arg1)) {
6998                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
6999             }
7000             unlock_user(p, arg2, ret);
7001         }
7002         return ret;
7003     case TARGET_NR_write:
7004         if (arg2 == 0 && arg3 == 0) {
7005             return get_errno(safe_write(arg1, 0, 0));
7006         }
7007         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7008             return -TARGET_EFAULT;
7009         if (fd_trans_target_to_host_data(arg1)) {
7010             void *copy = g_malloc(arg3);
7011             memcpy(copy, p, arg3);
7012             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7013             if (ret >= 0) {
7014                 ret = get_errno(safe_write(arg1, copy, ret));
7015             }
7016             g_free(copy);
7017         } else {
7018             ret = get_errno(safe_write(arg1, p, arg3));
7019         }
7020         unlock_user(p, arg2, 0);
7021         return ret;
7022 
7023 #ifdef TARGET_NR_open
7024     case TARGET_NR_open:
7025         if (!(p = lock_user_string(arg1)))
7026             return -TARGET_EFAULT;
7027         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7028                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7029                                   arg3));
7030         fd_trans_unregister(ret);
7031         unlock_user(p, arg1, 0);
7032         return ret;
7033 #endif
7034     case TARGET_NR_openat:
7035         if (!(p = lock_user_string(arg2)))
7036             return -TARGET_EFAULT;
7037         ret = get_errno(do_openat(cpu_env, arg1, p,
7038                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7039                                   arg4));
7040         fd_trans_unregister(ret);
7041         unlock_user(p, arg2, 0);
7042         return ret;
7043 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7044     case TARGET_NR_name_to_handle_at:
7045         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7046         return ret;
7047 #endif
7048 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7049     case TARGET_NR_open_by_handle_at:
7050         ret = do_open_by_handle_at(arg1, arg2, arg3);
7051         fd_trans_unregister(ret);
7052         return ret;
7053 #endif
7054     case TARGET_NR_close:
7055         fd_trans_unregister(arg1);
7056         return get_errno(close(arg1));
7057 
7058     case TARGET_NR_brk:
7059         return do_brk(arg1);
7060 #ifdef TARGET_NR_fork
7061     case TARGET_NR_fork:
7062         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7063 #endif
7064 #ifdef TARGET_NR_waitpid
7065     case TARGET_NR_waitpid:
7066         {
7067             int status;
7068             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7069             if (!is_error(ret) && arg2 && ret
7070                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7071                 return -TARGET_EFAULT;
7072         }
7073         return ret;
7074 #endif
7075 #ifdef TARGET_NR_waitid
7076     case TARGET_NR_waitid:
7077         {
7078             siginfo_t info;
7079             info.si_pid = 0;
7080             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7081             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7082                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7083                     return -TARGET_EFAULT;
7084                 host_to_target_siginfo(p, &info);
7085                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7086             }
7087         }
7088         return ret;
7089 #endif
7090 #ifdef TARGET_NR_creat /* not on alpha */
7091     case TARGET_NR_creat:
7092         if (!(p = lock_user_string(arg1)))
7093             return -TARGET_EFAULT;
7094         ret = get_errno(creat(p, arg2));
7095         fd_trans_unregister(ret);
7096         unlock_user(p, arg1, 0);
7097         return ret;
7098 #endif
7099 #ifdef TARGET_NR_link
7100     case TARGET_NR_link:
7101         {
7102             void * p2;
7103             p = lock_user_string(arg1);
7104             p2 = lock_user_string(arg2);
7105             if (!p || !p2)
7106                 ret = -TARGET_EFAULT;
7107             else
7108                 ret = get_errno(link(p, p2));
7109             unlock_user(p2, arg2, 0);
7110             unlock_user(p, arg1, 0);
7111         }
7112         return ret;
7113 #endif
7114 #if defined(TARGET_NR_linkat)
7115     case TARGET_NR_linkat:
7116         {
7117             void * p2 = NULL;
7118             if (!arg2 || !arg4)
7119                 return -TARGET_EFAULT;
7120             p  = lock_user_string(arg2);
7121             p2 = lock_user_string(arg4);
7122             if (!p || !p2)
7123                 ret = -TARGET_EFAULT;
7124             else
7125                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7126             unlock_user(p, arg2, 0);
7127             unlock_user(p2, arg4, 0);
7128         }
7129         return ret;
7130 #endif
7131 #ifdef TARGET_NR_unlink
7132     case TARGET_NR_unlink:
7133         if (!(p = lock_user_string(arg1)))
7134             return -TARGET_EFAULT;
7135         ret = get_errno(unlink(p));
7136         unlock_user(p, arg1, 0);
7137         return ret;
7138 #endif
7139 #if defined(TARGET_NR_unlinkat)
7140     case TARGET_NR_unlinkat:
7141         if (!(p = lock_user_string(arg2)))
7142             return -TARGET_EFAULT;
7143         ret = get_errno(unlinkat(arg1, p, arg3));
7144         unlock_user(p, arg2, 0);
7145         return ret;
7146 #endif
7147     case TARGET_NR_execve:
7148         {
7149             char **argp, **envp;
7150             int argc, envc;
7151             abi_ulong gp;
7152             abi_ulong guest_argp;
7153             abi_ulong guest_envp;
7154             abi_ulong addr;
7155             char **q;
7156             int total_size = 0;
7157 
7158             argc = 0;
7159             guest_argp = arg2;
7160             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7161                 if (get_user_ual(addr, gp))
7162                     return -TARGET_EFAULT;
7163                 if (!addr)
7164                     break;
7165                 argc++;
7166             }
7167             envc = 0;
7168             guest_envp = arg3;
7169             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7170                 if (get_user_ual(addr, gp))
7171                     return -TARGET_EFAULT;
7172                 if (!addr)
7173                     break;
7174                 envc++;
7175             }
7176 
7177             argp = g_new0(char *, argc + 1);
7178             envp = g_new0(char *, envc + 1);
7179 
7180             for (gp = guest_argp, q = argp; gp;
7181                   gp += sizeof(abi_ulong), q++) {
7182                 if (get_user_ual(addr, gp))
7183                     goto execve_efault;
7184                 if (!addr)
7185                     break;
7186                 if (!(*q = lock_user_string(addr)))
7187                     goto execve_efault;
7188                 total_size += strlen(*q) + 1;
7189             }
7190             *q = NULL;
7191 
7192             for (gp = guest_envp, q = envp; gp;
7193                   gp += sizeof(abi_ulong), q++) {
7194                 if (get_user_ual(addr, gp))
7195                     goto execve_efault;
7196                 if (!addr)
7197                     break;
7198                 if (!(*q = lock_user_string(addr)))
7199                     goto execve_efault;
7200                 total_size += strlen(*q) + 1;
7201             }
7202             *q = NULL;
7203 
7204             if (!(p = lock_user_string(arg1)))
7205                 goto execve_efault;
7206             /* Although execve() is not an interruptible syscall it is
7207              * a special case where we must use the safe_syscall wrapper:
7208              * if we allow a signal to happen before we make the host
7209              * syscall then we will 'lose' it, because at the point of
7210              * execve the process leaves QEMU's control. So we use the
7211              * safe syscall wrapper to ensure that we either take the
7212              * signal as a guest signal, or else it does not happen
7213              * before the execve completes and makes it the other
7214              * program's problem.
7215              */
7216             ret = get_errno(safe_execve(p, argp, envp));
7217             unlock_user(p, arg1, 0);
7218 
7219             goto execve_end;
7220 
7221         execve_efault:
7222             ret = -TARGET_EFAULT;
7223 
7224         execve_end:
7225             for (gp = guest_argp, q = argp; *q;
7226                   gp += sizeof(abi_ulong), q++) {
7227                 if (get_user_ual(addr, gp)
7228                     || !addr)
7229                     break;
7230                 unlock_user(*q, addr, 0);
7231             }
7232             for (gp = guest_envp, q = envp; *q;
7233                   gp += sizeof(abi_ulong), q++) {
7234                 if (get_user_ual(addr, gp)
7235                     || !addr)
7236                     break;
7237                 unlock_user(*q, addr, 0);
7238             }
7239 
7240             g_free(argp);
7241             g_free(envp);
7242         }
7243         return ret;
7244     case TARGET_NR_chdir:
7245         if (!(p = lock_user_string(arg1)))
7246             return -TARGET_EFAULT;
7247         ret = get_errno(chdir(p));
7248         unlock_user(p, arg1, 0);
7249         return ret;
7250 #ifdef TARGET_NR_time
7251     case TARGET_NR_time:
7252         {
7253             time_t host_time;
7254             ret = get_errno(time(&host_time));
7255             if (!is_error(ret)
7256                 && arg1
7257                 && put_user_sal(host_time, arg1))
7258                 return -TARGET_EFAULT;
7259         }
7260         return ret;
7261 #endif
7262 #ifdef TARGET_NR_mknod
7263     case TARGET_NR_mknod:
7264         if (!(p = lock_user_string(arg1)))
7265             return -TARGET_EFAULT;
7266         ret = get_errno(mknod(p, arg2, arg3));
7267         unlock_user(p, arg1, 0);
7268         return ret;
7269 #endif
7270 #if defined(TARGET_NR_mknodat)
7271     case TARGET_NR_mknodat:
7272         if (!(p = lock_user_string(arg2)))
7273             return -TARGET_EFAULT;
7274         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7275         unlock_user(p, arg2, 0);
7276         return ret;
7277 #endif
7278 #ifdef TARGET_NR_chmod
7279     case TARGET_NR_chmod:
7280         if (!(p = lock_user_string(arg1)))
7281             return -TARGET_EFAULT;
7282         ret = get_errno(chmod(p, arg2));
7283         unlock_user(p, arg1, 0);
7284         return ret;
7285 #endif
7286 #ifdef TARGET_NR_lseek
7287     case TARGET_NR_lseek:
7288         return get_errno(lseek(arg1, arg2, arg3));
7289 #endif
7290 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7291     /* Alpha specific */
7292     case TARGET_NR_getxpid:
7293         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7294         return get_errno(getpid());
7295 #endif
7296 #ifdef TARGET_NR_getpid
7297     case TARGET_NR_getpid:
7298         return get_errno(getpid());
7299 #endif
7300     case TARGET_NR_mount:
7301         {
7302             /* need to look at the data field */
7303             void *p2, *p3;
7304 
7305             if (arg1) {
7306                 p = lock_user_string(arg1);
7307                 if (!p) {
7308                     return -TARGET_EFAULT;
7309                 }
7310             } else {
7311                 p = NULL;
7312             }
7313 
7314             p2 = lock_user_string(arg2);
7315             if (!p2) {
7316                 if (arg1) {
7317                     unlock_user(p, arg1, 0);
7318                 }
7319                 return -TARGET_EFAULT;
7320             }
7321 
7322             if (arg3) {
7323                 p3 = lock_user_string(arg3);
7324                 if (!p3) {
7325                     if (arg1) {
7326                         unlock_user(p, arg1, 0);
7327                     }
7328                     unlock_user(p2, arg2, 0);
7329                     return -TARGET_EFAULT;
7330                 }
7331             } else {
7332                 p3 = NULL;
7333             }
7334 
7335             /* FIXME - arg5 should be locked, but it isn't clear how to
7336              * do that since it's not guaranteed to be a NULL-terminated
7337              * string.
7338              */
7339             if (!arg5) {
7340                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7341             } else {
7342                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7343             }
7344             ret = get_errno(ret);
7345 
7346             if (arg1) {
7347                 unlock_user(p, arg1, 0);
7348             }
7349             unlock_user(p2, arg2, 0);
7350             if (arg3) {
7351                 unlock_user(p3, arg3, 0);
7352             }
7353         }
7354         return ret;
7355 #ifdef TARGET_NR_umount
7356     case TARGET_NR_umount:
7357         if (!(p = lock_user_string(arg1)))
7358             return -TARGET_EFAULT;
7359         ret = get_errno(umount(p));
7360         unlock_user(p, arg1, 0);
7361         return ret;
7362 #endif
7363 #ifdef TARGET_NR_stime /* not on alpha */
7364     case TARGET_NR_stime:
7365         {
7366             time_t host_time;
7367             if (get_user_sal(host_time, arg1))
7368                 return -TARGET_EFAULT;
7369             return get_errno(stime(&host_time));
7370         }
7371 #endif
7372 #ifdef TARGET_NR_alarm /* not on alpha */
7373     case TARGET_NR_alarm:
7374         return alarm(arg1);
7375 #endif
7376 #ifdef TARGET_NR_pause /* not on alpha */
7377     case TARGET_NR_pause:
7378         if (!block_signals()) {
7379             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7380         }
7381         return -TARGET_EINTR;
7382 #endif
7383 #ifdef TARGET_NR_utime
7384     case TARGET_NR_utime:
7385         {
7386             struct utimbuf tbuf, *host_tbuf;
7387             struct target_utimbuf *target_tbuf;
7388             if (arg2) {
7389                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7390                     return -TARGET_EFAULT;
7391                 tbuf.actime = tswapal(target_tbuf->actime);
7392                 tbuf.modtime = tswapal(target_tbuf->modtime);
7393                 unlock_user_struct(target_tbuf, arg2, 0);
7394                 host_tbuf = &tbuf;
7395             } else {
7396                 host_tbuf = NULL;
7397             }
7398             if (!(p = lock_user_string(arg1)))
7399                 return -TARGET_EFAULT;
7400             ret = get_errno(utime(p, host_tbuf));
7401             unlock_user(p, arg1, 0);
7402         }
7403         return ret;
7404 #endif
7405 #ifdef TARGET_NR_utimes
7406     case TARGET_NR_utimes:
7407         {
7408             struct timeval *tvp, tv[2];
7409             if (arg2) {
7410                 if (copy_from_user_timeval(&tv[0], arg2)
7411                     || copy_from_user_timeval(&tv[1],
7412                                               arg2 + sizeof(struct target_timeval)))
7413                     return -TARGET_EFAULT;
7414                 tvp = tv;
7415             } else {
7416                 tvp = NULL;
7417             }
7418             if (!(p = lock_user_string(arg1)))
7419                 return -TARGET_EFAULT;
7420             ret = get_errno(utimes(p, tvp));
7421             unlock_user(p, arg1, 0);
7422         }
7423         return ret;
7424 #endif
7425 #if defined(TARGET_NR_futimesat)
7426     case TARGET_NR_futimesat:
7427         {
7428             struct timeval *tvp, tv[2];
7429             if (arg3) {
7430                 if (copy_from_user_timeval(&tv[0], arg3)
7431                     || copy_from_user_timeval(&tv[1],
7432                                               arg3 + sizeof(struct target_timeval)))
7433                     return -TARGET_EFAULT;
7434                 tvp = tv;
7435             } else {
7436                 tvp = NULL;
7437             }
7438             if (!(p = lock_user_string(arg2))) {
7439                 return -TARGET_EFAULT;
7440             }
7441             ret = get_errno(futimesat(arg1, path(p), tvp));
7442             unlock_user(p, arg2, 0);
7443         }
7444         return ret;
7445 #endif
7446 #ifdef TARGET_NR_access
7447     case TARGET_NR_access:
7448         if (!(p = lock_user_string(arg1))) {
7449             return -TARGET_EFAULT;
7450         }
7451         ret = get_errno(access(path(p), arg2));
7452         unlock_user(p, arg1, 0);
7453         return ret;
7454 #endif
7455 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7456     case TARGET_NR_faccessat:
7457         if (!(p = lock_user_string(arg2))) {
7458             return -TARGET_EFAULT;
7459         }
7460         ret = get_errno(faccessat(arg1, p, arg3, 0));
7461         unlock_user(p, arg2, 0);
7462         return ret;
7463 #endif
7464 #ifdef TARGET_NR_nice /* not on alpha */
7465     case TARGET_NR_nice:
7466         return get_errno(nice(arg1));
7467 #endif
7468     case TARGET_NR_sync:
7469         sync();
7470         return 0;
7471 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7472     case TARGET_NR_syncfs:
7473         return get_errno(syncfs(arg1));
7474 #endif
7475     case TARGET_NR_kill:
7476         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7477 #ifdef TARGET_NR_rename
7478     case TARGET_NR_rename:
7479         {
7480             void *p2;
7481             p = lock_user_string(arg1);
7482             p2 = lock_user_string(arg2);
7483             if (!p || !p2)
7484                 ret = -TARGET_EFAULT;
7485             else
7486                 ret = get_errno(rename(p, p2));
7487             unlock_user(p2, arg2, 0);
7488             unlock_user(p, arg1, 0);
7489         }
7490         return ret;
7491 #endif
7492 #if defined(TARGET_NR_renameat)
7493     case TARGET_NR_renameat:
7494         {
7495             void *p2;
7496             p  = lock_user_string(arg2);
7497             p2 = lock_user_string(arg4);
7498             if (!p || !p2)
7499                 ret = -TARGET_EFAULT;
7500             else
7501                 ret = get_errno(renameat(arg1, p, arg3, p2));
7502             unlock_user(p2, arg4, 0);
7503             unlock_user(p, arg2, 0);
7504         }
7505         return ret;
7506 #endif
7507 #if defined(TARGET_NR_renameat2)
7508     case TARGET_NR_renameat2:
7509         {
7510             void *p2;
7511             p  = lock_user_string(arg2);
7512             p2 = lock_user_string(arg4);
7513             if (!p || !p2) {
7514                 ret = -TARGET_EFAULT;
7515             } else {
7516                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7517             }
7518             unlock_user(p2, arg4, 0);
7519             unlock_user(p, arg2, 0);
7520         }
7521         return ret;
7522 #endif
7523 #ifdef TARGET_NR_mkdir
7524     case TARGET_NR_mkdir:
7525         if (!(p = lock_user_string(arg1)))
7526             return -TARGET_EFAULT;
7527         ret = get_errno(mkdir(p, arg2));
7528         unlock_user(p, arg1, 0);
7529         return ret;
7530 #endif
7531 #if defined(TARGET_NR_mkdirat)
7532     case TARGET_NR_mkdirat:
7533         if (!(p = lock_user_string(arg2)))
7534             return -TARGET_EFAULT;
7535         ret = get_errno(mkdirat(arg1, p, arg3));
7536         unlock_user(p, arg2, 0);
7537         return ret;
7538 #endif
7539 #ifdef TARGET_NR_rmdir
7540     case TARGET_NR_rmdir:
7541         if (!(p = lock_user_string(arg1)))
7542             return -TARGET_EFAULT;
7543         ret = get_errno(rmdir(p));
7544         unlock_user(p, arg1, 0);
7545         return ret;
7546 #endif
7547     case TARGET_NR_dup:
7548         ret = get_errno(dup(arg1));
7549         if (ret >= 0) {
7550             fd_trans_dup(arg1, ret);
7551         }
7552         return ret;
7553 #ifdef TARGET_NR_pipe
7554     case TARGET_NR_pipe:
7555         return do_pipe(cpu_env, arg1, 0, 0);
7556 #endif
7557 #ifdef TARGET_NR_pipe2
7558     case TARGET_NR_pipe2:
7559         return do_pipe(cpu_env, arg1,
7560                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7561 #endif
7562     case TARGET_NR_times:
7563         {
7564             struct target_tms *tmsp;
7565             struct tms tms;
7566             ret = get_errno(times(&tms));
7567             if (arg1) {
7568                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7569                 if (!tmsp)
7570                     return -TARGET_EFAULT;
7571                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7572                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7573                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7574                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7575             }
7576             if (!is_error(ret))
7577                 ret = host_to_target_clock_t(ret);
7578         }
7579         return ret;
7580     case TARGET_NR_acct:
7581         if (arg1 == 0) {
7582             ret = get_errno(acct(NULL));
7583         } else {
7584             if (!(p = lock_user_string(arg1))) {
7585                 return -TARGET_EFAULT;
7586             }
7587             ret = get_errno(acct(path(p)));
7588             unlock_user(p, arg1, 0);
7589         }
7590         return ret;
7591 #ifdef TARGET_NR_umount2
7592     case TARGET_NR_umount2:
7593         if (!(p = lock_user_string(arg1)))
7594             return -TARGET_EFAULT;
7595         ret = get_errno(umount2(p, arg2));
7596         unlock_user(p, arg1, 0);
7597         return ret;
7598 #endif
7599     case TARGET_NR_ioctl:
7600         return do_ioctl(arg1, arg2, arg3);
7601 #ifdef TARGET_NR_fcntl
7602     case TARGET_NR_fcntl:
7603         return do_fcntl(arg1, arg2, arg3);
7604 #endif
7605     case TARGET_NR_setpgid:
7606         return get_errno(setpgid(arg1, arg2));
7607     case TARGET_NR_umask:
7608         return get_errno(umask(arg1));
7609     case TARGET_NR_chroot:
7610         if (!(p = lock_user_string(arg1)))
7611             return -TARGET_EFAULT;
7612         ret = get_errno(chroot(p));
7613         unlock_user(p, arg1, 0);
7614         return ret;
7615 #ifdef TARGET_NR_dup2
7616     case TARGET_NR_dup2:
7617         ret = get_errno(dup2(arg1, arg2));
7618         if (ret >= 0) {
7619             fd_trans_dup(arg1, arg2);
7620         }
7621         return ret;
7622 #endif
7623 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7624     case TARGET_NR_dup3:
7625     {
7626         int host_flags;
7627 
7628         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7629             return -TARGET_EINVAL;
7630         }
7631         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7632         ret = get_errno(dup3(arg1, arg2, host_flags));
7633         if (ret >= 0) {
7634             fd_trans_dup(arg1, arg2);
7635         }
7636         return ret;
7637     }
7638 #endif
7639 #ifdef TARGET_NR_getppid /* not on alpha */
7640     case TARGET_NR_getppid:
7641         return get_errno(getppid());
7642 #endif
7643 #ifdef TARGET_NR_getpgrp
7644     case TARGET_NR_getpgrp:
7645         return get_errno(getpgrp());
7646 #endif
7647     case TARGET_NR_setsid:
7648         return get_errno(setsid());
7649 #ifdef TARGET_NR_sigaction
7650     case TARGET_NR_sigaction:
7651         {
7652 #if defined(TARGET_ALPHA)
7653             struct target_sigaction act, oact, *pact = 0;
7654             struct target_old_sigaction *old_act;
7655             if (arg2) {
7656                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7657                     return -TARGET_EFAULT;
7658                 act._sa_handler = old_act->_sa_handler;
7659                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7660                 act.sa_flags = old_act->sa_flags;
7661                 act.sa_restorer = 0;
7662                 unlock_user_struct(old_act, arg2, 0);
7663                 pact = &act;
7664             }
7665             ret = get_errno(do_sigaction(arg1, pact, &oact));
7666             if (!is_error(ret) && arg3) {
7667                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7668                     return -TARGET_EFAULT;
7669                 old_act->_sa_handler = oact._sa_handler;
7670                 old_act->sa_mask = oact.sa_mask.sig[0];
7671                 old_act->sa_flags = oact.sa_flags;
7672                 unlock_user_struct(old_act, arg3, 1);
7673             }
7674 #elif defined(TARGET_MIPS)
7675             struct target_sigaction act, oact, *pact, *old_act;
7676 
7677             if (arg2) {
7678                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7679                     return -TARGET_EFAULT;
7680                 act._sa_handler = old_act->_sa_handler;
7681                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7682                 act.sa_flags = old_act->sa_flags;
7683                 unlock_user_struct(old_act, arg2, 0);
7684                 pact = &act;
7685             } else {
7686                 pact = NULL;
7687             }
7688 
7689             ret = get_errno(do_sigaction(arg1, pact, &oact));
7690 
7691             if (!is_error(ret) && arg3) {
7692                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7693                     return -TARGET_EFAULT;
7694                 old_act->_sa_handler = oact._sa_handler;
7695                 old_act->sa_flags = oact.sa_flags;
7696                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7697                 old_act->sa_mask.sig[1] = 0;
7698                 old_act->sa_mask.sig[2] = 0;
7699                 old_act->sa_mask.sig[3] = 0;
7700                 unlock_user_struct(old_act, arg3, 1);
7701             }
7702 #else
7703             struct target_old_sigaction *old_act;
7704             struct target_sigaction act, oact, *pact;
7705             if (arg2) {
7706                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7707                     return -TARGET_EFAULT;
7708                 act._sa_handler = old_act->_sa_handler;
7709                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7710                 act.sa_flags = old_act->sa_flags;
7711                 act.sa_restorer = old_act->sa_restorer;
7712 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7713                 act.ka_restorer = 0;
7714 #endif
7715                 unlock_user_struct(old_act, arg2, 0);
7716                 pact = &act;
7717             } else {
7718                 pact = NULL;
7719             }
7720             ret = get_errno(do_sigaction(arg1, pact, &oact));
7721             if (!is_error(ret) && arg3) {
7722                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7723                     return -TARGET_EFAULT;
7724                 old_act->_sa_handler = oact._sa_handler;
7725                 old_act->sa_mask = oact.sa_mask.sig[0];
7726                 old_act->sa_flags = oact.sa_flags;
7727                 old_act->sa_restorer = oact.sa_restorer;
7728                 unlock_user_struct(old_act, arg3, 1);
7729             }
7730 #endif
7731         }
7732         return ret;
7733 #endif
7734     case TARGET_NR_rt_sigaction:
7735         {
7736 #if defined(TARGET_ALPHA)
7737             /* For Alpha and SPARC this is a 5 argument syscall, with
7738              * a 'restorer' parameter which must be copied into the
7739              * sa_restorer field of the sigaction struct.
7740              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7741              * and arg5 is the sigsetsize.
7742              * Alpha also has a separate rt_sigaction struct that it uses
7743              * here; SPARC uses the usual sigaction struct.
7744              */
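                 /*
                  * Summary of the layouts handled below:
                  *   Alpha: rt_sigaction(sig, act, oact, sigsetsize = arg4, restorer = arg5)
                  *   SPARC: rt_sigaction(sig, act, oact, restorer = arg4, sigsetsize = arg5)
                  */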
7745             struct target_rt_sigaction *rt_act;
7746             struct target_sigaction act, oact, *pact = 0;
7747 
7748             if (arg4 != sizeof(target_sigset_t)) {
7749                 return -TARGET_EINVAL;
7750             }
7751             if (arg2) {
7752                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7753                     return -TARGET_EFAULT;
7754                 act._sa_handler = rt_act->_sa_handler;
7755                 act.sa_mask = rt_act->sa_mask;
7756                 act.sa_flags = rt_act->sa_flags;
7757                 act.sa_restorer = arg5;
7758                 unlock_user_struct(rt_act, arg2, 0);
7759                 pact = &act;
7760             }
7761             ret = get_errno(do_sigaction(arg1, pact, &oact));
7762             if (!is_error(ret) && arg3) {
7763                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7764                     return -TARGET_EFAULT;
7765                 rt_act->_sa_handler = oact._sa_handler;
7766                 rt_act->sa_mask = oact.sa_mask;
7767                 rt_act->sa_flags = oact.sa_flags;
7768                 unlock_user_struct(rt_act, arg3, 1);
7769             }
7770 #else
7771 #ifdef TARGET_SPARC
7772             target_ulong restorer = arg4;
7773             target_ulong sigsetsize = arg5;
7774 #else
7775             target_ulong sigsetsize = arg4;
7776 #endif
7777             struct target_sigaction *act;
7778             struct target_sigaction *oact;
7779 
7780             if (sigsetsize != sizeof(target_sigset_t)) {
7781                 return -TARGET_EINVAL;
7782             }
7783             if (arg2) {
7784                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
7785                     return -TARGET_EFAULT;
7786                 }
7787 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7788                 act->ka_restorer = restorer;
7789 #endif
7790             } else {
7791                 act = NULL;
7792             }
7793             if (arg3) {
7794                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7795                     ret = -TARGET_EFAULT;
7796                     goto rt_sigaction_fail;
7797                 }
7798             } else
7799                 oact = NULL;
7800             ret = get_errno(do_sigaction(arg1, act, oact));
7801     rt_sigaction_fail:
7802             if (act)
7803                 unlock_user_struct(act, arg2, 0);
7804             if (oact)
7805                 unlock_user_struct(oact, arg3, 1);
7806 #endif
7807         }
7808         return ret;
7809 #ifdef TARGET_NR_sgetmask /* not on alpha */
7810     case TARGET_NR_sgetmask:
7811         {
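                 /* sgetmask() returns the current blocked-signal mask in the
                  * old single-word format as the syscall's return value; no
                  * guest memory is written. */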
7812             sigset_t cur_set;
7813             abi_ulong target_set;
7814             ret = do_sigprocmask(0, NULL, &cur_set);
7815             if (!ret) {
7816                 host_to_target_old_sigset(&target_set, &cur_set);
7817                 ret = target_set;
7818             }
7819         }
7820         return ret;
7821 #endif
7822 #ifdef TARGET_NR_ssetmask /* not on alpha */
7823     case TARGET_NR_ssetmask:
7824         {
7825             sigset_t set, oset;
7826             abi_ulong target_set = arg1;
7827             target_to_host_old_sigset(&set, &target_set);
7828             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7829             if (!ret) {
7830                 host_to_target_old_sigset(&target_set, &oset);
7831                 ret = target_set;
7832             }
7833         }
7834         return ret;
7835 #endif
7836 #ifdef TARGET_NR_sigprocmask
7837     case TARGET_NR_sigprocmask:
7838         {
7839 #if defined(TARGET_ALPHA)
7840             sigset_t set, oldset;
7841             abi_ulong mask;
7842             int how;
7843 
7844             switch (arg1) {
7845             case TARGET_SIG_BLOCK:
7846                 how = SIG_BLOCK;
7847                 break;
7848             case TARGET_SIG_UNBLOCK:
7849                 how = SIG_UNBLOCK;
7850                 break;
7851             case TARGET_SIG_SETMASK:
7852                 how = SIG_SETMASK;
7853                 break;
7854             default:
7855                 return -TARGET_EINVAL;
7856             }
7857             mask = arg2;
7858             target_to_host_old_sigset(&set, &mask);
7859 
7860             ret = do_sigprocmask(how, &set, &oldset);
7861             if (!is_error(ret)) {
7862                 host_to_target_old_sigset(&mask, &oldset);
7863                 ret = mask;
7864                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7865             }
7866 #else
7867             sigset_t set, oldset, *set_ptr;
7868             int how;
7869 
7870             if (arg2) {
7871                 switch (arg1) {
7872                 case TARGET_SIG_BLOCK:
7873                     how = SIG_BLOCK;
7874                     break;
7875                 case TARGET_SIG_UNBLOCK:
7876                     how = SIG_UNBLOCK;
7877                     break;
7878                 case TARGET_SIG_SETMASK:
7879                     how = SIG_SETMASK;
7880                     break;
7881                 default:
7882                     return -TARGET_EINVAL;
7883                 }
7884                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7885                     return -TARGET_EFAULT;
7886                 target_to_host_old_sigset(&set, p);
7887                 unlock_user(p, arg2, 0);
7888                 set_ptr = &set;
7889             } else {
7890                 how = 0;
7891                 set_ptr = NULL;
7892             }
7893             ret = do_sigprocmask(how, set_ptr, &oldset);
7894             if (!is_error(ret) && arg3) {
7895                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7896                     return -TARGET_EFAULT;
7897                 host_to_target_old_sigset(p, &oldset);
7898                 unlock_user(p, arg3, sizeof(target_sigset_t));
7899             }
7900 #endif
7901         }
7902         return ret;
7903 #endif
7904     case TARGET_NR_rt_sigprocmask:
7905         {
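                 /* rt_sigprocmask(how, set, oldset, sigsetsize): translate the
                  * 'how' constant and the new mask, then copy the old mask back
                  * out if the caller asked for it. */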
7906             int how = arg1;
7907             sigset_t set, oldset, *set_ptr;
7908 
7909             if (arg4 != sizeof(target_sigset_t)) {
7910                 return -TARGET_EINVAL;
7911             }
7912 
7913             if (arg2) {
7914                 switch(how) {
7915                 case TARGET_SIG_BLOCK:
7916                     how = SIG_BLOCK;
7917                     break;
7918                 case TARGET_SIG_UNBLOCK:
7919                     how = SIG_UNBLOCK;
7920                     break;
7921                 case TARGET_SIG_SETMASK:
7922                     how = SIG_SETMASK;
7923                     break;
7924                 default:
7925                     return -TARGET_EINVAL;
7926                 }
7927                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7928                     return -TARGET_EFAULT;
7929                 target_to_host_sigset(&set, p);
7930                 unlock_user(p, arg2, 0);
7931                 set_ptr = &set;
7932             } else {
7933                 how = 0;
7934                 set_ptr = NULL;
7935             }
7936             ret = do_sigprocmask(how, set_ptr, &oldset);
7937             if (!is_error(ret) && arg3) {
7938                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7939                     return -TARGET_EFAULT;
7940                 host_to_target_sigset(p, &oldset);
7941                 unlock_user(p, arg3, sizeof(target_sigset_t));
7942             }
7943         }
7944         return ret;
7945 #ifdef TARGET_NR_sigpending
7946     case TARGET_NR_sigpending:
7947         {
7948             sigset_t set;
7949             ret = get_errno(sigpending(&set));
7950             if (!is_error(ret)) {
7951                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7952                     return -TARGET_EFAULT;
7953                 host_to_target_old_sigset(p, &set);
7954                 unlock_user(p, arg1, sizeof(target_sigset_t));
7955             }
7956         }
7957         return ret;
7958 #endif
7959     case TARGET_NR_rt_sigpending:
7960         {
7961             sigset_t set;
7962 
7963             /* Yes, this check is >, not != like most. We follow the
7964              * kernel's logic here: NR_sigpending is implemented through
7965              * the same code path, and in that case the old_sigset_t is
7966              * smaller in size.
7967              */
7968             if (arg2 > sizeof(target_sigset_t)) {
7969                 return -TARGET_EINVAL;
7970             }
7971 
7972             ret = get_errno(sigpending(&set));
7973             if (!is_error(ret)) {
7974                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7975                     return -TARGET_EFAULT;
7976                 host_to_target_sigset(p, &set);
7977                 unlock_user(p, arg1, sizeof(target_sigset_t));
7978             }
7979         }
7980         return ret;
7981 #ifdef TARGET_NR_sigsuspend
7982     case TARGET_NR_sigsuspend:
7983         {
7984             TaskState *ts = cpu->opaque;
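                 /* The mask is stashed in the TaskState (together with the
                  * in_sigsuspend flag set below) so that QEMU's signal-delivery
                  * code can tell a sigsuspend is in progress and which mask it
                  * installed. */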
7985 #if defined(TARGET_ALPHA)
7986             abi_ulong mask = arg1;
7987             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
7988 #else
7989             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7990                 return -TARGET_EFAULT;
7991             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
7992             unlock_user(p, arg1, 0);
7993 #endif
7994             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7995                                                SIGSET_T_SIZE));
7996             if (ret != -TARGET_ERESTARTSYS) {
7997                 ts->in_sigsuspend = 1;
7998             }
7999         }
8000         return ret;
8001 #endif
8002     case TARGET_NR_rt_sigsuspend:
8003         {
8004             TaskState *ts = cpu->opaque;
8005 
8006             if (arg2 != sizeof(target_sigset_t)) {
8007                 return -TARGET_EINVAL;
8008             }
8009             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8010                 return -TARGET_EFAULT;
8011             target_to_host_sigset(&ts->sigsuspend_mask, p);
8012             unlock_user(p, arg1, 0);
8013             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8014                                                SIGSET_T_SIZE));
8015             if (ret != -TARGET_ERESTARTSYS) {
8016                 ts->in_sigsuspend = 1;
8017             }
8018         }
8019         return ret;
8020     case TARGET_NR_rt_sigtimedwait:
8021         {
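                 /* rt_sigtimedwait(set, info, timeout, sigsetsize): wait for a
                  * signal in 'set', optionally bounded by 'timeout', and convert
                  * the returned siginfo back into the target's format. */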
8022             sigset_t set;
8023             struct timespec uts, *puts;
8024             siginfo_t uinfo;
8025 
8026             if (arg4 != sizeof(target_sigset_t)) {
8027                 return -TARGET_EINVAL;
8028             }
8029 
8030             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8031                 return -TARGET_EFAULT;
8032             target_to_host_sigset(&set, p);
8033             unlock_user(p, arg1, 0);
8034             if (arg3) {
8035                 puts = &uts;
8036                 if (target_to_host_timespec(puts, arg3)) {
                         return -TARGET_EFAULT;
                     }
8037             } else {
8038                 puts = NULL;
8039             }
8040             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8041                                                  SIGSET_T_SIZE));
8042             if (!is_error(ret)) {
8043                 if (arg2) {
8044                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8045                                   0);
8046                     if (!p) {
8047                         return -TARGET_EFAULT;
8048                     }
8049                     host_to_target_siginfo(p, &uinfo);
8050                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8051                 }
8052                 ret = host_to_target_signal(ret);
8053             }
8054         }
8055         return ret;
8056     case TARGET_NR_rt_sigqueueinfo:
8057         {
8058             siginfo_t uinfo;
8059 
8060             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8061             if (!p) {
8062                 return -TARGET_EFAULT;
8063             }
8064             target_to_host_siginfo(&uinfo, p);
8065             unlock_user(p, arg3, 0);
8066             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8067         }
8068         return ret;
8069     case TARGET_NR_rt_tgsigqueueinfo:
8070         {
8071             siginfo_t uinfo;
8072 
8073             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8074             if (!p) {
8075                 return -TARGET_EFAULT;
8076             }
8077             target_to_host_siginfo(&uinfo, p);
8078             unlock_user(p, arg4, 0);
8079             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8080         }
8081         return ret;
8082 #ifdef TARGET_NR_sigreturn
8083     case TARGET_NR_sigreturn:
8084         if (block_signals()) {
8085             return -TARGET_ERESTARTSYS;
8086         }
8087         return do_sigreturn(cpu_env);
8088 #endif
8089     case TARGET_NR_rt_sigreturn:
8090         if (block_signals()) {
8091             return -TARGET_ERESTARTSYS;
8092         }
8093         return do_rt_sigreturn(cpu_env);
8094     case TARGET_NR_sethostname:
8095         if (!(p = lock_user_string(arg1)))
8096             return -TARGET_EFAULT;
8097         ret = get_errno(sethostname(p, arg2));
8098         unlock_user(p, arg1, 0);
8099         return ret;
8100 #ifdef TARGET_NR_setrlimit
8101     case TARGET_NR_setrlimit:
8102         {
8103             int resource = target_to_host_resource(arg1);
8104             struct target_rlimit *target_rlim;
8105             struct rlimit rlim;
8106             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8107                 return -TARGET_EFAULT;
8108             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8109             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8110             unlock_user_struct(target_rlim, arg2, 0);
8111             /*
8112              * If we just passed through resource limit settings for memory then
8113              * they would also apply to QEMU's own allocations, and QEMU will
8114              * crash or hang or die if its allocations fail. Ideally we would
8115              * track the guest allocations in QEMU and apply the limits ourselves.
8116              * For now, just tell the guest the call succeeded but don't actually
8117              * limit anything.
8118              */
8119             if (resource != RLIMIT_AS &&
8120                 resource != RLIMIT_DATA &&
8121                 resource != RLIMIT_STACK) {
8122                 return get_errno(setrlimit(resource, &rlim));
8123             } else {
8124                 return 0;
8125             }
8126         }
8127 #endif
8128 #ifdef TARGET_NR_getrlimit
8129     case TARGET_NR_getrlimit:
8130         {
8131             int resource = target_to_host_resource(arg1);
8132             struct target_rlimit *target_rlim;
8133             struct rlimit rlim;
8134 
8135             ret = get_errno(getrlimit(resource, &rlim));
8136             if (!is_error(ret)) {
8137                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8138                     return -TARGET_EFAULT;
8139                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8140                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8141                 unlock_user_struct(target_rlim, arg2, 1);
8142             }
8143         }
8144         return ret;
8145 #endif
8146     case TARGET_NR_getrusage:
8147         {
8148             struct rusage rusage;
8149             ret = get_errno(getrusage(arg1, &rusage));
8150             if (!is_error(ret)) {
8151                 ret = host_to_target_rusage(arg2, &rusage);
8152             }
8153         }
8154         return ret;
8155     case TARGET_NR_gettimeofday:
8156         {
8157             struct timeval tv;
8158             ret = get_errno(gettimeofday(&tv, NULL));
8159             if (!is_error(ret)) {
8160                 if (copy_to_user_timeval(arg1, &tv))
8161                     return -TARGET_EFAULT;
8162             }
8163         }
8164         return ret;
8165     case TARGET_NR_settimeofday:
8166         {
8167             struct timeval tv, *ptv = NULL;
8168             struct timezone tz, *ptz = NULL;
8169 
8170             if (arg1) {
8171                 if (copy_from_user_timeval(&tv, arg1)) {
8172                     return -TARGET_EFAULT;
8173                 }
8174                 ptv = &tv;
8175             }
8176 
8177             if (arg2) {
8178                 if (copy_from_user_timezone(&tz, arg2)) {
8179                     return -TARGET_EFAULT;
8180                 }
8181                 ptz = &tz;
8182             }
8183 
8184             return get_errno(settimeofday(ptv, ptz));
8185         }
8186 #if defined(TARGET_NR_select)
8187     case TARGET_NR_select:
8188 #if defined(TARGET_WANT_NI_OLD_SELECT)
8189         /* some architectures used to have old_select here
8190          * but now return ENOSYS for it.
8191          */
8192         ret = -TARGET_ENOSYS;
8193 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8194         ret = do_old_select(arg1);
8195 #else
8196         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8197 #endif
8198         return ret;
8199 #endif
8200 #ifdef TARGET_NR_pselect6
8201     case TARGET_NR_pselect6:
8202         {
8203             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8204             fd_set rfds, wfds, efds;
8205             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8206             struct timespec ts, *ts_ptr;
8207 
8208             /*
8209              * The 6th arg is actually two args smashed together,
8210              * so we cannot use the C library.
8211              */
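                 /*
                  * The guest passes a pointer to { sigset_t *set; size_t size; };
                  * arg7[0]/arg7[1] below are those two words read from guest
                  * memory, and 'sig' mirrors that layout for the host syscall.
                  */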
8212             sigset_t set;
8213             struct {
8214                 sigset_t *set;
8215                 size_t size;
8216             } sig, *sig_ptr;
8217 
8218             abi_ulong arg_sigset, arg_sigsize, *arg7;
8219             target_sigset_t *target_sigset;
8220 
8221             n = arg1;
8222             rfd_addr = arg2;
8223             wfd_addr = arg3;
8224             efd_addr = arg4;
8225             ts_addr = arg5;
8226 
8227             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8228             if (ret) {
8229                 return ret;
8230             }
8231             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8232             if (ret) {
8233                 return ret;
8234             }
8235             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8236             if (ret) {
8237                 return ret;
8238             }
8239 
8240             /*
8241              * This takes a timespec, and not a timeval, so we cannot
8242              * use the do_select() helper ...
8243              */
8244             if (ts_addr) {
8245                 if (target_to_host_timespec(&ts, ts_addr)) {
8246                     return -TARGET_EFAULT;
8247                 }
8248                 ts_ptr = &ts;
8249             } else {
8250                 ts_ptr = NULL;
8251             }
8252 
8253             /* Extract the two packed args for the sigset */
8254             if (arg6) {
8255                 sig_ptr = &sig;
8256                 sig.size = SIGSET_T_SIZE;
8257 
8258                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8259                 if (!arg7) {
8260                     return -TARGET_EFAULT;
8261                 }
8262                 arg_sigset = tswapal(arg7[0]);
8263                 arg_sigsize = tswapal(arg7[1]);
8264                 unlock_user(arg7, arg6, 0);
8265 
8266                 if (arg_sigset) {
8267                     sig.set = &set;
8268                     if (arg_sigsize != sizeof(*target_sigset)) {
8269                         /* Like the kernel, we enforce correct size sigsets */
8270                         return -TARGET_EINVAL;
8271                     }
8272                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8273                                               sizeof(*target_sigset), 1);
8274                     if (!target_sigset) {
8275                         return -TARGET_EFAULT;
8276                     }
8277                     target_to_host_sigset(&set, target_sigset);
8278                     unlock_user(target_sigset, arg_sigset, 0);
8279                 } else {
8280                     sig.set = NULL;
8281                 }
8282             } else {
8283                 sig_ptr = NULL;
8284             }
8285 
8286             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8287                                           ts_ptr, sig_ptr));
8288 
8289             if (!is_error(ret)) {
8290                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8291                     return -TARGET_EFAULT;
8292                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8293                     return -TARGET_EFAULT;
8294                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8295                     return -TARGET_EFAULT;
8296 
8297                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8298                     return -TARGET_EFAULT;
8299             }
8300         }
8301         return ret;
8302 #endif
8303 #ifdef TARGET_NR_symlink
8304     case TARGET_NR_symlink:
8305         {
8306             void *p2;
8307             p = lock_user_string(arg1);
8308             p2 = lock_user_string(arg2);
8309             if (!p || !p2)
8310                 ret = -TARGET_EFAULT;
8311             else
8312                 ret = get_errno(symlink(p, p2));
8313             unlock_user(p2, arg2, 0);
8314             unlock_user(p, arg1, 0);
8315         }
8316         return ret;
8317 #endif
8318 #if defined(TARGET_NR_symlinkat)
8319     case TARGET_NR_symlinkat:
8320         {
8321             void *p2;
8322             p  = lock_user_string(arg1);
8323             p2 = lock_user_string(arg3);
8324             if (!p || !p2)
8325                 ret = -TARGET_EFAULT;
8326             else
8327                 ret = get_errno(symlinkat(p, arg2, p2));
8328             unlock_user(p2, arg3, 0);
8329             unlock_user(p, arg1, 0);
8330         }
8331         return ret;
8332 #endif
8333 #ifdef TARGET_NR_readlink
8334     case TARGET_NR_readlink:
8335         {
8336             void *p2;
8337             p = lock_user_string(arg1);
8338             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8339             if (!p || !p2) {
8340                 ret = -TARGET_EFAULT;
8341             } else if (!arg3) {
8342                 /* Short circuit this for the magic exe check. */
8343                 ret = -TARGET_EINVAL;
8344             } else if (is_proc_myself((const char *)p, "exe")) {
8345                 char real[PATH_MAX], *temp;
8346                 temp = realpath(exec_path, real);
8347                 /* Return value is # of bytes that we wrote to the buffer. */
8348                 if (temp == NULL) {
8349                     ret = get_errno(-1);
8350                 } else {
8351                     /* Don't worry about sign mismatch as earlier mapping
8352                      * logic would have thrown a bad address error. */
8353                     ret = MIN(strlen(real), arg3);
8354                     /* We cannot NUL terminate the string. */
8355                     memcpy(p2, real, ret);
8356                 }
8357             } else {
8358                 ret = get_errno(readlink(path(p), p2, arg3));
8359             }
8360             unlock_user(p2, arg2, ret);
8361             unlock_user(p, arg1, 0);
8362         }
8363         return ret;
8364 #endif
8365 #if defined(TARGET_NR_readlinkat)
8366     case TARGET_NR_readlinkat:
8367         {
8368             void *p2;
8369             p  = lock_user_string(arg2);
8370             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8371             if (!p || !p2) {
8372                 ret = -TARGET_EFAULT;
8373             } else if (is_proc_myself((const char *)p, "exe")) {
8374                 char real[PATH_MAX], *temp;
8375                 temp = realpath(exec_path, real);
8376                 ret = temp == NULL ? get_errno(-1) : strlen(real);
8377                 snprintf((char *)p2, arg4, "%s", real);
8378             } else {
8379                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8380             }
8381             unlock_user(p2, arg3, ret);
8382             unlock_user(p, arg2, 0);
8383         }
8384         return ret;
8385 #endif
8386 #ifdef TARGET_NR_swapon
8387     case TARGET_NR_swapon:
8388         if (!(p = lock_user_string(arg1)))
8389             return -TARGET_EFAULT;
8390         ret = get_errno(swapon(p, arg2));
8391         unlock_user(p, arg1, 0);
8392         return ret;
8393 #endif
8394     case TARGET_NR_reboot:
8395         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8396            /* arg4 (the command string) is only used for RESTART2;
                 * it must be ignored in all other cases */
8397            p = lock_user_string(arg4);
8398            if (!p) {
8399                return -TARGET_EFAULT;
8400            }
8401            ret = get_errno(reboot(arg1, arg2, arg3, p));
8402            unlock_user(p, arg4, 0);
8403         } else {
8404            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8405         }
8406         return ret;
8407 #ifdef TARGET_NR_mmap
8408     case TARGET_NR_mmap:
8409 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8410     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8411     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8412     || defined(TARGET_S390X)
8413         {
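                 /* On these targets the old-style mmap() takes a single pointer
                  * to a block of six arguments in guest memory rather than
                  * passing them in registers, so unpack that block first. */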
8414             abi_ulong *v;
8415             abi_ulong v1, v2, v3, v4, v5, v6;
8416             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8417                 return -TARGET_EFAULT;
8418             v1 = tswapal(v[0]);
8419             v2 = tswapal(v[1]);
8420             v3 = tswapal(v[2]);
8421             v4 = tswapal(v[3]);
8422             v5 = tswapal(v[4]);
8423             v6 = tswapal(v[5]);
8424             unlock_user(v, arg1, 0);
8425             ret = get_errno(target_mmap(v1, v2, v3,
8426                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8427                                         v5, v6));
8428         }
8429 #else
8430         ret = get_errno(target_mmap(arg1, arg2, arg3,
8431                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8432                                     arg5,
8433                                     arg6));
8434 #endif
8435         return ret;
8436 #endif
8437 #ifdef TARGET_NR_mmap2
8438     case TARGET_NR_mmap2:
8439 #ifndef MMAP_SHIFT
8440 #define MMAP_SHIFT 12
8441 #endif
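             /* mmap2 passes the file offset in units of 2^MMAP_SHIFT
              * (normally 4096-byte) pages, hence the shift below. */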
8442         ret = target_mmap(arg1, arg2, arg3,
8443                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8444                           arg5, arg6 << MMAP_SHIFT);
8445         return get_errno(ret);
8446 #endif
8447     case TARGET_NR_munmap:
8448         return get_errno(target_munmap(arg1, arg2));
8449     case TARGET_NR_mprotect:
8450         {
8451             TaskState *ts = cpu->opaque;
8452             /* Special hack to detect libc making the stack executable.  */
8453             if ((arg3 & PROT_GROWSDOWN)
8454                 && arg1 >= ts->info->stack_limit
8455                 && arg1 <= ts->info->start_stack) {
8456                 arg3 &= ~PROT_GROWSDOWN;
8457                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8458                 arg1 = ts->info->stack_limit;
8459             }
8460         }
8461         return get_errno(target_mprotect(arg1, arg2, arg3));
8462 #ifdef TARGET_NR_mremap
8463     case TARGET_NR_mremap:
8464         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8465 #endif
8466         /* ??? msync/mlock/munlock are broken for softmmu.  */
8467 #ifdef TARGET_NR_msync
8468     case TARGET_NR_msync:
8469         return get_errno(msync(g2h(arg1), arg2, arg3));
8470 #endif
8471 #ifdef TARGET_NR_mlock
8472     case TARGET_NR_mlock:
8473         return get_errno(mlock(g2h(arg1), arg2));
8474 #endif
8475 #ifdef TARGET_NR_munlock
8476     case TARGET_NR_munlock:
8477         return get_errno(munlock(g2h(arg1), arg2));
8478 #endif
8479 #ifdef TARGET_NR_mlockall
8480     case TARGET_NR_mlockall:
8481         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8482 #endif
8483 #ifdef TARGET_NR_munlockall
8484     case TARGET_NR_munlockall:
8485         return get_errno(munlockall());
8486 #endif
8487 #ifdef TARGET_NR_truncate
8488     case TARGET_NR_truncate:
8489         if (!(p = lock_user_string(arg1)))
8490             return -TARGET_EFAULT;
8491         ret = get_errno(truncate(p, arg2));
8492         unlock_user(p, arg1, 0);
8493         return ret;
8494 #endif
8495 #ifdef TARGET_NR_ftruncate
8496     case TARGET_NR_ftruncate:
8497         return get_errno(ftruncate(arg1, arg2));
8498 #endif
8499     case TARGET_NR_fchmod:
8500         return get_errno(fchmod(arg1, arg2));
8501 #if defined(TARGET_NR_fchmodat)
8502     case TARGET_NR_fchmodat:
8503         if (!(p = lock_user_string(arg2)))
8504             return -TARGET_EFAULT;
8505         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8506         unlock_user(p, arg2, 0);
8507         return ret;
8508 #endif
8509     case TARGET_NR_getpriority:
8510         /* Note that negative values are valid for getpriority, so we must
8511            differentiate based on errno settings.  */
8512         errno = 0;
8513         ret = getpriority(arg1, arg2);
8514         if (ret == -1 && errno != 0) {
8515             return -host_to_target_errno(errno);
8516         }
8517 #ifdef TARGET_ALPHA
8518         /* Return value is the unbiased priority.  Signal no error.  */
8519         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8520 #else
8521         /* Return value is a biased priority to avoid negative numbers.  */
8522         ret = 20 - ret;
8523 #endif
8524         return ret;
8525     case TARGET_NR_setpriority:
8526         return get_errno(setpriority(arg1, arg2, arg3));
8527 #ifdef TARGET_NR_statfs
8528     case TARGET_NR_statfs:
8529         if (!(p = lock_user_string(arg1))) {
8530             return -TARGET_EFAULT;
8531         }
8532         ret = get_errno(statfs(path(p), &stfs));
8533         unlock_user(p, arg1, 0);
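         /* Shared tail: TARGET_NR_fstatfs below jumps here once 'stfs' is filled. */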
8534     convert_statfs:
8535         if (!is_error(ret)) {
8536             struct target_statfs *target_stfs;
8537 
8538             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8539                 return -TARGET_EFAULT;
8540             __put_user(stfs.f_type, &target_stfs->f_type);
8541             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8542             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8543             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8544             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8545             __put_user(stfs.f_files, &target_stfs->f_files);
8546             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8547             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8548             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8549             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8550             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8551 #ifdef _STATFS_F_FLAGS
8552             __put_user(stfs.f_flags, &target_stfs->f_flags);
8553 #else
8554             __put_user(0, &target_stfs->f_flags);
8555 #endif
8556             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8557             unlock_user_struct(target_stfs, arg2, 1);
8558         }
8559         return ret;
8560 #endif
8561 #ifdef TARGET_NR_fstatfs
8562     case TARGET_NR_fstatfs:
8563         ret = get_errno(fstatfs(arg1, &stfs));
8564         goto convert_statfs;
8565 #endif
8566 #ifdef TARGET_NR_statfs64
8567     case TARGET_NR_statfs64:
8568         if (!(p = lock_user_string(arg1))) {
8569             return -TARGET_EFAULT;
8570         }
8571         ret = get_errno(statfs(path(p), &stfs));
8572         unlock_user(p, arg1, 0);
8573     convert_statfs64:
8574         if (!is_error(ret)) {
8575             struct target_statfs64 *target_stfs;
8576 
8577             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8578                 return -TARGET_EFAULT;
8579             __put_user(stfs.f_type, &target_stfs->f_type);
8580             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8581             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8582             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8583             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8584             __put_user(stfs.f_files, &target_stfs->f_files);
8585             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8586             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8587             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8588             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8589             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8590             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8591             unlock_user_struct(target_stfs, arg3, 1);
8592         }
8593         return ret;
8594     case TARGET_NR_fstatfs64:
8595         ret = get_errno(fstatfs(arg1, &stfs));
8596         goto convert_statfs64;
8597 #endif
8598 #ifdef TARGET_NR_socketcall
8599     case TARGET_NR_socketcall:
8600         return do_socketcall(arg1, arg2);
8601 #endif
8602 #ifdef TARGET_NR_accept
8603     case TARGET_NR_accept:
8604         return do_accept4(arg1, arg2, arg3, 0);
8605 #endif
8606 #ifdef TARGET_NR_accept4
8607     case TARGET_NR_accept4:
8608         return do_accept4(arg1, arg2, arg3, arg4);
8609 #endif
8610 #ifdef TARGET_NR_bind
8611     case TARGET_NR_bind:
8612         return do_bind(arg1, arg2, arg3);
8613 #endif
8614 #ifdef TARGET_NR_connect
8615     case TARGET_NR_connect:
8616         return do_connect(arg1, arg2, arg3);
8617 #endif
8618 #ifdef TARGET_NR_getpeername
8619     case TARGET_NR_getpeername:
8620         return do_getpeername(arg1, arg2, arg3);
8621 #endif
8622 #ifdef TARGET_NR_getsockname
8623     case TARGET_NR_getsockname:
8624         return do_getsockname(arg1, arg2, arg3);
8625 #endif
8626 #ifdef TARGET_NR_getsockopt
8627     case TARGET_NR_getsockopt:
8628         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8629 #endif
8630 #ifdef TARGET_NR_listen
8631     case TARGET_NR_listen:
8632         return get_errno(listen(arg1, arg2));
8633 #endif
8634 #ifdef TARGET_NR_recv
8635     case TARGET_NR_recv:
8636         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8637 #endif
8638 #ifdef TARGET_NR_recvfrom
8639     case TARGET_NR_recvfrom:
8640         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8641 #endif
8642 #ifdef TARGET_NR_recvmsg
8643     case TARGET_NR_recvmsg:
8644         return do_sendrecvmsg(arg1, arg2, arg3, 0);
8645 #endif
8646 #ifdef TARGET_NR_send
8647     case TARGET_NR_send:
8648         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8649 #endif
8650 #ifdef TARGET_NR_sendmsg
8651     case TARGET_NR_sendmsg:
8652         return do_sendrecvmsg(arg1, arg2, arg3, 1);
8653 #endif
8654 #ifdef TARGET_NR_sendmmsg
8655     case TARGET_NR_sendmmsg:
8656         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8657     case TARGET_NR_recvmmsg:
8658         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8659 #endif
8660 #ifdef TARGET_NR_sendto
8661     case TARGET_NR_sendto:
8662         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8663 #endif
8664 #ifdef TARGET_NR_shutdown
8665     case TARGET_NR_shutdown:
8666         return get_errno(shutdown(arg1, arg2));
8667 #endif
8668 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8669     case TARGET_NR_getrandom:
8670         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8671         if (!p) {
8672             return -TARGET_EFAULT;
8673         }
8674         ret = get_errno(getrandom(p, arg2, arg3));
8675         unlock_user(p, arg1, ret);
8676         return ret;
8677 #endif
8678 #ifdef TARGET_NR_socket
8679     case TARGET_NR_socket:
8680         return do_socket(arg1, arg2, arg3);
8681 #endif
8682 #ifdef TARGET_NR_socketpair
8683     case TARGET_NR_socketpair:
8684         return do_socketpair(arg1, arg2, arg3, arg4);
8685 #endif
8686 #ifdef TARGET_NR_setsockopt
8687     case TARGET_NR_setsockopt:
8688         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8689 #endif
8690 #if defined(TARGET_NR_syslog)
8691     case TARGET_NR_syslog:
8692         {
8693             int len = arg3;   /* buffer length; arg2 is the buffer pointer */
8694 
8695             switch (arg1) {
8696             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
8697             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
8698             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
8699             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
8700             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
8701             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8702             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
8703             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
8704                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8705             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
8706             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
8707             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
8708                 {
8709                     if (len < 0) {
8710                         return -TARGET_EINVAL;
8711                     }
8712                     if (len == 0) {
8713                         return 0;
8714                     }
8715                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8716                     if (!p) {
8717                         return -TARGET_EFAULT;
8718                     }
8719                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8720                     unlock_user(p, arg2, arg3);
8721                 }
8722                 return ret;
8723             default:
8724                 return -TARGET_EINVAL;
8725             }
8726         }
8727         break;
8728 #endif
8729     case TARGET_NR_setitimer:
8730         {
8731             struct itimerval value, ovalue, *pvalue;
8732 
8733             if (arg2) {
8734                 pvalue = &value;
8735                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8736                     || copy_from_user_timeval(&pvalue->it_value,
8737                                               arg2 + sizeof(struct target_timeval)))
8738                     return -TARGET_EFAULT;
8739             } else {
8740                 pvalue = NULL;
8741             }
8742             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8743             if (!is_error(ret) && arg3) {
8744                 if (copy_to_user_timeval(arg3,
8745                                          &ovalue.it_interval)
8746                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8747                                             &ovalue.it_value))
8748                     return -TARGET_EFAULT;
8749             }
8750         }
8751         return ret;
8752     case TARGET_NR_getitimer:
8753         {
8754             struct itimerval value;
8755 
8756             ret = get_errno(getitimer(arg1, &value));
8757             if (!is_error(ret) && arg2) {
8758                 if (copy_to_user_timeval(arg2,
8759                                          &value.it_interval)
8760                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8761                                             &value.it_value))
8762                     return -TARGET_EFAULT;
8763             }
8764         }
8765         return ret;
8766 #ifdef TARGET_NR_stat
8767     case TARGET_NR_stat:
8768         if (!(p = lock_user_string(arg1))) {
8769             return -TARGET_EFAULT;
8770         }
8771         ret = get_errno(stat(path(p), &st));
8772         unlock_user(p, arg1, 0);
8773         goto do_stat;
8774 #endif
8775 #ifdef TARGET_NR_lstat
8776     case TARGET_NR_lstat:
8777         if (!(p = lock_user_string(arg1))) {
8778             return -TARGET_EFAULT;
8779         }
8780         ret = get_errno(lstat(path(p), &st));
8781         unlock_user(p, arg1, 0);
8782         goto do_stat;
8783 #endif
8784 #ifdef TARGET_NR_fstat
8785     case TARGET_NR_fstat:
8786         {
8787             ret = get_errno(fstat(arg1, &st));
8788 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8789         do_stat:
8790 #endif
8791             if (!is_error(ret)) {
8792                 struct target_stat *target_st;
8793 
8794                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8795                     return -TARGET_EFAULT;
8796                 memset(target_st, 0, sizeof(*target_st));
8797                 __put_user(st.st_dev, &target_st->st_dev);
8798                 __put_user(st.st_ino, &target_st->st_ino);
8799                 __put_user(st.st_mode, &target_st->st_mode);
8800                 __put_user(st.st_uid, &target_st->st_uid);
8801                 __put_user(st.st_gid, &target_st->st_gid);
8802                 __put_user(st.st_nlink, &target_st->st_nlink);
8803                 __put_user(st.st_rdev, &target_st->st_rdev);
8804                 __put_user(st.st_size, &target_st->st_size);
8805                 __put_user(st.st_blksize, &target_st->st_blksize);
8806                 __put_user(st.st_blocks, &target_st->st_blocks);
8807                 __put_user(st.st_atime, &target_st->target_st_atime);
8808                 __put_user(st.st_mtime, &target_st->target_st_mtime);
8809                 __put_user(st.st_ctime, &target_st->target_st_ctime);
8810                 unlock_user_struct(target_st, arg2, 1);
8811             }
8812         }
8813         return ret;
8814 #endif
8815     case TARGET_NR_vhangup:
8816         return get_errno(vhangup());
8817 #ifdef TARGET_NR_syscall
8818     case TARGET_NR_syscall:
8819         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8820                           arg6, arg7, arg8, 0);
8821 #endif
8822     case TARGET_NR_wait4:
8823         {
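                 /* wait4(pid, status, options, rusage): the exit status and
                  * rusage are converted and written back only if the guest
                  * supplied the corresponding pointers. */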
8824             int status;
8825             abi_long status_ptr = arg2;
8826             struct rusage rusage, *rusage_ptr;
8827             abi_ulong target_rusage = arg4;
8828             abi_long rusage_err;
8829             if (target_rusage)
8830                 rusage_ptr = &rusage;
8831             else
8832                 rusage_ptr = NULL;
8833             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8834             if (!is_error(ret)) {
8835                 if (status_ptr && ret) {
8836                     status = host_to_target_waitstatus(status);
8837                     if (put_user_s32(status, status_ptr))
8838                         return -TARGET_EFAULT;
8839                 }
8840                 if (target_rusage) {
8841                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
8842                     if (rusage_err) {
8843                         ret = rusage_err;
8844                     }
8845                 }
8846             }
8847         }
8848         return ret;
8849 #ifdef TARGET_NR_swapoff
8850     case TARGET_NR_swapoff:
8851         if (!(p = lock_user_string(arg1)))
8852             return -TARGET_EFAULT;
8853         ret = get_errno(swapoff(p));
8854         unlock_user(p, arg1, 0);
8855         return ret;
8856 #endif
8857     case TARGET_NR_sysinfo:
8858         {
8859             struct target_sysinfo *target_value;
8860             struct sysinfo value;
8861             ret = get_errno(sysinfo(&value));
8862             if (!is_error(ret) && arg1)
8863             {
8864                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8865                     return -TARGET_EFAULT;
8866                 __put_user(value.uptime, &target_value->uptime);
8867                 __put_user(value.loads[0], &target_value->loads[0]);
8868                 __put_user(value.loads[1], &target_value->loads[1]);
8869                 __put_user(value.loads[2], &target_value->loads[2]);
8870                 __put_user(value.totalram, &target_value->totalram);
8871                 __put_user(value.freeram, &target_value->freeram);
8872                 __put_user(value.sharedram, &target_value->sharedram);
8873                 __put_user(value.bufferram, &target_value->bufferram);
8874                 __put_user(value.totalswap, &target_value->totalswap);
8875                 __put_user(value.freeswap, &target_value->freeswap);
8876                 __put_user(value.procs, &target_value->procs);
8877                 __put_user(value.totalhigh, &target_value->totalhigh);
8878                 __put_user(value.freehigh, &target_value->freehigh);
8879                 __put_user(value.mem_unit, &target_value->mem_unit);
8880                 unlock_user_struct(target_value, arg1, 1);
8881             }
8882         }
8883         return ret;
8884 #ifdef TARGET_NR_ipc
8885     case TARGET_NR_ipc:
8886         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
8887 #endif
8888 #ifdef TARGET_NR_semget
8889     case TARGET_NR_semget:
8890         return get_errno(semget(arg1, arg2, arg3));
8891 #endif
8892 #ifdef TARGET_NR_semop
8893     case TARGET_NR_semop:
8894         return do_semop(arg1, arg2, arg3);
8895 #endif
8896 #ifdef TARGET_NR_semctl
8897     case TARGET_NR_semctl:
8898         return do_semctl(arg1, arg2, arg3, arg4);
8899 #endif
8900 #ifdef TARGET_NR_msgctl
8901     case TARGET_NR_msgctl:
8902         return do_msgctl(arg1, arg2, arg3);
8903 #endif
8904 #ifdef TARGET_NR_msgget
8905     case TARGET_NR_msgget:
8906         return get_errno(msgget(arg1, arg2));
8907 #endif
8908 #ifdef TARGET_NR_msgrcv
8909     case TARGET_NR_msgrcv:
8910         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8911 #endif
8912 #ifdef TARGET_NR_msgsnd
8913     case TARGET_NR_msgsnd:
8914         return do_msgsnd(arg1, arg2, arg3, arg4);
8915 #endif
8916 #ifdef TARGET_NR_shmget
8917     case TARGET_NR_shmget:
8918         return get_errno(shmget(arg1, arg2, arg3));
8919 #endif
8920 #ifdef TARGET_NR_shmctl
8921     case TARGET_NR_shmctl:
8922         return do_shmctl(arg1, arg2, arg3);
8923 #endif
8924 #ifdef TARGET_NR_shmat
8925     case TARGET_NR_shmat:
8926         return do_shmat(cpu_env, arg1, arg2, arg3);
8927 #endif
8928 #ifdef TARGET_NR_shmdt
8929     case TARGET_NR_shmdt:
8930         return do_shmdt(arg1);
8931 #endif
8932     case TARGET_NR_fsync:
8933         return get_errno(fsync(arg1));
8934     case TARGET_NR_clone:
8935         /* Linux manages to have three different orderings for its
8936          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8937          * match the kernel's CONFIG_CLONE_* settings.
8938          * Microblaze is further special in that it uses a sixth
8939          * implicit argument to clone for the TLS pointer.
8940          */
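             /*
              * Roughly, the argument orders being handled are:
              *   default:           clone(flags, newsp, parent_tidptr, child_tidptr, tls)
              *   CLONE_BACKWARDS:   clone(flags, newsp, parent_tidptr, tls, child_tidptr)
              *   CLONE_BACKWARDS2:  clone(newsp, flags, parent_tidptr, child_tidptr, tls)
              */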
8941 #if defined(TARGET_MICROBLAZE)
8942         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
8943 #elif defined(TARGET_CLONE_BACKWARDS)
8944         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
8945 #elif defined(TARGET_CLONE_BACKWARDS2)
8946         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
8947 #else
8948         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
8949 #endif
8950         return ret;
8951 #ifdef __NR_exit_group
8952         /* new thread calls */
8953     case TARGET_NR_exit_group:
8954         preexit_cleanup(cpu_env, arg1);
8955         return get_errno(exit_group(arg1));
8956 #endif
8957     case TARGET_NR_setdomainname:
8958         if (!(p = lock_user_string(arg1)))
8959             return -TARGET_EFAULT;
8960         ret = get_errno(setdomainname(p, arg2));
8961         unlock_user(p, arg1, 0);
8962         return ret;
8963     case TARGET_NR_uname:
8964         /* no need to transcode because we use the linux syscall */
8965         {
8966             struct new_utsname * buf;
8967 
8968             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
8969                 return -TARGET_EFAULT;
8970             ret = get_errno(sys_uname(buf));
8971             if (!is_error(ret)) {
8972                 /* Overwrite the native machine name with whatever is being
8973                    emulated. */
8974                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
8975                           sizeof(buf->machine));
8976                 /* Allow the user to override the reported release.  */
8977                 if (qemu_uname_release && *qemu_uname_release) {
8978                     g_strlcpy(buf->release, qemu_uname_release,
8979                               sizeof(buf->release));
8980                 }
8981             }
8982             unlock_user_struct(buf, arg1, 1);
8983         }
8984         return ret;
8985 #ifdef TARGET_I386
8986     case TARGET_NR_modify_ldt:
8987         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
8988 #if !defined(TARGET_X86_64)
8989     case TARGET_NR_vm86:
8990         return do_vm86(cpu_env, arg1, arg2);
8991 #endif
8992 #endif
8993     case TARGET_NR_adjtimex:
8994         {
8995             struct timex host_buf;
8996 
8997             if (target_to_host_timex(&host_buf, arg1) != 0) {
8998                 return -TARGET_EFAULT;
8999             }
9000             ret = get_errno(adjtimex(&host_buf));
9001             if (!is_error(ret)) {
9002                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9003                     return -TARGET_EFAULT;
9004                 }
9005             }
9006         }
9007         return ret;
9008 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9009     case TARGET_NR_clock_adjtime:
9010         {
9011             struct timex htx, *phtx = &htx;
9012 
9013             if (target_to_host_timex(phtx, arg2) != 0) {
9014                 return -TARGET_EFAULT;
9015             }
9016             ret = get_errno(clock_adjtime(arg1, phtx));
9017             if (!is_error(ret) && phtx) {
9018                 if (host_to_target_timex(arg2, phtx) != 0) {
9019                     return -TARGET_EFAULT;
9020                 }
9021             }
9022         }
9023         return ret;
9024 #endif
9025     case TARGET_NR_getpgid:
9026         return get_errno(getpgid(arg1));
9027     case TARGET_NR_fchdir:
9028         return get_errno(fchdir(arg1));
9029     case TARGET_NR_personality:
9030         return get_errno(personality(arg1));
9031 #ifdef TARGET_NR__llseek /* Not on alpha */
9032     case TARGET_NR__llseek:
9033         {
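                 /* llseek(fd, offset_high, offset_low, result, whence): the
                  * 64-bit offset is split across arg2/arg3 and the result is
                  * written back through the pointer in arg4. */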
9034             int64_t res;
9035 #if !defined(__NR_llseek)
9036             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9037             if (res == -1) {
9038                 ret = get_errno(res);
9039             } else {
9040                 ret = 0;
9041             }
9042 #else
9043             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9044 #endif
9045             if ((ret == 0) && put_user_s64(res, arg4)) {
9046                 return -TARGET_EFAULT;
9047             }
9048         }
9049         return ret;
9050 #endif
9051 #ifdef TARGET_NR_getdents
9052     case TARGET_NR_getdents:
9053 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9054 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9055         {
9056             struct target_dirent *target_dirp;
9057             struct linux_dirent *dirp;
9058             abi_long count = arg3;
9059 
9060             dirp = g_try_malloc(count);
9061             if (!dirp) {
9062                 return -TARGET_ENOMEM;
9063             }
9064 
9065             ret = get_errno(sys_getdents(arg1, dirp, count));
9066             if (!is_error(ret)) {
9067                 struct linux_dirent *de;
9068                 struct target_dirent *tde;
9069                 int len = ret;
9070                 int reclen, treclen;
9071                 int count1, tnamelen;
9072 
9073                 count1 = 0;
9074                 de = dirp;
9075                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) {
                         g_free(dirp);  /* don't leak the temporary buffer on error */
9076                     return -TARGET_EFAULT;
                     }
9077                 tde = target_dirp;
9078                 while (len > 0) {
9079                     reclen = de->d_reclen;
9080                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9081                     assert(tnamelen >= 0);
9082                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9083                     assert(count1 + treclen <= count);
9084                     tde->d_reclen = tswap16(treclen);
9085                     tde->d_ino = tswapal(de->d_ino);
9086                     tde->d_off = tswapal(de->d_off);
9087                     memcpy(tde->d_name, de->d_name, tnamelen);
9088                     de = (struct linux_dirent *)((char *)de + reclen);
9089                     len -= reclen;
9090                     tde = (struct target_dirent *)((char *)tde + treclen);
9091                     count1 += treclen;
9092                 }
9093                 ret = count1;
9094                 unlock_user(target_dirp, arg2, ret);
9095             }
9096             g_free(dirp);
9097         }
9098 #else
9099         {
9100             struct linux_dirent *dirp;
9101             abi_long count = arg3;
9102 
9103             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9104                 return -TARGET_EFAULT;
9105             ret = get_errno(sys_getdents(arg1, dirp, count));
9106             if (!is_error(ret)) {
9107                 struct linux_dirent *de;
9108                 int len = ret;
9109                 int reclen;
9110                 de = dirp;
9111                 while (len > 0) {
9112                     reclen = de->d_reclen;
9113                     if (reclen > len)
9114                         break;
9115                     de->d_reclen = tswap16(reclen);
9116                     tswapls(&de->d_ino);
9117                     tswapls(&de->d_off);
9118                     de = (struct linux_dirent *)((char *)de + reclen);
9119                     len -= reclen;
9120                 }
9121             }
9122             unlock_user(dirp, arg2, ret);
9123         }
9124 #endif
9125 #else
9126         /* Implement getdents in terms of getdents64 */
9127         {
9128             struct linux_dirent64 *dirp;
9129             abi_long count = arg3;
9130 
9131             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9132             if (!dirp) {
9133                 return -TARGET_EFAULT;
9134             }
9135             ret = get_errno(sys_getdents64(arg1, dirp, count));
9136             if (!is_error(ret)) {
9137                 /* Convert the dirent64 structs to target dirent.  We do this
9138                  * in-place, since we can guarantee that a target_dirent is no
9139                  * larger than a dirent64; however this means we have to be
9140                  * careful to read everything before writing in the new format.
9141                  */
9142                 struct linux_dirent64 *de;
9143                 struct target_dirent *tde;
9144                 int len = ret;
9145                 int tlen = 0;
9146 
9147                 de = dirp;
9148                 tde = (struct target_dirent *)dirp;
9149                 while (len > 0) {
9150                     int namelen, treclen;
9151                     int reclen = de->d_reclen;
9152                     uint64_t ino = de->d_ino;
9153                     int64_t off = de->d_off;
9154                     uint8_t type = de->d_type;
9155 
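                    /*
                     * Target record length: header + name + NUL, plus one
                     * byte for d_type which lives in the last byte of the
                     * record (hence the "+ 2"), rounded up to an abi_long
                     * boundary.
                     */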
9156                     namelen = strlen(de->d_name);
9157                     treclen = offsetof(struct target_dirent, d_name)
9158                         + namelen + 2;
9159                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9160 
9161                     memmove(tde->d_name, de->d_name, namelen + 1);
9162                     tde->d_ino = tswapal(ino);
9163                     tde->d_off = tswapal(off);
9164                     tde->d_reclen = tswap16(treclen);
9165                     /* The target_dirent d_type value is stored in what was
9166                      * formerly a padding byte at the end of the structure.
9167                      */
9168                     *(((char *)tde) + treclen - 1) = type;
9169 
9170                     de = (struct linux_dirent64 *)((char *)de + reclen);
9171                     tde = (struct target_dirent *)((char *)tde + treclen);
9172                     len -= reclen;
9173                     tlen += treclen;
9174                 }
9175                 ret = tlen;
9176             }
9177             unlock_user(dirp, arg2, ret);
9178         }
9179 #endif
9180         return ret;
9181 #endif /* TARGET_NR_getdents */
9182 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9183     case TARGET_NR_getdents64:
9184         {
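            /*
             * linux_dirent64 has the same layout on host and target, so the
             * guest buffer can be filled directly and only needs an in-place
             * byte swap of each record's fixed fields for cross-endian guests.
             */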
9185             struct linux_dirent64 *dirp;
9186             abi_long count = arg3;
9187             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9188                 return -TARGET_EFAULT;
9189             ret = get_errno(sys_getdents64(arg1, dirp, count));
9190             if (!is_error(ret)) {
9191                 struct linux_dirent64 *de;
9192                 int len = ret;
9193                 int reclen;
9194                 de = dirp;
9195                 while (len > 0) {
9196                     reclen = de->d_reclen;
9197                     if (reclen > len)
9198                         break;
9199                     de->d_reclen = tswap16(reclen);
9200                     tswap64s((uint64_t *)&de->d_ino);
9201                     tswap64s((uint64_t *)&de->d_off);
9202                     de = (struct linux_dirent64 *)((char *)de + reclen);
9203                     len -= reclen;
9204                 }
9205             }
9206             unlock_user(dirp, arg2, ret);
9207         }
9208         return ret;
9209 #endif /* TARGET_NR_getdents64 */
9210 #if defined(TARGET_NR__newselect)
9211     case TARGET_NR__newselect:
9212         return do_select(arg1, arg2, arg3, arg4, arg5);
9213 #endif
9214 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9215 # ifdef TARGET_NR_poll
9216     case TARGET_NR_poll:
9217 # endif
9218 # ifdef TARGET_NR_ppoll
9219     case TARGET_NR_ppoll:
9220 # endif
9221         {
9222             struct target_pollfd *target_pfd;
9223             unsigned int nfds = arg2;
9224             struct pollfd *pfd;
9225             unsigned int i;
9226 
9227             pfd = NULL;
9228             target_pfd = NULL;
9229             if (nfds) {
9230                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9231                     return -TARGET_EINVAL;
9232                 }
9233 
9234                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9235                                        sizeof(struct target_pollfd) * nfds, 1);
9236                 if (!target_pfd) {
9237                     return -TARGET_EFAULT;
9238                 }
9239 
9240                 pfd = alloca(sizeof(struct pollfd) * nfds);
9241                 for (i = 0; i < nfds; i++) {
9242                     pfd[i].fd = tswap32(target_pfd[i].fd);
9243                     pfd[i].events = tswap16(target_pfd[i].events);
9244                 }
9245             }
9246 
9247             switch (num) {
9248 # ifdef TARGET_NR_ppoll
9249             case TARGET_NR_ppoll:
9250             {
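                /*
                 * ppoll takes an optional timeout and an optional signal mask
                 * that is installed for the duration of the call; arg5 is the
                 * size of the target sigset and must match exactly.
                 */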
9251                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9252                 target_sigset_t *target_set;
9253                 sigset_t _set, *set = &_set;
9254 
9255                 if (arg3) {
9256                     if (target_to_host_timespec(timeout_ts, arg3)) {
9257                         unlock_user(target_pfd, arg1, 0);
9258                         return -TARGET_EFAULT;
9259                     }
9260                 } else {
9261                     timeout_ts = NULL;
9262                 }
9263 
9264                 if (arg4) {
9265                     if (arg5 != sizeof(target_sigset_t)) {
9266                         unlock_user(target_pfd, arg1, 0);
9267                         return -TARGET_EINVAL;
9268                     }
9269 
9270                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9271                     if (!target_set) {
9272                         unlock_user(target_pfd, arg1, 0);
9273                         return -TARGET_EFAULT;
9274                     }
9275                     target_to_host_sigset(set, target_set);
9276                 } else {
9277                     set = NULL;
9278                 }
9279 
9280                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9281                                            set, SIGSET_T_SIZE));
9282 
9283                 if (!is_error(ret) && arg3) {
9284                     host_to_target_timespec(arg3, timeout_ts);
9285                 }
9286                 if (arg4) {
9287                     unlock_user(target_set, arg4, 0);
9288                 }
9289                 break;
9290             }
9291 # endif
9292 # ifdef TARGET_NR_poll
9293             case TARGET_NR_poll:
9294             {
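                /*
                 * Plain poll() is emulated via the same safe_ppoll() path:
                 * the millisecond timeout is converted to a timespec and no
                 * signal mask is passed.
                 */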
9295                 struct timespec ts, *pts;
9296 
9297                 if (arg3 >= 0) {
9298                     /* Convert ms to secs, ns */
9299                     ts.tv_sec = arg3 / 1000;
9300                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9301                     pts = &ts;
9302                 } else {
9303                     /* A negative poll() timeout means "infinite" */
9304                     pts = NULL;
9305                 }
9306                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9307                 break;
9308             }
9309 # endif
9310             default:
9311                 g_assert_not_reached();
9312             }
9313 
9314             if (!is_error(ret)) {
9315                 for (i = 0; i < nfds; i++) {
9316                     target_pfd[i].revents = tswap16(pfd[i].revents);
9317                 }
9318             }
9319             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9320         }
9321         return ret;
9322 #endif
9323     case TARGET_NR_flock:
9324         /* NOTE: the flock constant seems to be the same for every
9325            Linux platform */
9326         return get_errno(safe_flock(arg1, arg2));
9327     case TARGET_NR_readv:
9328         {
9329             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9330             if (vec != NULL) {
9331                 ret = get_errno(safe_readv(arg1, vec, arg3));
9332                 unlock_iovec(vec, arg2, arg3, 1);
9333             } else {
9334                 ret = -host_to_target_errno(errno);
9335             }
9336         }
9337         return ret;
9338     case TARGET_NR_writev:
9339         {
9340             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9341             if (vec != NULL) {
9342                 ret = get_errno(safe_writev(arg1, vec, arg3));
9343                 unlock_iovec(vec, arg2, arg3, 0);
9344             } else {
9345                 ret = -host_to_target_errno(errno);
9346             }
9347         }
9348         return ret;
9349 #if defined(TARGET_NR_preadv)
9350     case TARGET_NR_preadv:
9351         {
9352             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9353             if (vec != NULL) {
9354                 unsigned long low, high;
9355 
9356                 target_to_host_low_high(arg4, arg5, &low, &high);
9357                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9358                 unlock_iovec(vec, arg2, arg3, 1);
9359             } else {
9360                 ret = -host_to_target_errno(errno);
9361             }
9362         }
9363         return ret;
9364 #endif
9365 #if defined(TARGET_NR_pwritev)
9366     case TARGET_NR_pwritev:
9367         {
9368             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9369             if (vec != NULL) {
9370                 unsigned long low, high;
9371 
9372                 target_to_host_low_high(arg4, arg5, &low, &high);
9373                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9374                 unlock_iovec(vec, arg2, arg3, 0);
9375             } else {
9376                 ret = -host_to_target_errno(errno);
9377             }
9378         }
9379         return ret;
9380 #endif
9381     case TARGET_NR_getsid:
9382         return get_errno(getsid(arg1));
9383 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9384     case TARGET_NR_fdatasync:
9385         return get_errno(fdatasync(arg1));
9386 #endif
9387 #ifdef TARGET_NR__sysctl
9388     case TARGET_NR__sysctl:
9389         /* We don't implement this, but ENOTDIR is always a safe
9390            return value. */
9391         return -TARGET_ENOTDIR;
9392 #endif
9393     case TARGET_NR_sched_getaffinity:
9394         {
9395             unsigned int mask_size;
9396             unsigned long *mask;
9397 
9398             /*
9399              * sched_getaffinity needs multiples of ulong, so we need to take
9400              * care of mismatches between target ulong and host ulong sizes.
9401              */
9402             if (arg2 & (sizeof(abi_ulong) - 1)) {
9403                 return -TARGET_EINVAL;
9404             }
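            /* Round the request up to a whole number of host ulongs. */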
9405             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9406 
9407             mask = alloca(mask_size);
9408             memset(mask, 0, mask_size);
9409             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9410 
9411             if (!is_error(ret)) {
9412                 if (ret > arg2) {
9413                     /* More data was returned than will fit in the caller's buffer.
9414                      * This only happens if sizeof(abi_long) < sizeof(long)
9415                      * and the caller passed us a buffer holding an odd number
9416                      * of abi_longs. If the host kernel is actually using the
9417                      * extra 4 bytes then fail EINVAL; otherwise we can just
9418                      * ignore them and only copy the interesting part.
9419                      */
9420                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9421                     if (numcpus > arg2 * 8) {
9422                         return -TARGET_EINVAL;
9423                     }
9424                     ret = arg2;
9425                 }
9426 
9427                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9428                     return -TARGET_EFAULT;
9429                 }
9430             }
9431         }
9432         return ret;
9433     case TARGET_NR_sched_setaffinity:
9434         {
9435             unsigned int mask_size;
9436             unsigned long *mask;
9437 
9438             /*
9439              * sched_setaffinity needs multiples of ulong, so we need to take
9440              * care of mismatches between target ulong and host ulong sizes.
9441              */
9442             if (arg2 & (sizeof(abi_ulong) - 1)) {
9443                 return -TARGET_EINVAL;
9444             }
9445             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9446             mask = alloca(mask_size);
9447 
9448             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9449             if (ret) {
9450                 return ret;
9451             }
9452 
9453             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9454         }
9455     case TARGET_NR_getcpu:
9456         {
9457             unsigned cpu, node;
9458             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9459                                        arg2 ? &node : NULL,
9460                                        NULL));
9461             if (is_error(ret)) {
9462                 return ret;
9463             }
9464             if (arg1 && put_user_u32(cpu, arg1)) {
9465                 return -TARGET_EFAULT;
9466             }
9467             if (arg2 && put_user_u32(node, arg2)) {
9468                 return -TARGET_EFAULT;
9469             }
9470         }
9471         return ret;
9472     case TARGET_NR_sched_setparam:
9473         {
9474             struct sched_param *target_schp;
9475             struct sched_param schp;
9476 
9477             if (arg2 == 0) {
9478                 return -TARGET_EINVAL;
9479             }
9480             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9481                 return -TARGET_EFAULT;
9482             schp.sched_priority = tswap32(target_schp->sched_priority);
9483             unlock_user_struct(target_schp, arg2, 0);
9484             return get_errno(sched_setparam(arg1, &schp));
9485         }
9486     case TARGET_NR_sched_getparam:
9487         {
9488             struct sched_param *target_schp;
9489             struct sched_param schp;
9490 
9491             if (arg2 == 0) {
9492                 return -TARGET_EINVAL;
9493             }
9494             ret = get_errno(sched_getparam(arg1, &schp));
9495             if (!is_error(ret)) {
9496                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9497                     return -TARGET_EFAULT;
9498                 target_schp->sched_priority = tswap32(schp.sched_priority);
9499                 unlock_user_struct(target_schp, arg2, 1);
9500             }
9501         }
9502         return ret;
9503     case TARGET_NR_sched_setscheduler:
9504         {
9505             struct sched_param *target_schp;
9506             struct sched_param schp;
9507             if (arg3 == 0) {
9508                 return -TARGET_EINVAL;
9509             }
9510             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9511                 return -TARGET_EFAULT;
9512             schp.sched_priority = tswap32(target_schp->sched_priority);
9513             unlock_user_struct(target_schp, arg3, 0);
9514             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9515         }
9516     case TARGET_NR_sched_getscheduler:
9517         return get_errno(sched_getscheduler(arg1));
9518     case TARGET_NR_sched_yield:
9519         return get_errno(sched_yield());
9520     case TARGET_NR_sched_get_priority_max:
9521         return get_errno(sched_get_priority_max(arg1));
9522     case TARGET_NR_sched_get_priority_min:
9523         return get_errno(sched_get_priority_min(arg1));
9524     case TARGET_NR_sched_rr_get_interval:
9525         {
9526             struct timespec ts;
9527             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9528             if (!is_error(ret)) {
9529                 ret = host_to_target_timespec(arg2, &ts);
9530             }
9531         }
9532         return ret;
9533     case TARGET_NR_nanosleep:
9534         {
9535             struct timespec req, rem;
9536             target_to_host_timespec(&req, arg1);
9537             ret = get_errno(safe_nanosleep(&req, &rem));
9538             if (is_error(ret) && arg2) {
9539                 host_to_target_timespec(arg2, &rem);
9540             }
9541         }
9542         return ret;
9543     case TARGET_NR_prctl:
9544         switch (arg1) {
9545         case PR_GET_PDEATHSIG:
9546         {
9547             int deathsig;
9548             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9549             if (!is_error(ret) && arg2
9550                 && put_user_ual(deathsig, arg2)) {
9551                 return -TARGET_EFAULT;
9552             }
9553             return ret;
9554         }
9555 #ifdef PR_GET_NAME
9556         case PR_GET_NAME:
9557         {
9558             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9559             if (!name) {
9560                 return -TARGET_EFAULT;
9561             }
9562             ret = get_errno(prctl(arg1, (unsigned long)name,
9563                                   arg3, arg4, arg5));
9564             unlock_user(name, arg2, 16);
9565             return ret;
9566         }
9567         case PR_SET_NAME:
9568         {
9569             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9570             if (!name) {
9571                 return -TARGET_EFAULT;
9572             }
9573             ret = get_errno(prctl(arg1, (unsigned long)name,
9574                                   arg3, arg4, arg5));
9575             unlock_user(name, arg2, 0);
9576             return ret;
9577         }
9578 #endif
9579 #ifdef TARGET_MIPS
9580         case TARGET_PR_GET_FP_MODE:
9581         {
9582             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9583             ret = 0;
9584             if (env->CP0_Status & (1 << CP0St_FR)) {
9585                 ret |= TARGET_PR_FP_MODE_FR;
9586             }
9587             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9588                 ret |= TARGET_PR_FP_MODE_FRE;
9589             }
9590             return ret;
9591         }
9592         case TARGET_PR_SET_FP_MODE:
9593         {
9594             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9595             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9596             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
9597             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9598             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9599 
9600             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
9601                                             TARGET_PR_FP_MODE_FRE;
9602 
9603             /* If nothing to change, return right away, successfully.  */
9604             if (old_fr == new_fr && old_fre == new_fre) {
9605                 return 0;
9606             }
9607             /* Check the value is valid */
9608             if (arg2 & ~known_bits) {
9609                 return -TARGET_EOPNOTSUPP;
9610             }
9611             /* Setting FRE without FR is not supported.  */
9612             if (new_fre && !new_fr) {
9613                 return -TARGET_EOPNOTSUPP;
9614             }
9615             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9616                 /* FR1 is not supported */
9617                 return -TARGET_EOPNOTSUPP;
9618             }
9619             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9620                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9621                 /* cannot set FR=0 */
9622                 return -TARGET_EOPNOTSUPP;
9623             }
9624             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9625                 /* Cannot set FRE=1 */
9626                 return -TARGET_EOPNOTSUPP;
9627             }
9628 
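            /*
             * Changing FR changes how single-precision values are stored in
             * the register file: repack each even/odd register pair so the
             * data visible before the mode switch stays visible afterwards.
             */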
9629             int i;
9630             fpr_t *fpr = env->active_fpu.fpr;
9631             for (i = 0; i < 32 ; i += 2) {
9632                 if (!old_fr && new_fr) {
9633                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9634                 } else if (old_fr && !new_fr) {
9635                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
9636                 }
9637             }
9638 
9639             if (new_fr) {
9640                 env->CP0_Status |= (1 << CP0St_FR);
9641                 env->hflags |= MIPS_HFLAG_F64;
9642             } else {
9643                 env->CP0_Status &= ~(1 << CP0St_FR);
9644                 env->hflags &= ~MIPS_HFLAG_F64;
9645             }
9646             if (new_fre) {
9647                 env->CP0_Config5 |= (1 << CP0C5_FRE);
9648                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
9649                     env->hflags |= MIPS_HFLAG_FRE;
9650                 }
9651             } else {
9652                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
9653                 env->hflags &= ~MIPS_HFLAG_FRE;
9654             }
9655 
9656             return 0;
9657         }
9658 #endif /* MIPS */
9659 #ifdef TARGET_AARCH64
9660         case TARGET_PR_SVE_SET_VL:
9661             /*
9662              * We cannot support either PR_SVE_SET_VL_ONEXEC or
9663              * PR_SVE_VL_INHERIT.  Note the kernel definition
9664              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9665              * even though the current architectural maximum is VQ=16.
9666              */
9667             ret = -TARGET_EINVAL;
9668             if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env))
9669                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9670                 CPUARMState *env = cpu_env;
9671                 ARMCPU *cpu = arm_env_get_cpu(env);
9672                 uint32_t vq, old_vq;
9673 
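                /*
                 * ZCR_EL1.LEN holds (vector quanta - 1), where one quantum is
                 * 16 bytes.  Clamp the requested length to [1, sve_max_vq]
                 * quanta and narrow the live SVE state if the vector shrinks.
                 */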
9674                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9675                 vq = MAX(arg2 / 16, 1);
9676                 vq = MIN(vq, cpu->sve_max_vq);
9677 
9678                 if (vq < old_vq) {
9679                     aarch64_sve_narrow_vq(env, vq);
9680                 }
9681                 env->vfp.zcr_el[1] = vq - 1;
9682                 ret = vq * 16;
9683             }
9684             return ret;
9685         case TARGET_PR_SVE_GET_VL:
9686             ret = -TARGET_EINVAL;
9687             {
9688                 ARMCPU *cpu = arm_env_get_cpu(cpu_env);
9689                 if (cpu_isar_feature(aa64_sve, cpu)) {
9690                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9691                 }
9692             }
9693             return ret;
9694 #endif /* AARCH64 */
9695         case PR_GET_SECCOMP:
9696         case PR_SET_SECCOMP:
9697             /* Disable seccomp to prevent the target from disabling syscalls
9698              * that we need. */
9699             return -TARGET_EINVAL;
9700         default:
9701             /* Most prctl options have no pointer arguments */
9702             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9703         }
9704         break;
9705 #ifdef TARGET_NR_arch_prctl
9706     case TARGET_NR_arch_prctl:
9707 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9708         return do_arch_prctl(cpu_env, arg1, arg2);
9709 #else
9710 #error unreachable
9711 #endif
9712 #endif
9713 #ifdef TARGET_NR_pread64
9714     case TARGET_NR_pread64:
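        /*
         * On 32-bit ABIs that pass 64-bit syscall arguments in aligned
         * register pairs, a padding register precedes the offset, so its
         * two halves arrive one argument later than usual.
         */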
9715         if (regpairs_aligned(cpu_env, num)) {
9716             arg4 = arg5;
9717             arg5 = arg6;
9718         }
9719         if (arg2 == 0 && arg3 == 0) {
9720             /* Special-case NULL buffer and zero length, which should succeed */
9721             p = 0;
9722         } else {
9723             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9724             if (!p) {
9725                 return -TARGET_EFAULT;
9726             }
9727         }
9728         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9729         unlock_user(p, arg2, ret);
9730         return ret;
9731     case TARGET_NR_pwrite64:
9732         if (regpairs_aligned(cpu_env, num)) {
9733             arg4 = arg5;
9734             arg5 = arg6;
9735         }
9736         if (arg2 == 0 && arg3 == 0) {
9737             /* Special-case NULL buffer and zero length, which should succeed */
9738             p = 0;
9739         } else {
9740             p = lock_user(VERIFY_READ, arg2, arg3, 1);
9741             if (!p) {
9742                 return -TARGET_EFAULT;
9743             }
9744         }
9745         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9746         unlock_user(p, arg2, 0);
9747         return ret;
9748 #endif
9749     case TARGET_NR_getcwd:
9750         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9751             return -TARGET_EFAULT;
9752         ret = get_errno(sys_getcwd1(p, arg2));
9753         unlock_user(p, arg1, ret);
9754         return ret;
9755     case TARGET_NR_capget:
9756     case TARGET_NR_capset:
9757     {
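        /*
         * The header is always copied back because the kernel updates its
         * version field (which is how userspace probes the preferred
         * version).  The data area is one struct for _LINUX_CAPABILITY_VERSION
         * and two structs for anything newer; a zero data pointer is allowed
         * and is passed through to the host syscall as NULL.
         */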
9758         struct target_user_cap_header *target_header;
9759         struct target_user_cap_data *target_data = NULL;
9760         struct __user_cap_header_struct header;
9761         struct __user_cap_data_struct data[2];
9762         struct __user_cap_data_struct *dataptr = NULL;
9763         int i, target_datalen;
9764         int data_items = 1;
9765 
9766         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9767             return -TARGET_EFAULT;
9768         }
9769         header.version = tswap32(target_header->version);
9770         header.pid = tswap32(target_header->pid);
9771 
9772         if (header.version != _LINUX_CAPABILITY_VERSION) {
9773             /* Versions 2 and up take a pointer to two user_data structs */
9774             data_items = 2;
9775         }
9776 
9777         target_datalen = sizeof(*target_data) * data_items;
9778 
9779         if (arg2) {
9780             if (num == TARGET_NR_capget) {
9781                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9782             } else {
9783                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9784             }
9785             if (!target_data) {
9786                 unlock_user_struct(target_header, arg1, 0);
9787                 return -TARGET_EFAULT;
9788             }
9789 
9790             if (num == TARGET_NR_capset) {
9791                 for (i = 0; i < data_items; i++) {
9792                     data[i].effective = tswap32(target_data[i].effective);
9793                     data[i].permitted = tswap32(target_data[i].permitted);
9794                     data[i].inheritable = tswap32(target_data[i].inheritable);
9795                 }
9796             }
9797 
9798             dataptr = data;
9799         }
9800 
9801         if (num == TARGET_NR_capget) {
9802             ret = get_errno(capget(&header, dataptr));
9803         } else {
9804             ret = get_errno(capset(&header, dataptr));
9805         }
9806 
9807         /* The kernel always updates version for both capget and capset */
9808         target_header->version = tswap32(header.version);
9809         unlock_user_struct(target_header, arg1, 1);
9810 
9811         if (arg2) {
9812             if (num == TARGET_NR_capget) {
9813                 for (i = 0; i < data_items; i++) {
9814                     target_data[i].effective = tswap32(data[i].effective);
9815                     target_data[i].permitted = tswap32(data[i].permitted);
9816                     target_data[i].inheritable = tswap32(data[i].inheritable);
9817                 }
9818                 unlock_user(target_data, arg2, target_datalen);
9819             } else {
9820                 unlock_user(target_data, arg2, 0);
9821             }
9822         }
9823         return ret;
9824     }
9825     case TARGET_NR_sigaltstack:
9826         return do_sigaltstack(arg1, arg2,
9827                               get_sp_from_cpustate((CPUArchState *)cpu_env));
9828 
9829 #ifdef CONFIG_SENDFILE
9830 #ifdef TARGET_NR_sendfile
9831     case TARGET_NR_sendfile:
9832     {
9833         off_t *offp = NULL;
9834         off_t off;
9835         if (arg3) {
9836             ret = get_user_sal(off, arg3);
9837             if (is_error(ret)) {
9838                 return ret;
9839             }
9840             offp = &off;
9841         }
9842         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9843         if (!is_error(ret) && arg3) {
9844             abi_long ret2 = put_user_sal(off, arg3);
9845             if (is_error(ret2)) {
9846                 ret = ret2;
9847             }
9848         }
9849         return ret;
9850     }
9851 #endif
9852 #ifdef TARGET_NR_sendfile64
9853     case TARGET_NR_sendfile64:
9854     {
9855         off_t *offp = NULL;
9856         off_t off;
9857         if (arg3) {
9858             ret = get_user_s64(off, arg3);
9859             if (is_error(ret)) {
9860                 return ret;
9861             }
9862             offp = &off;
9863         }
9864         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9865         if (!is_error(ret) && arg3) {
9866             abi_long ret2 = put_user_s64(off, arg3);
9867             if (is_error(ret2)) {
9868                 ret = ret2;
9869             }
9870         }
9871         return ret;
9872     }
9873 #endif
9874 #endif
9875 #ifdef TARGET_NR_vfork
9876     case TARGET_NR_vfork:
9877         return get_errno(do_fork(cpu_env,
9878                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
9879                          0, 0, 0, 0));
9880 #endif
9881 #ifdef TARGET_NR_ugetrlimit
9882     case TARGET_NR_ugetrlimit:
9883     {
9884         struct rlimit rlim;
9885         int resource = target_to_host_resource(arg1);
9886         ret = get_errno(getrlimit(resource, &rlim));
9887         if (!is_error(ret)) {
9888             struct target_rlimit *target_rlim;
9889             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9890                 return -TARGET_EFAULT;
9891             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9892             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9893             unlock_user_struct(target_rlim, arg2, 1);
9894         }
9895         return ret;
9896     }
9897 #endif
9898 #ifdef TARGET_NR_truncate64
9899     case TARGET_NR_truncate64:
9900         if (!(p = lock_user_string(arg1)))
9901             return -TARGET_EFAULT;
9902         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
9903         unlock_user(p, arg1, 0);
9904         return ret;
9905 #endif
9906 #ifdef TARGET_NR_ftruncate64
9907     case TARGET_NR_ftruncate64:
9908         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
9909 #endif
9910 #ifdef TARGET_NR_stat64
9911     case TARGET_NR_stat64:
9912         if (!(p = lock_user_string(arg1))) {
9913             return -TARGET_EFAULT;
9914         }
9915         ret = get_errno(stat(path(p), &st));
9916         unlock_user(p, arg1, 0);
9917         if (!is_error(ret))
9918             ret = host_to_target_stat64(cpu_env, arg2, &st);
9919         return ret;
9920 #endif
9921 #ifdef TARGET_NR_lstat64
9922     case TARGET_NR_lstat64:
9923         if (!(p = lock_user_string(arg1))) {
9924             return -TARGET_EFAULT;
9925         }
9926         ret = get_errno(lstat(path(p), &st));
9927         unlock_user(p, arg1, 0);
9928         if (!is_error(ret))
9929             ret = host_to_target_stat64(cpu_env, arg2, &st);
9930         return ret;
9931 #endif
9932 #ifdef TARGET_NR_fstat64
9933     case TARGET_NR_fstat64:
9934         ret = get_errno(fstat(arg1, &st));
9935         if (!is_error(ret))
9936             ret = host_to_target_stat64(cpu_env, arg2, &st);
9937         return ret;
9938 #endif
9939 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9940 #ifdef TARGET_NR_fstatat64
9941     case TARGET_NR_fstatat64:
9942 #endif
9943 #ifdef TARGET_NR_newfstatat
9944     case TARGET_NR_newfstatat:
9945 #endif
9946         if (!(p = lock_user_string(arg2))) {
9947             return -TARGET_EFAULT;
9948         }
9949         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
9950         unlock_user(p, arg2, 0);
9951         if (!is_error(ret))
9952             ret = host_to_target_stat64(cpu_env, arg3, &st);
9953         return ret;
9954 #endif
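    /*
     * The cases below are the legacy 16-bit UID/GID syscalls: IDs are
     * converted between the target's 16-bit representation and the host's
     * full-width uid_t/gid_t with the low2high*()/high2low*() helpers.  The
     * *32 variants further down pass 32-bit IDs straight through.
     */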
9955 #ifdef TARGET_NR_lchown
9956     case TARGET_NR_lchown:
9957         if (!(p = lock_user_string(arg1)))
9958             return -TARGET_EFAULT;
9959         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
9960         unlock_user(p, arg1, 0);
9961         return ret;
9962 #endif
9963 #ifdef TARGET_NR_getuid
9964     case TARGET_NR_getuid:
9965         return get_errno(high2lowuid(getuid()));
9966 #endif
9967 #ifdef TARGET_NR_getgid
9968     case TARGET_NR_getgid:
9969         return get_errno(high2lowgid(getgid()));
9970 #endif
9971 #ifdef TARGET_NR_geteuid
9972     case TARGET_NR_geteuid:
9973         return get_errno(high2lowuid(geteuid()));
9974 #endif
9975 #ifdef TARGET_NR_getegid
9976     case TARGET_NR_getegid:
9977         return get_errno(high2lowgid(getegid()));
9978 #endif
9979     case TARGET_NR_setreuid:
9980         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
9981     case TARGET_NR_setregid:
9982         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
9983     case TARGET_NR_getgroups:
9984         {
9985             int gidsetsize = arg1;
9986             target_id *target_grouplist;
9987             gid_t *grouplist;
9988             int i;
9989 
9990             grouplist = alloca(gidsetsize * sizeof(gid_t));
9991             ret = get_errno(getgroups(gidsetsize, grouplist));
9992             if (gidsetsize == 0)
9993                 return ret;
9994             if (!is_error(ret)) {
9995                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
9996                 if (!target_grouplist)
9997                     return -TARGET_EFAULT;
9998                 for (i = 0; i < ret; i++)
9999                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10000                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10001             }
10002         }
10003         return ret;
10004     case TARGET_NR_setgroups:
10005         {
10006             int gidsetsize = arg1;
10007             target_id *target_grouplist;
10008             gid_t *grouplist = NULL;
10009             int i;
10010             if (gidsetsize) {
10011                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10012                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10013                 if (!target_grouplist) {
10014                     return -TARGET_EFAULT;
10015                 }
10016                 for (i = 0; i < gidsetsize; i++) {
10017                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10018                 }
10019                 unlock_user(target_grouplist, arg2, 0);
10020             }
10021             return get_errno(setgroups(gidsetsize, grouplist));
10022         }
10023     case TARGET_NR_fchown:
10024         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10025 #if defined(TARGET_NR_fchownat)
10026     case TARGET_NR_fchownat:
10027         if (!(p = lock_user_string(arg2)))
10028             return -TARGET_EFAULT;
10029         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10030                                  low2highgid(arg4), arg5));
10031         unlock_user(p, arg2, 0);
10032         return ret;
10033 #endif
10034 #ifdef TARGET_NR_setresuid
10035     case TARGET_NR_setresuid:
10036         return get_errno(sys_setresuid(low2highuid(arg1),
10037                                        low2highuid(arg2),
10038                                        low2highuid(arg3)));
10039 #endif
10040 #ifdef TARGET_NR_getresuid
10041     case TARGET_NR_getresuid:
10042         {
10043             uid_t ruid, euid, suid;
10044             ret = get_errno(getresuid(&ruid, &euid, &suid));
10045             if (!is_error(ret)) {
10046                 if (put_user_id(high2lowuid(ruid), arg1)
10047                     || put_user_id(high2lowuid(euid), arg2)
10048                     || put_user_id(high2lowuid(suid), arg3))
10049                     return -TARGET_EFAULT;
10050             }
10051         }
10052         return ret;
10053 #endif
10054 #ifdef TARGET_NR_setresgid
10055     case TARGET_NR_setresgid:
10056         return get_errno(sys_setresgid(low2highgid(arg1),
10057                                        low2highgid(arg2),
10058                                        low2highgid(arg3)));
10059 #endif
10060 #ifdef TARGET_NR_getresgid
10061     case TARGET_NR_getresgid:
10062         {
10063             gid_t rgid, egid, sgid;
10064             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10065             if (!is_error(ret)) {
10066                 if (put_user_id(high2lowgid(rgid), arg1)
10067                     || put_user_id(high2lowgid(egid), arg2)
10068                     || put_user_id(high2lowgid(sgid), arg3))
10069                     return -TARGET_EFAULT;
10070             }
10071         }
10072         return ret;
10073 #endif
10074 #ifdef TARGET_NR_chown
10075     case TARGET_NR_chown:
10076         if (!(p = lock_user_string(arg1)))
10077             return -TARGET_EFAULT;
10078         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10079         unlock_user(p, arg1, 0);
10080         return ret;
10081 #endif
10082     case TARGET_NR_setuid:
10083         return get_errno(sys_setuid(low2highuid(arg1)));
10084     case TARGET_NR_setgid:
10085         return get_errno(sys_setgid(low2highgid(arg1)));
10086     case TARGET_NR_setfsuid:
10087         return get_errno(setfsuid(arg1));
10088     case TARGET_NR_setfsgid:
10089         return get_errno(setfsgid(arg1));
10090 
10091 #ifdef TARGET_NR_lchown32
10092     case TARGET_NR_lchown32:
10093         if (!(p = lock_user_string(arg1)))
10094             return -TARGET_EFAULT;
10095         ret = get_errno(lchown(p, arg2, arg3));
10096         unlock_user(p, arg1, 0);
10097         return ret;
10098 #endif
10099 #ifdef TARGET_NR_getuid32
10100     case TARGET_NR_getuid32:
10101         return get_errno(getuid());
10102 #endif
10103 
10104 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10105     /* Alpha specific */
10106     case TARGET_NR_getxuid:
10107         {
10108             uid_t euid;
10109             euid = geteuid();
10110             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10111         }
10112         return get_errno(getuid());
10113 #endif
10114 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10115     /* Alpha specific */
10116     case TARGET_NR_getxgid:
10117         {
10118             gid_t egid;
10119             egid = getegid();
10120             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10121         }
10122         return get_errno(getgid());
10123 #endif
10124 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10125     /* Alpha specific */
10126     case TARGET_NR_osf_getsysinfo:
10127         ret = -TARGET_EOPNOTSUPP;
10128         switch (arg1) {
10129           case TARGET_GSI_IEEE_FP_CONTROL:
10130             {
10131                 uint64_t swcr, fpcr = cpu_alpha_load_fpcr(cpu_env);
10132 
10133                 /* Copied from linux ieee_fpcr_to_swcr.  */
10134                 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10135                 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10136                 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10137                                         | SWCR_TRAP_ENABLE_DZE
10138                                         | SWCR_TRAP_ENABLE_OVF);
10139                 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10140                                         | SWCR_TRAP_ENABLE_INE);
10141                 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10142                 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10143 
10144                 if (put_user_u64(swcr, arg2))
10145                     return -TARGET_EFAULT;
10146                 ret = 0;
10147             }
10148             break;
10149 
10150           /* case GSI_IEEE_STATE_AT_SIGNAL:
10151              -- Not implemented in linux kernel.
10152              case GSI_UACPROC:
10153              -- Retrieves current unaligned access state; not much used.
10154              case GSI_PROC_TYPE:
10155              -- Retrieves implver information; surely not used.
10156              case GSI_GET_HWRPB:
10157              -- Grabs a copy of the HWRPB; surely not used.
10158           */
10159         }
10160         return ret;
10161 #endif
10162 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10163     /* Alpha specific */
10164     case TARGET_NR_osf_setsysinfo:
10165         ret = -TARGET_EOPNOTSUPP;
10166         switch (arg1) {
10167           case TARGET_SSI_IEEE_FP_CONTROL:
10168             {
10169                 uint64_t swcr, fpcr, orig_fpcr;
10170 
10171                 if (get_user_u64(swcr, arg2)) {
10172                     return -TARGET_EFAULT;
10173                 }
10174                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10175                 fpcr = orig_fpcr & FPCR_DYN_MASK;
10176 
10177                 /* Copied from linux ieee_swcr_to_fpcr.  */
10178                 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10179                 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10180                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10181                                   | SWCR_TRAP_ENABLE_DZE
10182                                   | SWCR_TRAP_ENABLE_OVF)) << 48;
10183                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10184                                   | SWCR_TRAP_ENABLE_INE)) << 57;
10185                 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10186                 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10187 
10188                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10189                 ret = 0;
10190             }
10191             break;
10192 
10193           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10194             {
10195                 uint64_t exc, fpcr, orig_fpcr;
10196                 int si_code;
10197 
10198                 if (get_user_u64(exc, arg2)) {
10199                     return -TARGET_EFAULT;
10200                 }
10201 
10202                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10203 
10204                 /* We only add to the exception status here.  */
10205                 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
10206 
10207                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10208                 ret = 0;
10209 
10210                 /* Old exceptions are not signaled.  */
10211                 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
10212 
10213                 /* If any exceptions set by this call,
10214                    and are unmasked, send a signal.  */
10215                 si_code = 0;
10216                 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
10217                     si_code = TARGET_FPE_FLTRES;
10218                 }
10219                 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
10220                     si_code = TARGET_FPE_FLTUND;
10221                 }
10222                 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
10223                     si_code = TARGET_FPE_FLTOVF;
10224                 }
10225                 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
10226                     si_code = TARGET_FPE_FLTDIV;
10227                 }
10228                 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
10229                     si_code = TARGET_FPE_FLTINV;
10230                 }
10231                 if (si_code != 0) {
10232                     target_siginfo_t info;
10233                     info.si_signo = SIGFPE;
10234                     info.si_errno = 0;
10235                     info.si_code = si_code;
10236                     info._sifields._sigfault._addr
10237                         = ((CPUArchState *)cpu_env)->pc;
10238                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10239                                  QEMU_SI_FAULT, &info);
10240                 }
10241             }
10242             break;
10243 
10244           /* case SSI_NVPAIRS:
10245              -- Used with SSIN_UACPROC to enable unaligned accesses.
10246              case SSI_IEEE_STATE_AT_SIGNAL:
10247              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10248              -- Not implemented in linux kernel
10249           */
10250         }
10251         return ret;
10252 #endif
10253 #ifdef TARGET_NR_osf_sigprocmask
10254     /* Alpha specific.  */
10255     case TARGET_NR_osf_sigprocmask:
10256         {
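            /*
             * OSF/1 sigprocmask passes the new mask by value and returns the
             * old mask as the syscall result rather than through a pointer.
             */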
10257             abi_ulong mask;
10258             int how;
10259             sigset_t set, oldset;
10260 
10261             switch (arg1) {
10262             case TARGET_SIG_BLOCK:
10263                 how = SIG_BLOCK;
10264                 break;
10265             case TARGET_SIG_UNBLOCK:
10266                 how = SIG_UNBLOCK;
10267                 break;
10268             case TARGET_SIG_SETMASK:
10269                 how = SIG_SETMASK;
10270                 break;
10271             default:
10272                 return -TARGET_EINVAL;
10273             }
10274             mask = arg2;
10275             target_to_host_old_sigset(&set, &mask);
10276             ret = do_sigprocmask(how, &set, &oldset);
10277             if (!ret) {
10278                 host_to_target_old_sigset(&mask, &oldset);
10279                 ret = mask;
10280             }
10281         }
10282         return ret;
10283 #endif
10284 
10285 #ifdef TARGET_NR_getgid32
10286     case TARGET_NR_getgid32:
10287         return get_errno(getgid());
10288 #endif
10289 #ifdef TARGET_NR_geteuid32
10290     case TARGET_NR_geteuid32:
10291         return get_errno(geteuid());
10292 #endif
10293 #ifdef TARGET_NR_getegid32
10294     case TARGET_NR_getegid32:
10295         return get_errno(getegid());
10296 #endif
10297 #ifdef TARGET_NR_setreuid32
10298     case TARGET_NR_setreuid32:
10299         return get_errno(setreuid(arg1, arg2));
10300 #endif
10301 #ifdef TARGET_NR_setregid32
10302     case TARGET_NR_setregid32:
10303         return get_errno(setregid(arg1, arg2));
10304 #endif
10305 #ifdef TARGET_NR_getgroups32
10306     case TARGET_NR_getgroups32:
10307         {
10308             int gidsetsize = arg1;
10309             uint32_t *target_grouplist;
10310             gid_t *grouplist;
10311             int i;
10312 
10313             grouplist = alloca(gidsetsize * sizeof(gid_t));
10314             ret = get_errno(getgroups(gidsetsize, grouplist));
10315             if (gidsetsize == 0)
10316                 return ret;
10317             if (!is_error(ret)) {
10318                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10319                 if (!target_grouplist) {
10320                     return -TARGET_EFAULT;
10321                 }
10322                 for (i = 0; i < ret; i++)
10323                     target_grouplist[i] = tswap32(grouplist[i]);
10324                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10325             }
10326         }
10327         return ret;
10328 #endif
10329 #ifdef TARGET_NR_setgroups32
10330     case TARGET_NR_setgroups32:
10331         {
10332             int gidsetsize = arg1;
10333             uint32_t *target_grouplist;
10334             gid_t *grouplist;
10335             int i;
10336 
10337             grouplist = alloca(gidsetsize * sizeof(gid_t));
10338             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10339             if (!target_grouplist) {
10340                 return -TARGET_EFAULT;
10341             }
10342             for (i = 0; i < gidsetsize; i++)
10343                 grouplist[i] = tswap32(target_grouplist[i]);
10344             unlock_user(target_grouplist, arg2, 0);
10345             return get_errno(setgroups(gidsetsize, grouplist));
10346         }
10347 #endif
10348 #ifdef TARGET_NR_fchown32
10349     case TARGET_NR_fchown32:
10350         return get_errno(fchown(arg1, arg2, arg3));
10351 #endif
10352 #ifdef TARGET_NR_setresuid32
10353     case TARGET_NR_setresuid32:
10354         return get_errno(sys_setresuid(arg1, arg2, arg3));
10355 #endif
10356 #ifdef TARGET_NR_getresuid32
10357     case TARGET_NR_getresuid32:
10358         {
10359             uid_t ruid, euid, suid;
10360             ret = get_errno(getresuid(&ruid, &euid, &suid));
10361             if (!is_error(ret)) {
10362                 if (put_user_u32(ruid, arg1)
10363                     || put_user_u32(euid, arg2)
10364                     || put_user_u32(suid, arg3))
10365                     return -TARGET_EFAULT;
10366             }
10367         }
10368         return ret;
10369 #endif
10370 #ifdef TARGET_NR_setresgid32
10371     case TARGET_NR_setresgid32:
10372         return get_errno(sys_setresgid(arg1, arg2, arg3));
10373 #endif
10374 #ifdef TARGET_NR_getresgid32
10375     case TARGET_NR_getresgid32:
10376         {
10377             gid_t rgid, egid, sgid;
10378             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10379             if (!is_error(ret)) {
10380                 if (put_user_u32(rgid, arg1)
10381                     || put_user_u32(egid, arg2)
10382                     || put_user_u32(sgid, arg3))
10383                     return -TARGET_EFAULT;
10384             }
10385         }
10386         return ret;
10387 #endif
10388 #ifdef TARGET_NR_chown32
10389     case TARGET_NR_chown32:
10390         if (!(p = lock_user_string(arg1)))
10391             return -TARGET_EFAULT;
10392         ret = get_errno(chown(p, arg2, arg3));
10393         unlock_user(p, arg1, 0);
10394         return ret;
10395 #endif
10396 #ifdef TARGET_NR_setuid32
10397     case TARGET_NR_setuid32:
10398         return get_errno(sys_setuid(arg1));
10399 #endif
10400 #ifdef TARGET_NR_setgid32
10401     case TARGET_NR_setgid32:
10402         return get_errno(sys_setgid(arg1));
10403 #endif
10404 #ifdef TARGET_NR_setfsuid32
10405     case TARGET_NR_setfsuid32:
10406         return get_errno(setfsuid(arg1));
10407 #endif
10408 #ifdef TARGET_NR_setfsgid32
10409     case TARGET_NR_setfsgid32:
10410         return get_errno(setfsgid(arg1));
10411 #endif
10412 #ifdef TARGET_NR_mincore
10413     case TARGET_NR_mincore:
10414         {
10415             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10416             if (!a) {
10417                 return -TARGET_ENOMEM;
10418             }
10419             p = lock_user_string(arg3);
10420             if (!p) {
10421                 ret = -TARGET_EFAULT;
10422             } else {
10423                 ret = get_errno(mincore(a, arg2, p));
10424                 unlock_user(p, arg3, ret);
10425             }
10426             unlock_user(a, arg1, 0);
10427         }
10428         return ret;
10429 #endif
10430 #ifdef TARGET_NR_arm_fadvise64_64
10431     case TARGET_NR_arm_fadvise64_64:
10432         /* arm_fadvise64_64 looks like fadvise64_64 but
10433          * with different argument order: fd, advice, offset, len
10434          * rather than the usual fd, offset, len, advice.
10435          * Note that offset and len are both 64-bit so appear as
10436          * pairs of 32-bit registers.
10437          */
10438         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10439                             target_offset64(arg5, arg6), arg2);
10440         return -host_to_target_errno(ret);
10441 #endif
10442 
10443 #if TARGET_ABI_BITS == 32
10444 
10445 #ifdef TARGET_NR_fadvise64_64
10446     case TARGET_NR_fadvise64_64:
10447 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10448         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10449         ret = arg2;
10450         arg2 = arg3;
10451         arg3 = arg4;
10452         arg4 = arg5;
10453         arg5 = arg6;
10454         arg6 = ret;
10455 #else
10456         /* 6 args: fd, offset (high, low), len (high, low), advice */
10457         if (regpairs_aligned(cpu_env, num)) {
10458             /* offset is in (3,4), len in (5,6) and advice in 7 */
10459             arg2 = arg3;
10460             arg3 = arg4;
10461             arg4 = arg5;
10462             arg5 = arg6;
10463             arg6 = arg7;
10464         }
10465 #endif
10466         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10467                             target_offset64(arg4, arg5), arg6);
10468         return -host_to_target_errno(ret);
10469 #endif
10470 
10471 #ifdef TARGET_NR_fadvise64
10472     case TARGET_NR_fadvise64:
10473         /* 5 args: fd, offset (high, low), len, advice */
10474         if (regpairs_aligned(cpu_env, num)) {
10475             /* offset is in (3,4), len in 5 and advice in 6 */
10476             arg2 = arg3;
10477             arg3 = arg4;
10478             arg4 = arg5;
10479             arg5 = arg6;
10480         }
10481         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10482         return -host_to_target_errno(ret);
10483 #endif
10484 
10485 #else /* not a 32-bit ABI */
10486 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10487 #ifdef TARGET_NR_fadvise64_64
10488     case TARGET_NR_fadvise64_64:
10489 #endif
10490 #ifdef TARGET_NR_fadvise64
10491     case TARGET_NR_fadvise64:
10492 #endif
10493 #ifdef TARGET_S390X
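        /*
         * The s390x kernel uses different numeric values for
         * POSIX_FADV_DONTNEED/NOREUSE (6 and 7), so remap the target's advice
         * to the host definitions; 4 and 5 are turned into invalid values so
         * they fail instead of being misinterpreted.
         */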
10494         switch (arg4) {
10495         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10496         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10497         case 6: arg4 = POSIX_FADV_DONTNEED; break;
10498         case 7: arg4 = POSIX_FADV_NOREUSE; break;
10499         default: break;
10500         }
10501 #endif
10502         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10503 #endif
10504 #endif /* end of 64-bit ABI fadvise handling */
10505 
10506 #ifdef TARGET_NR_madvise
10507     case TARGET_NR_madvise:
10508         /* A straight passthrough may not be safe because qemu sometimes
10509            turns private file-backed mappings into anonymous mappings.
10510            This will break MADV_DONTNEED.
10511            This is a hint, so ignoring and returning success is ok.  */
10512         return 0;
10513 #endif
10514 #if TARGET_ABI_BITS == 32
10515     case TARGET_NR_fcntl64:
10516     {
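        /*
         * On 32-bit targets fcntl64 handles the 64-bit file locks here and
         * forwards everything else to do_fcntl().  ARM OABI guests lay out
         * struct flock64 differently, hence the alternate copy helpers.
         */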
10517         int cmd;
10518         struct flock64 fl;
10519         from_flock64_fn *copyfrom = copy_from_user_flock64;
10520         to_flock64_fn *copyto = copy_to_user_flock64;
10521 
10522 #ifdef TARGET_ARM
10523         if (!((CPUARMState *)cpu_env)->eabi) {
10524             copyfrom = copy_from_user_oabi_flock64;
10525             copyto = copy_to_user_oabi_flock64;
10526         }
10527 #endif
10528 
10529         cmd = target_to_host_fcntl_cmd(arg2);
10530         if (cmd == -TARGET_EINVAL) {
10531             return cmd;
10532         }
10533 
10534         switch (arg2) {
10535         case TARGET_F_GETLK64:
10536             ret = copyfrom(&fl, arg3);
10537             if (ret) {
10538                 break;
10539             }
10540             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10541             if (ret == 0) {
10542                 ret = copyto(arg3, &fl);
10543             }
10544             break;
10545 
10546         case TARGET_F_SETLK64:
10547         case TARGET_F_SETLKW64:
10548             ret = copyfrom(&fl, arg3);
10549             if (ret) {
10550                 break;
10551             }
10552             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10553             break;
10554         default:
10555             ret = do_fcntl(arg1, arg2, arg3);
10556             break;
10557         }
10558         return ret;
10559     }
10560 #endif
10561 #ifdef TARGET_NR_cacheflush
10562     case TARGET_NR_cacheflush:
10563         /* self-modifying code is handled automatically, so nothing needed */
10564         return 0;
10565 #endif
10566 #ifdef TARGET_NR_getpagesize
10567     case TARGET_NR_getpagesize:
10568         return TARGET_PAGE_SIZE;
10569 #endif
10570     case TARGET_NR_gettid:
10571         return get_errno(gettid());
10572 #ifdef TARGET_NR_readahead
10573     case TARGET_NR_readahead:
10574 #if TARGET_ABI_BITS == 32
10575         if (regpairs_aligned(cpu_env, num)) {
10576             arg2 = arg3;
10577             arg3 = arg4;
10578             arg4 = arg5;
10579         }
10580         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
10581 #else
10582         ret = get_errno(readahead(arg1, arg2, arg3));
10583 #endif
10584         return ret;
10585 #endif
10586 #ifdef CONFIG_ATTR
10587 #ifdef TARGET_NR_setxattr
10588     case TARGET_NR_listxattr:
10589     case TARGET_NR_llistxattr:
10590     {
10591         void *p, *b = 0;
10592         if (arg2) {
10593             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10594             if (!b) {
10595                 return -TARGET_EFAULT;
10596             }
10597         }
10598         p = lock_user_string(arg1);
10599         if (p) {
10600             if (num == TARGET_NR_listxattr) {
10601                 ret = get_errno(listxattr(p, b, arg3));
10602             } else {
10603                 ret = get_errno(llistxattr(p, b, arg3));
10604             }
10605         } else {
10606             ret = -TARGET_EFAULT;
10607         }
10608         unlock_user(p, arg1, 0);
10609         unlock_user(b, arg2, arg3);
10610         return ret;
10611     }
10612     case TARGET_NR_flistxattr:
10613     {
10614         void *b = 0;
10615         if (arg2) {
10616             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10617             if (!b) {
10618                 return -TARGET_EFAULT;
10619             }
10620         }
10621         ret = get_errno(flistxattr(arg1, b, arg3));
10622         unlock_user(b, arg2, arg3);
10623         return ret;
10624     }
10625     case TARGET_NR_setxattr:
10626     case TARGET_NR_lsetxattr:
10627         {
10628             void *p, *n, *v = 0;
10629             if (arg3) {
10630                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10631                 if (!v) {
10632                     return -TARGET_EFAULT;
10633                 }
10634             }
10635             p = lock_user_string(arg1);
10636             n = lock_user_string(arg2);
10637             if (p && n) {
10638                 if (num == TARGET_NR_setxattr) {
10639                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
10640                 } else {
10641                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10642                 }
10643             } else {
10644                 ret = -TARGET_EFAULT;
10645             }
10646             unlock_user(p, arg1, 0);
10647             unlock_user(n, arg2, 0);
10648             unlock_user(v, arg3, 0);
10649         }
10650         return ret;
10651     case TARGET_NR_fsetxattr:
10652         {
10653             void *n, *v = 0;
10654             if (arg3) {
10655                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10656                 if (!v) {
10657                     return -TARGET_EFAULT;
10658                 }
10659             }
10660             n = lock_user_string(arg2);
10661             if (n) {
10662                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10663             } else {
10664                 ret = -TARGET_EFAULT;
10665             }
10666             unlock_user(n, arg2, 0);
10667             unlock_user(v, arg3, 0);
10668         }
10669         return ret;
10670     case TARGET_NR_getxattr:
10671     case TARGET_NR_lgetxattr:
10672         {
10673             void *p, *n, *v = 0;
10674             if (arg3) {
10675                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10676                 if (!v) {
10677                     return -TARGET_EFAULT;
10678                 }
10679             }
10680             p = lock_user_string(arg1);
10681             n = lock_user_string(arg2);
10682             if (p && n) {
10683                 if (num == TARGET_NR_getxattr) {
10684                     ret = get_errno(getxattr(p, n, v, arg4));
10685                 } else {
10686                     ret = get_errno(lgetxattr(p, n, v, arg4));
10687                 }
10688             } else {
10689                 ret = -TARGET_EFAULT;
10690             }
10691             unlock_user(p, arg1, 0);
10692             unlock_user(n, arg2, 0);
10693             unlock_user(v, arg3, arg4);
10694         }
10695         return ret;
10696     case TARGET_NR_fgetxattr:
10697         {
10698             void *n, *v = 0;
10699             if (arg3) {
10700                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10701                 if (!v) {
10702                     return -TARGET_EFAULT;
10703                 }
10704             }
10705             n = lock_user_string(arg2);
10706             if (n) {
10707                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10708             } else {
10709                 ret = -TARGET_EFAULT;
10710             }
10711             unlock_user(n, arg2, 0);
10712             unlock_user(v, arg3, arg4);
10713         }
10714         return ret;
10715     case TARGET_NR_removexattr:
10716     case TARGET_NR_lremovexattr:
10717         {
10718             void *p, *n;
10719             p = lock_user_string(arg1);
10720             n = lock_user_string(arg2);
10721             if (p && n) {
10722                 if (num == TARGET_NR_removexattr) {
10723                     ret = get_errno(removexattr(p, n));
10724                 } else {
10725                     ret = get_errno(lremovexattr(p, n));
10726                 }
10727             } else {
10728                 ret = -TARGET_EFAULT;
10729             }
10730             unlock_user(p, arg1, 0);
10731             unlock_user(n, arg2, 0);
10732         }
10733         return ret;
10734     case TARGET_NR_fremovexattr:
10735         {
10736             void *n;
10737             n = lock_user_string(arg2);
10738             if (n) {
10739                 ret = get_errno(fremovexattr(arg1, n));
10740             } else {
10741                 ret = -TARGET_EFAULT;
10742             }
10743             unlock_user(n, arg2, 0);
10744         }
10745         return ret;
10746 #endif
10747 #endif /* CONFIG_ATTR */
10748 #ifdef TARGET_NR_set_thread_area
10749     case TARGET_NR_set_thread_area:
10750 #if defined(TARGET_MIPS)
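        /*
         * MIPS keeps the TLS pointer in the CP0 UserLocal register,
         * which guest code reads back with the rdhwr instruction.
         */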
10751         ((CPUMIPSState *)cpu_env)->active_tc.CP0_UserLocal = arg1;
10752         return 0;
10753 #elif defined(TARGET_CRIS)
10754         if (arg1 & 0xff) {
10755             ret = -TARGET_EINVAL;
10756         } else {
10757             ((CPUCRISState *)cpu_env)->pregs[PR_PID] = arg1;
10758             ret = 0;
10759         }
10760         return ret;
10761 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10762         return do_set_thread_area(cpu_env, arg1);
10763 #elif defined(TARGET_M68K)
10764         {
10765             TaskState *ts = cpu->opaque;
10766             ts->tp_value = arg1;
10767             return 0;
10768         }
10769 #else
10770         return -TARGET_ENOSYS;
10771 #endif
10772 #endif
10773 #ifdef TARGET_NR_get_thread_area
10774     case TARGET_NR_get_thread_area:
10775 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10776         return do_get_thread_area(cpu_env, arg1);
10777 #elif defined(TARGET_M68K)
10778         {
10779             TaskState *ts = cpu->opaque;
10780             return ts->tp_value;
10781         }
10782 #else
10783         return -TARGET_ENOSYS;
10784 #endif
10785 #endif
10786 #ifdef TARGET_NR_getdomainname
10787     case TARGET_NR_getdomainname:
10788         return -TARGET_ENOSYS;
10789 #endif
10790 
10791 #ifdef TARGET_NR_clock_settime
10792     case TARGET_NR_clock_settime:
10793     {
10794         struct timespec ts;
10795 
10796         ret = target_to_host_timespec(&ts, arg2);
10797         if (!is_error(ret)) {
10798             ret = get_errno(clock_settime(arg1, &ts));
10799         }
10800         return ret;
10801     }
10802 #endif
10803 #ifdef TARGET_NR_clock_gettime
10804     case TARGET_NR_clock_gettime:
10805     {
10806         struct timespec ts;
10807         ret = get_errno(clock_gettime(arg1, &ts));
10808         if (!is_error(ret)) {
10809             ret = host_to_target_timespec(arg2, &ts);
10810         }
10811         return ret;
10812     }
10813 #endif
10814 #ifdef TARGET_NR_clock_getres
10815     case TARGET_NR_clock_getres:
10816     {
10817         struct timespec ts;
10818         ret = get_errno(clock_getres(arg1, &ts));
10819         if (!is_error(ret)) {
10820             ret = host_to_target_timespec(arg2, &ts);
10821         }
10822         return ret;
10823     }
10824 #endif
10825 #ifdef TARGET_NR_clock_nanosleep
10826     case TARGET_NR_clock_nanosleep:
10827     {
10828         struct timespec ts;
10829         target_to_host_timespec(&ts, arg3);
10830         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
10831                                              &ts, arg4 ? &ts : NULL));
10832         if (arg4)
10833             host_to_target_timespec(arg4, &ts);
10834 
10835 #if defined(TARGET_PPC)
10836         /* clock_nanosleep is odd in that it returns positive errno values.
10837          * On PPC, CR0 bit 3 should be set in such a situation. */
10838         if (ret && ret != -TARGET_ERESTARTSYS) {
10839             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
10840         }
10841 #endif
10842         return ret;
10843     }
10844 #endif
10845 
10846 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10847     case TARGET_NR_set_tid_address:
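        /*
         * The kernel only stores this pointer and dereferences it when
         * the thread exits (clearing the TID and waking futex waiters),
         * so handing it the host address of the guest memory suffices.
         */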
10848         return get_errno(set_tid_address((int *)g2h(arg1)));
10849 #endif
10850 
10851     case TARGET_NR_tkill:
10852         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10853 
10854     case TARGET_NR_tgkill:
10855         return get_errno(safe_tgkill((int)arg1, (int)arg2,
10856                          target_to_host_signal(arg3)));
10857 
10858 #ifdef TARGET_NR_set_robust_list
10859     case TARGET_NR_set_robust_list:
10860     case TARGET_NR_get_robust_list:
10861         /* The ABI for supporting robust futexes has userspace pass
10862          * the kernel a pointer to a linked list which is updated by
10863          * userspace after the syscall; the list is walked by the kernel
10864          * when the thread exits. Since the linked list in QEMU guest
10865          * memory isn't a valid linked list for the host and we have
10866          * no way to reliably intercept the thread-death event, we can't
10867          * support these. Silently return ENOSYS so that guest userspace
10868          * falls back to a non-robust futex implementation (which should
10869          * be OK except in the corner case of the guest crashing while
10870          * holding a mutex that is shared with another process via
10871          * shared memory).
10872          */
10873         return -TARGET_ENOSYS;
10874 #endif
10875 
10876 #if defined(TARGET_NR_utimensat)
10877     case TARGET_NR_utimensat:
10878         {
10879             struct timespec *tsp, ts[2];
10880             if (!arg3) {
10881                 tsp = NULL;
10882             } else {
10883                 target_to_host_timespec(ts, arg3);
10884                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
10885                 tsp = ts;
10886             }
10887             if (!arg2)
10888                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
10889             else {
10890                 if (!(p = lock_user_string(arg2))) {
10891                     return -TARGET_EFAULT;
10892                 }
10893                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
10894                 unlock_user(p, arg2, 0);
10895             }
10896         }
10897         return ret;
10898 #endif
10899     case TARGET_NR_futex:
10900         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
10901 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10902     case TARGET_NR_inotify_init:
10903         ret = get_errno(sys_inotify_init());
10904         if (ret >= 0) {
10905             fd_trans_register(ret, &target_inotify_trans);
10906         }
10907         return ret;
10908 #endif
10909 #ifdef CONFIG_INOTIFY1
10910 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10911     case TARGET_NR_inotify_init1:
10912         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
10913                                           fcntl_flags_tbl)));
10914         if (ret >= 0) {
10915             fd_trans_register(ret, &target_inotify_trans);
10916         }
10917         return ret;
10918 #endif
10919 #endif
10920 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10921     case TARGET_NR_inotify_add_watch:
10922         p = lock_user_string(arg2);
10923         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
10924         unlock_user(p, arg2, 0);
10925         return ret;
10926 #endif
10927 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10928     case TARGET_NR_inotify_rm_watch:
10929         return get_errno(sys_inotify_rm_watch(arg1, arg2));
10930 #endif
10931 
10932 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10933     case TARGET_NR_mq_open:
10934         {
10935             struct mq_attr posix_mq_attr;
10936             struct mq_attr *pposix_mq_attr;
10937             int host_flags;
10938 
10939             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
10940             pposix_mq_attr = NULL;
10941             if (arg4) {
10942                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
10943                     return -TARGET_EFAULT;
10944                 }
10945                 pposix_mq_attr = &posix_mq_attr;
10946             }
10947             p = lock_user_string(arg1 - 1);
10948             if (!p) {
10949                 return -TARGET_EFAULT;
10950             }
10951             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
10952             unlock_user(p, arg1, 0);
10953         }
10954         return ret;
10955 
10956     case TARGET_NR_mq_unlink:
10957         p = lock_user_string(arg1 - 1);
10958         if (!p) {
10959             return -TARGET_EFAULT;
10960         }
10961         ret = get_errno(mq_unlink(p));
10962         unlock_user(p, arg1, 0);
10963         return ret;
10964 
10965     case TARGET_NR_mq_timedsend:
10966         {
10967             struct timespec ts;
10968 
10969             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10970             if (arg5 != 0) {
10971                 target_to_host_timespec(&ts, arg5);
10972                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
10973                 host_to_target_timespec(arg5, &ts);
10974             } else {
10975                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
10976             }
10977             unlock_user(p, arg2, arg3);
10978         }
10979         return ret;
10980 
10981     case TARGET_NR_mq_timedreceive:
10982         {
10983             struct timespec ts;
10984             unsigned int prio;
10985 
10986             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10987             if (arg5 != 0) {
10988                 target_to_host_timespec(&ts, arg5);
10989                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
10990                                                      &prio, &ts));
10991                 host_to_target_timespec(arg5, &ts);
10992             } else {
10993                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
10994                                                      &prio, NULL));
10995             }
10996             unlock_user(p, arg2, arg3);
10997             if (arg4 != 0)
10998                 put_user_u32(prio, arg4);
10999         }
11000         return ret;
11001 
11002     /* Not implemented for now... */
11003 /*     case TARGET_NR_mq_notify: */
11004 /*         break; */
11005 
11006     case TARGET_NR_mq_getsetattr:
11007         {
11008             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11009             ret = 0;
11010             if (arg2 != 0) {
11011                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11012                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11013                                            &posix_mq_attr_out));
11014             } else if (arg3 != 0) {
11015                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11016             }
11017             if (ret == 0 && arg3 != 0) {
11018                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11019             }
11020         }
11021         return ret;
11022 #endif
11023 
11024 #ifdef CONFIG_SPLICE
11025 #ifdef TARGET_NR_tee
11026     case TARGET_NR_tee:
11027         {
11028             ret = get_errno(tee(arg1, arg2, arg3, arg4));
11029         }
11030         return ret;
11031 #endif
11032 #ifdef TARGET_NR_splice
11033     case TARGET_NR_splice:
11034         {
11035             loff_t loff_in, loff_out;
11036             loff_t *ploff_in = NULL, *ploff_out = NULL;
11037             if (arg2) {
11038                 if (get_user_u64(loff_in, arg2)) {
11039                     return -TARGET_EFAULT;
11040                 }
11041                 ploff_in = &loff_in;
11042             }
11043             if (arg4) {
11044                 if (get_user_u64(loff_out, arg4)) {
11045                     return -TARGET_EFAULT;
11046                 }
11047                 ploff_out = &loff_out;
11048             }
11049             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11050             if (arg2) {
11051                 if (put_user_u64(loff_in, arg2)) {
11052                     return -TARGET_EFAULT;
11053                 }
11054             }
11055             if (arg4) {
11056                 if (put_user_u64(loff_out, arg4)) {
11057                     return -TARGET_EFAULT;
11058                 }
11059             }
11060         }
11061         return ret;
11062 #endif
11063 #ifdef TARGET_NR_vmsplice
11064     case TARGET_NR_vmsplice:
11065         {
11066             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11067             if (vec != NULL) {
11068                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11069                 unlock_iovec(vec, arg2, arg3, 0);
11070             } else {
11071                 ret = -host_to_target_errno(errno);
11072             }
11073         }
11074         return ret;
11075 #endif
11076 #endif /* CONFIG_SPLICE */
11077 #ifdef CONFIG_EVENTFD
11078 #if defined(TARGET_NR_eventfd)
11079     case TARGET_NR_eventfd:
11080         ret = get_errno(eventfd(arg1, 0));
11081         if (ret >= 0) {
11082             fd_trans_register(ret, &target_eventfd_trans);
11083         }
11084         return ret;
11085 #endif
11086 #if defined(TARGET_NR_eventfd2)
11087     case TARGET_NR_eventfd2:
11088     {
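        /*
         * The guest's O_NONBLOCK/O_CLOEXEC flag values do not
         * necessarily match the host's (they differ on some targets),
         * so translate them explicitly instead of passing arg2 through.
         */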
11089         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11090         if (arg2 & TARGET_O_NONBLOCK) {
11091             host_flags |= O_NONBLOCK;
11092         }
11093         if (arg2 & TARGET_O_CLOEXEC) {
11094             host_flags |= O_CLOEXEC;
11095         }
11096         ret = get_errno(eventfd(arg1, host_flags));
11097         if (ret >= 0) {
11098             fd_trans_register(ret, &target_eventfd_trans);
11099         }
11100         return ret;
11101     }
11102 #endif
11103 #endif /* CONFIG_EVENTFD  */
11104 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11105     case TARGET_NR_fallocate:
11106 #if TARGET_ABI_BITS == 32
11107         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11108                                   target_offset64(arg5, arg6)));
11109 #else
11110         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11111 #endif
11112         return ret;
11113 #endif
11114 #if defined(CONFIG_SYNC_FILE_RANGE)
11115 #if defined(TARGET_NR_sync_file_range)
11116     case TARGET_NR_sync_file_range:
11117 #if TARGET_ABI_BITS == 32
11118 #if defined(TARGET_MIPS)
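        /*
         * The MIPS o32 convention pads 64-bit arguments to an even
         * register pair, so the offset/nbytes pairs arrive in
         * arg3..arg6 and the flags end up in arg7.
         */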
11119         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11120                                         target_offset64(arg5, arg6), arg7));
11121 #else
11122         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11123                                         target_offset64(arg4, arg5), arg6));
11124 #endif /* !TARGET_MIPS */
11125 #else
11126         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11127 #endif
11128         return ret;
11129 #endif
11130 #if defined(TARGET_NR_sync_file_range2)
11131     case TARGET_NR_sync_file_range2:
11132         /* This is like sync_file_range but the arguments are reordered */
11133 #if TARGET_ABI_BITS == 32
11134         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11135                                         target_offset64(arg5, arg6), arg2));
11136 #else
11137         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11138 #endif
11139         return ret;
11140 #endif
11141 #endif
11142 #if defined(TARGET_NR_signalfd4)
11143     case TARGET_NR_signalfd4:
11144         return do_signalfd4(arg1, arg2, arg4);
11145 #endif
11146 #if defined(TARGET_NR_signalfd)
11147     case TARGET_NR_signalfd:
11148         return do_signalfd4(arg1, arg2, 0);
11149 #endif
11150 #if defined(CONFIG_EPOLL)
11151 #if defined(TARGET_NR_epoll_create)
11152     case TARGET_NR_epoll_create:
11153         return get_errno(epoll_create(arg1));
11154 #endif
11155 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11156     case TARGET_NR_epoll_create1:
11157         return get_errno(epoll_create1(arg1));
11158 #endif
11159 #if defined(TARGET_NR_epoll_ctl)
11160     case TARGET_NR_epoll_ctl:
11161     {
11162         struct epoll_event ep;
11163         struct epoll_event *epp = 0;
11164         if (arg4) {
11165             struct target_epoll_event *target_ep;
11166             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11167                 return -TARGET_EFAULT;
11168             }
11169             ep.events = tswap32(target_ep->events);
11170             /* The epoll_data_t union is just opaque data to the kernel,
11171              * so we transfer all 64 bits across and need not worry what
11172              * actual data type it is.
11173              */
11174             ep.data.u64 = tswap64(target_ep->data.u64);
11175             unlock_user_struct(target_ep, arg4, 0);
11176             epp = &ep;
11177         }
11178         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11179     }
11180 #endif
11181 
11182 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11183 #if defined(TARGET_NR_epoll_wait)
11184     case TARGET_NR_epoll_wait:
11185 #endif
11186 #if defined(TARGET_NR_epoll_pwait)
11187     case TARGET_NR_epoll_pwait:
11188 #endif
11189     {
11190         struct target_epoll_event *target_ep;
11191         struct epoll_event *ep;
11192         int epfd = arg1;
11193         int maxevents = arg3;
11194         int timeout = arg4;
11195 
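        /*
         * Bound maxevents up front: it sizes the temporary host event
         * buffer allocated below, and TARGET_EP_MAX_EVENTS corresponds
         * to the kernel's own limit on this argument.
         */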
11196         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11197             return -TARGET_EINVAL;
11198         }
11199 
11200         target_ep = lock_user(VERIFY_WRITE, arg2,
11201                               maxevents * sizeof(struct target_epoll_event), 1);
11202         if (!target_ep) {
11203             return -TARGET_EFAULT;
11204         }
11205 
11206         ep = g_try_new(struct epoll_event, maxevents);
11207         if (!ep) {
11208             unlock_user(target_ep, arg2, 0);
11209             return -TARGET_ENOMEM;
11210         }
11211 
11212         switch (num) {
11213 #if defined(TARGET_NR_epoll_pwait)
11214         case TARGET_NR_epoll_pwait:
11215         {
11216             target_sigset_t *target_set;
11217             sigset_t _set, *set = &_set;
11218 
11219             if (arg5) {
11220                 if (arg6 != sizeof(target_sigset_t)) {
11221                     ret = -TARGET_EINVAL;
11222                     break;
11223                 }
11224 
11225                 target_set = lock_user(VERIFY_READ, arg5,
11226                                        sizeof(target_sigset_t), 1);
11227                 if (!target_set) {
11228                     ret = -TARGET_EFAULT;
11229                     break;
11230                 }
11231                 target_to_host_sigset(set, target_set);
11232                 unlock_user(target_set, arg5, 0);
11233             } else {
11234                 set = NULL;
11235             }
11236 
11237             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11238                                              set, SIGSET_T_SIZE));
11239             break;
11240         }
11241 #endif
11242 #if defined(TARGET_NR_epoll_wait)
11243         case TARGET_NR_epoll_wait:
11244             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11245                                              NULL, 0));
11246             break;
11247 #endif
11248         default:
11249             ret = -TARGET_ENOSYS;
11250         }
11251         if (!is_error(ret)) {
11252             int i;
11253             for (i = 0; i < ret; i++) {
11254                 target_ep[i].events = tswap32(ep[i].events);
11255                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11256             }
11257             unlock_user(target_ep, arg2,
11258                         ret * sizeof(struct target_epoll_event));
11259         } else {
11260             unlock_user(target_ep, arg2, 0);
11261         }
11262         g_free(ep);
11263         return ret;
11264     }
11265 #endif
11266 #endif
11267 #ifdef TARGET_NR_prlimit64
11268     case TARGET_NR_prlimit64:
11269     {
11270         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11271         struct target_rlimit64 *target_rnew, *target_rold;
11272         struct host_rlimit64 rnew, rold, *rnewp = 0;
11273         int resource = target_to_host_resource(arg2);
11274         if (arg3) {
11275             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11276                 return -TARGET_EFAULT;
11277             }
11278             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11279             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11280             unlock_user_struct(target_rnew, arg3, 0);
11281             rnewp = &rnew;
11282         }
11283 
11284         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11285         if (!is_error(ret) && arg4) {
11286             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11287                 return -TARGET_EFAULT;
11288             }
11289             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11290             target_rold->rlim_max = tswap64(rold.rlim_max);
11291             unlock_user_struct(target_rold, arg4, 1);
11292         }
11293         return ret;
11294     }
11295 #endif
11296 #ifdef TARGET_NR_gethostname
11297     case TARGET_NR_gethostname:
11298     {
11299         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11300         if (name) {
11301             ret = get_errno(gethostname(name, arg2));
11302             unlock_user(name, arg1, arg2);
11303         } else {
11304             ret = -TARGET_EFAULT;
11305         }
11306         return ret;
11307     }
11308 #endif
11309 #ifdef TARGET_NR_atomic_cmpxchg_32
11310     case TARGET_NR_atomic_cmpxchg_32:
11311     {
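        /*
         * Compare-and-swap helper: return the current 32-bit value at
         * guest address arg6 and, if it equals the expected value arg2,
         * store the new value arg1 there.
         */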
11312         /* should use start_exclusive from main.c */
11313         abi_ulong mem_value;
11314         if (get_user_u32(mem_value, arg6)) {
11315             target_siginfo_t info;
11316             info.si_signo = SIGSEGV;
11317             info.si_errno = 0;
11318             info.si_code = TARGET_SEGV_MAPERR;
11319             info._sifields._sigfault._addr = arg6;
11320             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11321                          QEMU_SI_FAULT, &info);
11322             ret = 0xdeadbeef;
11323 
11324         }
11325         if (mem_value == arg2)
11326             put_user_u32(arg1, arg6);
11327         return mem_value;
11328     }
11329 #endif
11330 #ifdef TARGET_NR_atomic_barrier
11331     case TARGET_NR_atomic_barrier:
11332         /* Like the kernel implementation and the
11333            barrier, this is a no-op. */
11334         return 0;
11335 #endif
11336 
11337 #ifdef TARGET_NR_timer_create
11338     case TARGET_NR_timer_create:
11339     {
11340         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11341 
11342         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11343 
11344         int clkid = arg1;
11345         int timer_index = next_free_host_timer();
11346 
11347         if (timer_index < 0) {
11348             ret = -TARGET_EAGAIN;
11349         } else {
11350             timer_t *phtimer = g_posix_timers + timer_index;
11351 
11352             if (arg2) {
11353                 phost_sevp = &host_sevp;
11354                 ret = target_to_host_sigevent(phost_sevp, arg2);
11355                 if (ret != 0) {
11356                     return ret;
11357                 }
11358             }
11359 
11360             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11361             if (ret) {
11362                 phtimer = NULL;
11363             } else {
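                /*
                 * The id returned to the guest is the index into
                 * g_posix_timers ORed with TIMER_MAGIC; get_timer_id()
                 * validates and strips the magic when the guest passes
                 * the id back in (see the timer_* cases below).
                 */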
11364                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11365                     return -TARGET_EFAULT;
11366                 }
11367             }
11368         }
11369         return ret;
11370     }
11371 #endif
11372 
11373 #ifdef TARGET_NR_timer_settime
11374     case TARGET_NR_timer_settime:
11375     {
11376         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11377          * struct itimerspec * old_value */
11378         target_timer_t timerid = get_timer_id(arg1);
11379 
11380         if (timerid < 0) {
11381             ret = timerid;
11382         } else if (arg3 == 0) {
11383             ret = -TARGET_EINVAL;
11384         } else {
11385             timer_t htimer = g_posix_timers[timerid];
11386             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11387 
11388             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11389                 return -TARGET_EFAULT;
11390             }
11391             ret = get_errno(
11392                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11393             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11394                 return -TARGET_EFAULT;
11395             }
11396         }
11397         return ret;
11398     }
11399 #endif
11400 
11401 #ifdef TARGET_NR_timer_gettime
11402     case TARGET_NR_timer_gettime:
11403     {
11404         /* args: timer_t timerid, struct itimerspec *curr_value */
11405         target_timer_t timerid = get_timer_id(arg1);
11406 
11407         if (timerid < 0) {
11408             ret = timerid;
11409         } else if (!arg2) {
11410             ret = -TARGET_EFAULT;
11411         } else {
11412             timer_t htimer = g_posix_timers[timerid];
11413             struct itimerspec hspec;
11414             ret = get_errno(timer_gettime(htimer, &hspec));
11415 
11416             if (host_to_target_itimerspec(arg2, &hspec)) {
11417                 ret = -TARGET_EFAULT;
11418             }
11419         }
11420         return ret;
11421     }
11422 #endif
11423 
11424 #ifdef TARGET_NR_timer_getoverrun
11425     case TARGET_NR_timer_getoverrun:
11426     {
11427         /* args: timer_t timerid */
11428         target_timer_t timerid = get_timer_id(arg1);
11429 
11430         if (timerid < 0) {
11431             ret = timerid;
11432         } else {
11433             timer_t htimer = g_posix_timers[timerid];
11434             ret = get_errno(timer_getoverrun(htimer));
11435         }
11436         fd_trans_unregister(ret);
11437         return ret;
11438     }
11439 #endif
11440 
11441 #ifdef TARGET_NR_timer_delete
11442     case TARGET_NR_timer_delete:
11443     {
11444         /* args: timer_t timerid */
11445         target_timer_t timerid = get_timer_id(arg1);
11446 
11447         if (timerid < 0) {
11448             ret = timerid;
11449         } else {
11450             timer_t htimer = g_posix_timers[timerid];
11451             ret = get_errno(timer_delete(htimer));
11452             g_posix_timers[timerid] = 0;
11453         }
11454         return ret;
11455     }
11456 #endif
11457 
11458 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11459     case TARGET_NR_timerfd_create:
11460         return get_errno(timerfd_create(arg1,
11461                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11462 #endif
11463 
11464 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11465     case TARGET_NR_timerfd_gettime:
11466         {
11467             struct itimerspec its_curr;
11468 
11469             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11470 
11471             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11472                 return -TARGET_EFAULT;
11473             }
11474         }
11475         return ret;
11476 #endif
11477 
11478 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11479     case TARGET_NR_timerfd_settime:
11480         {
11481             struct itimerspec its_new, its_old, *p_new;
11482 
11483             if (arg3) {
11484                 if (target_to_host_itimerspec(&its_new, arg3)) {
11485                     return -TARGET_EFAULT;
11486                 }
11487                 p_new = &its_new;
11488             } else {
11489                 p_new = NULL;
11490             }
11491 
11492             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11493 
11494             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11495                 return -TARGET_EFAULT;
11496             }
11497         }
11498         return ret;
11499 #endif
11500 
11501 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11502     case TARGET_NR_ioprio_get:
11503         return get_errno(ioprio_get(arg1, arg2));
11504 #endif
11505 
11506 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11507     case TARGET_NR_ioprio_set:
11508         return get_errno(ioprio_set(arg1, arg2, arg3));
11509 #endif
11510 
11511 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11512     case TARGET_NR_setns:
11513         return get_errno(setns(arg1, arg2));
11514 #endif
11515 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11516     case TARGET_NR_unshare:
11517         return get_errno(unshare(arg1));
11518 #endif
11519 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11520     case TARGET_NR_kcmp:
11521         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11522 #endif
11523 #ifdef TARGET_NR_swapcontext
11524     case TARGET_NR_swapcontext:
11525         /* PowerPC specific.  */
11526         return do_swapcontext(cpu_env, arg1, arg2, arg3);
11527 #endif
11528 
11529     default:
11530         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11531         return -TARGET_ENOSYS;
11532     }
11533     return ret;
11534 }
11535 
11536 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11537                     abi_long arg2, abi_long arg3, abi_long arg4,
11538                     abi_long arg5, abi_long arg6, abi_long arg7,
11539                     abi_long arg8)
11540 {
11541     CPUState *cpu = ENV_GET_CPU(cpu_env);
11542     abi_long ret;
11543 
11544 #ifdef DEBUG_ERESTARTSYS
11545     /* Debug-only code for exercising the syscall-restart code paths
11546      * in the per-architecture cpu main loops: restart every syscall
11547      * the guest makes once before letting it through.
11548      */
11549     {
11550         static bool flag;
11551         flag = !flag;
11552         if (flag) {
11553             return -TARGET_ERESTARTSYS;
11554         }
11555     }
11556 #endif
11557 
11558     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11559                              arg5, arg6, arg7, arg8);
11560 
11561     if (unlikely(do_strace)) {
11562         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11563         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11564                           arg5, arg6, arg7, arg8);
11565         print_syscall_ret(num, ret);
11566     } else {
11567         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11568                           arg5, arg6, arg7, arg8);
11569     }
11570 
11571     trace_guest_user_syscall_ret(cpu, num, ret);
11572     return ret;
11573 }
11574