xref: /openbmc/qemu/linux-user/syscall.c (revision 200dbf37)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef CONFIG_EVENTFD
63 #include <sys/eventfd.h>
64 #endif
65 #ifdef CONFIG_EPOLL
66 #include <sys/epoll.h>
67 #endif
68 #ifdef CONFIG_ATTR
69 #include "qemu/xattr.h"
70 #endif
71 #ifdef CONFIG_SENDFILE
72 #include <sys/sendfile.h>
73 #endif
74 
75 #define termios host_termios
76 #define winsize host_winsize
77 #define termio host_termio
78 #define sgttyb host_sgttyb /* same as target */
79 #define tchars host_tchars /* same as target */
80 #define ltchars host_ltchars /* same as target */
81 
82 #include <linux/termios.h>
83 #include <linux/unistd.h>
84 #include <linux/cdrom.h>
85 #include <linux/hdreg.h>
86 #include <linux/soundcard.h>
87 #include <linux/kd.h>
88 #include <linux/mtio.h>
89 #include <linux/fs.h>
90 #if defined(CONFIG_FIEMAP)
91 #include <linux/fiemap.h>
92 #endif
93 #include <linux/fb.h>
94 #if defined(CONFIG_USBFS)
95 #include <linux/usbdevice_fs.h>
96 #include <linux/usb/ch9.h>
97 #endif
98 #include <linux/vt.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <netpacket/packet.h>
105 #include <linux/netlink.h>
106 #include "linux_loop.h"
107 #include "uname.h"
108 
109 #include "qemu.h"
110 #include "qemu/guest-random.h"
111 #include "qapi/error.h"
112 #include "fd-trans.h"
113 
114 #ifndef CLONE_IO
115 #define CLONE_IO                0x80000000      /* Clone io context */
116 #endif
117 
118 /* We can't directly call the host clone syscall, because this will
119  * badly confuse libc (breaking mutexes, for example). So we must
120  * divide clone flags into:
121  *  * flag combinations that look like pthread_create()
122  *  * flag combinations that look like fork()
123  *  * flags we can implement within QEMU itself
124  *  * flags we can't support and will return an error for
125  */
126 /* For thread creation, all these flags must be present; for
127  * fork, none must be present.
128  */
129 #define CLONE_THREAD_FLAGS                              \
130     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
131      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
132 
133 /* These flags are ignored:
134  * CLONE_DETACHED is now ignored by the kernel;
135  * CLONE_IO is just an optimisation hint to the I/O scheduler
136  */
137 #define CLONE_IGNORED_FLAGS                     \
138     (CLONE_DETACHED | CLONE_IO)
139 
140 /* Flags for fork which we can implement within QEMU itself */
141 #define CLONE_OPTIONAL_FORK_FLAGS               \
142     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
143      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
144 
145 /* Flags for thread creation which we can implement within QEMU itself */
146 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
147     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
148      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
149 
150 #define CLONE_INVALID_FORK_FLAGS                                        \
151     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
152 
153 #define CLONE_INVALID_THREAD_FLAGS                                      \
154     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
155        CLONE_IGNORED_FLAGS))
156 
157 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
158  * have almost all been allocated. We cannot support any of
159  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
160  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
161  * The checks against the invalid thread masks above will catch these.
162  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
163  */
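/* A minimal sketch of how do_fork() is expected to apply the masks above,
 * for illustration only (the real checks live in do_fork() later in this
 * file):
 *
 *     if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
 *         // looks like pthread_create(): reject unsupported bits
 *         if (flags & CLONE_INVALID_THREAD_FLAGS) {
 *             return -TARGET_EINVAL;
 *         }
 *     } else if (flags & CLONE_INVALID_FORK_FLAGS) {
 *         // looks like fork(): likewise reject anything we can't emulate
 *         return -TARGET_EINVAL;
 *     }
 */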
164 
165 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
166  * once. This exercises the codepaths for restart.
167  */
168 //#define DEBUG_ERESTARTSYS
169 
170 //#include <linux/msdos_fs.h>
171 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
172 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
173 
174 #undef _syscall0
175 #undef _syscall1
176 #undef _syscall2
177 #undef _syscall3
178 #undef _syscall4
179 #undef _syscall5
180 #undef _syscall6
181 
182 #define _syscall0(type,name)		\
183 static type name (void)			\
184 {					\
185 	return syscall(__NR_##name);	\
186 }
187 
188 #define _syscall1(type,name,type1,arg1)		\
189 static type name (type1 arg1)			\
190 {						\
191 	return syscall(__NR_##name, arg1);	\
192 }
193 
194 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
195 static type name (type1 arg1,type2 arg2)		\
196 {							\
197 	return syscall(__NR_##name, arg1, arg2);	\
198 }
199 
200 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
201 static type name (type1 arg1,type2 arg2,type3 arg3)		\
202 {								\
203 	return syscall(__NR_##name, arg1, arg2, arg3);		\
204 }
205 
206 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
207 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
208 {										\
209 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
210 }
211 
212 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
213 		  type5,arg5)							\
214 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
215 {										\
216 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
217 }
218 
219 
220 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
221 		  type5,arg5,type6,arg6)					\
222 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
223                   type6 arg6)							\
224 {										\
225 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
226 }
227 
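/* For illustration, an invocation such as
 *
 *     _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp,
 *               uint, count)
 *
 * expands to
 *
 *     static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 *
 * with __NR_sys_getdents #defined just below to the host __NR_getdents. */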
228 
229 #define __NR_sys_uname __NR_uname
230 #define __NR_sys_getcwd1 __NR_getcwd
231 #define __NR_sys_getdents __NR_getdents
232 #define __NR_sys_getdents64 __NR_getdents64
233 #define __NR_sys_getpriority __NR_getpriority
234 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
235 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
236 #define __NR_sys_syslog __NR_syslog
237 #define __NR_sys_futex __NR_futex
238 #define __NR_sys_inotify_init __NR_inotify_init
239 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
240 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
241 
242 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
243 #define __NR__llseek __NR_lseek
244 #endif
245 
246 /* Newer kernel ports have llseek() instead of _llseek() */
247 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
248 #define TARGET_NR__llseek TARGET_NR_llseek
249 #endif
250 
251 #define __NR_sys_gettid __NR_gettid
252 _syscall0(int, sys_gettid)
253 
254 /* For the 64-bit guest on 32-bit host case we must emulate
255  * getdents using getdents64, because otherwise the host
256  * might hand us back more dirent records than we can fit
257  * into the guest buffer after structure format conversion.
258  * Otherwise we implement the guest getdents using the host getdents,
259  * if the host provides it.
259  */
260 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
261 #define EMULATE_GETDENTS_WITH_GETDENTS
262 #endif
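/* For example, a 64-bit guest running on a 32-bit host (TARGET_ABI_BITS 64,
 * HOST_LONG_BITS 32) fails the check above, so guest getdents is implemented
 * via the sys_getdents64 wrapper defined below instead. */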
263 
264 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
265 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
266 #endif
267 #if (defined(TARGET_NR_getdents) && \
268       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
269     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
270 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
271 #endif
272 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
273 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
274           loff_t *, res, uint, wh);
275 #endif
276 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
277 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
278           siginfo_t *, uinfo)
279 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
280 #ifdef __NR_exit_group
281 _syscall1(int,exit_group,int,error_code)
282 #endif
283 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
284 _syscall1(int,set_tid_address,int *,tidptr)
285 #endif
286 #if defined(TARGET_NR_futex) && defined(__NR_futex)
287 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
288           const struct timespec *,timeout,int *,uaddr2,int,val3)
289 #endif
290 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
291 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
292           unsigned long *, user_mask_ptr);
293 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
294 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
295           unsigned long *, user_mask_ptr);
296 #define __NR_sys_getcpu __NR_getcpu
297 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
298 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
299           void *, arg);
300 _syscall2(int, capget, struct __user_cap_header_struct *, header,
301           struct __user_cap_data_struct *, data);
302 _syscall2(int, capset, struct __user_cap_header_struct *, header,
303           struct __user_cap_data_struct *, data);
304 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
305 _syscall2(int, ioprio_get, int, which, int, who)
306 #endif
307 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
308 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
309 #endif
310 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
311 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
312 #endif
313 
314 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
315 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
316           unsigned long, idx1, unsigned long, idx2)
317 #endif
318 
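/* Each row below is { target_mask, target_bits, host_mask, host_bits }
 * (column names illustrative): translating target->host, a flags word for
 * which (flags & target_mask) == target_bits contributes host_bits to the
 * result, and the host->target direction uses the host columns the same way.
 * The all-zero entry terminates the table. */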
319 static bitmask_transtbl fcntl_flags_tbl[] = {
320   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
321   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
322   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
323   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
324   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
325   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
326   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
327   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
328   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
329   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
330   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
331   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
332   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
333 #if defined(O_DIRECT)
334   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
335 #endif
336 #if defined(O_NOATIME)
337   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
338 #endif
339 #if defined(O_CLOEXEC)
340   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
341 #endif
342 #if defined(O_PATH)
343   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
344 #endif
345 #if defined(O_TMPFILE)
346   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
347 #endif
348   /* Don't terminate the list prematurely on 64-bit host+guest.  */
349 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
350   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
351 #endif
352   { 0, 0, 0, 0 }
353 };
354 
355 static int sys_getcwd1(char *buf, size_t size)
356 {
357   if (getcwd(buf, size) == NULL) {
358       /* getcwd() sets errno */
359       return (-1);
360   }
361   return strlen(buf)+1;
362 }
363 
364 #ifdef TARGET_NR_utimensat
365 #if defined(__NR_utimensat)
366 #define __NR_sys_utimensat __NR_utimensat
367 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
368           const struct timespec *,tsp,int,flags)
369 #else
370 static int sys_utimensat(int dirfd, const char *pathname,
371                          const struct timespec times[2], int flags)
372 {
373     errno = ENOSYS;
374     return -1;
375 }
376 #endif
377 #endif /* TARGET_NR_utimensat */
378 
379 #ifdef TARGET_NR_renameat2
380 #if defined(__NR_renameat2)
381 #define __NR_sys_renameat2 __NR_renameat2
382 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
383           const char *, new, unsigned int, flags)
384 #else
385 static int sys_renameat2(int oldfd, const char *old,
386                          int newfd, const char *new, int flags)
387 {
388     if (flags == 0) {
389         return renameat(oldfd, old, newfd, new);
390     }
391     errno = ENOSYS;
392     return -1;
393 }
394 #endif
395 #endif /* TARGET_NR_renameat2 */
396 
397 #ifdef CONFIG_INOTIFY
398 #include <sys/inotify.h>
399 
400 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
401 static int sys_inotify_init(void)
402 {
403   return (inotify_init());
404 }
405 #endif
406 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
407 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
408 {
409   return (inotify_add_watch(fd, pathname, mask));
410 }
411 #endif
412 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
413 static int sys_inotify_rm_watch(int fd, int32_t wd)
414 {
415   return (inotify_rm_watch(fd, wd));
416 }
417 #endif
418 #ifdef CONFIG_INOTIFY1
419 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
420 static int sys_inotify_init1(int flags)
421 {
422   return (inotify_init1(flags));
423 }
424 #endif
425 #endif
426 #else
427 /* Userspace can usually survive runtime without inotify */
428 #undef TARGET_NR_inotify_init
429 #undef TARGET_NR_inotify_init1
430 #undef TARGET_NR_inotify_add_watch
431 #undef TARGET_NR_inotify_rm_watch
432 #endif /* CONFIG_INOTIFY  */
433 
434 #if defined(TARGET_NR_prlimit64)
435 #ifndef __NR_prlimit64
436 # define __NR_prlimit64 -1
437 #endif
438 #define __NR_sys_prlimit64 __NR_prlimit64
439 /* The glibc rlimit structure may not match the one used by the underlying syscall */
440 struct host_rlimit64 {
441     uint64_t rlim_cur;
442     uint64_t rlim_max;
443 };
444 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
445           const struct host_rlimit64 *, new_limit,
446           struct host_rlimit64 *, old_limit)
447 #endif
448 
449 
450 #if defined(TARGET_NR_timer_create)
451 /* Maximum of 32 active POSIX timers allowed at any one time. */
452 static timer_t g_posix_timers[32] = { 0, } ;
453 
454 static inline int next_free_host_timer(void)
455 {
456     int k ;
457     /* FIXME: Does finding the next free slot require a lock? */
458     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
459         if (g_posix_timers[k] == 0) {
460             g_posix_timers[k] = (timer_t) 1;
461             return k;
462         }
463     }
464     return -1;
465 }
466 #endif
467 
468 /* ARM EABI and MIPS expect 64 bit types to be aligned on even pairs of registers */
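/* For example, on ARM EABI a guest pread64(fd, buf, count, offset) passes the
 * 64 bit offset in an even/odd register pair (r4/r5, leaving r3 unused), so
 * the syscall arguments seen by QEMU are shifted by one slot (illustrative);
 * the per-target helpers below report whether that adjustment is needed. */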
469 #ifdef TARGET_ARM
470 static inline int regpairs_aligned(void *cpu_env, int num)
471 {
472     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
473 }
474 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
475 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
476 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
477 /* The SysV ABI for PPC32 expects 64 bit parameters to be passed in odd/even
478  * register pairs, which works out the same as ARM/MIPS because argument
479  * passing starts with r3 as arg1 */
480 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
481 #elif defined(TARGET_SH4)
482 /* SH4 doesn't align register pairs, except for p{read,write}64 */
483 static inline int regpairs_aligned(void *cpu_env, int num)
484 {
485     switch (num) {
486     case TARGET_NR_pread64:
487     case TARGET_NR_pwrite64:
488         return 1;
489 
490     default:
491         return 0;
492     }
493 }
494 #elif defined(TARGET_XTENSA)
495 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
496 #else
497 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
498 #endif
499 
500 #define ERRNO_TABLE_SIZE 1200
501 
502 /* target_to_host_errno_table[] is initialized from
503  * host_to_target_errno_table[] in syscall_init(). */
504 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
505 };
506 
507 /*
508  * This list is the union of errno values overridden in asm-<arch>/errno.h
509  * minus the errnos that are not actually generic to all archs.
510  */
511 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
512     [EAGAIN]		= TARGET_EAGAIN,
513     [EIDRM]		= TARGET_EIDRM,
514     [ECHRNG]		= TARGET_ECHRNG,
515     [EL2NSYNC]		= TARGET_EL2NSYNC,
516     [EL3HLT]		= TARGET_EL3HLT,
517     [EL3RST]		= TARGET_EL3RST,
518     [ELNRNG]		= TARGET_ELNRNG,
519     [EUNATCH]		= TARGET_EUNATCH,
520     [ENOCSI]		= TARGET_ENOCSI,
521     [EL2HLT]		= TARGET_EL2HLT,
522     [EDEADLK]		= TARGET_EDEADLK,
523     [ENOLCK]		= TARGET_ENOLCK,
524     [EBADE]		= TARGET_EBADE,
525     [EBADR]		= TARGET_EBADR,
526     [EXFULL]		= TARGET_EXFULL,
527     [ENOANO]		= TARGET_ENOANO,
528     [EBADRQC]		= TARGET_EBADRQC,
529     [EBADSLT]		= TARGET_EBADSLT,
530     [EBFONT]		= TARGET_EBFONT,
531     [ENOSTR]		= TARGET_ENOSTR,
532     [ENODATA]		= TARGET_ENODATA,
533     [ETIME]		= TARGET_ETIME,
534     [ENOSR]		= TARGET_ENOSR,
535     [ENONET]		= TARGET_ENONET,
536     [ENOPKG]		= TARGET_ENOPKG,
537     [EREMOTE]		= TARGET_EREMOTE,
538     [ENOLINK]		= TARGET_ENOLINK,
539     [EADV]		= TARGET_EADV,
540     [ESRMNT]		= TARGET_ESRMNT,
541     [ECOMM]		= TARGET_ECOMM,
542     [EPROTO]		= TARGET_EPROTO,
543     [EDOTDOT]		= TARGET_EDOTDOT,
544     [EMULTIHOP]		= TARGET_EMULTIHOP,
545     [EBADMSG]		= TARGET_EBADMSG,
546     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
547     [EOVERFLOW]		= TARGET_EOVERFLOW,
548     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
549     [EBADFD]		= TARGET_EBADFD,
550     [EREMCHG]		= TARGET_EREMCHG,
551     [ELIBACC]		= TARGET_ELIBACC,
552     [ELIBBAD]		= TARGET_ELIBBAD,
553     [ELIBSCN]		= TARGET_ELIBSCN,
554     [ELIBMAX]		= TARGET_ELIBMAX,
555     [ELIBEXEC]		= TARGET_ELIBEXEC,
556     [EILSEQ]		= TARGET_EILSEQ,
557     [ENOSYS]		= TARGET_ENOSYS,
558     [ELOOP]		= TARGET_ELOOP,
559     [ERESTART]		= TARGET_ERESTART,
560     [ESTRPIPE]		= TARGET_ESTRPIPE,
561     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
562     [EUSERS]		= TARGET_EUSERS,
563     [ENOTSOCK]		= TARGET_ENOTSOCK,
564     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
565     [EMSGSIZE]		= TARGET_EMSGSIZE,
566     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
567     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
568     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
569     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
570     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
571     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
572     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
573     [EADDRINUSE]	= TARGET_EADDRINUSE,
574     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
575     [ENETDOWN]		= TARGET_ENETDOWN,
576     [ENETUNREACH]	= TARGET_ENETUNREACH,
577     [ENETRESET]		= TARGET_ENETRESET,
578     [ECONNABORTED]	= TARGET_ECONNABORTED,
579     [ECONNRESET]	= TARGET_ECONNRESET,
580     [ENOBUFS]		= TARGET_ENOBUFS,
581     [EISCONN]		= TARGET_EISCONN,
582     [ENOTCONN]		= TARGET_ENOTCONN,
583     [EUCLEAN]		= TARGET_EUCLEAN,
584     [ENOTNAM]		= TARGET_ENOTNAM,
585     [ENAVAIL]		= TARGET_ENAVAIL,
586     [EISNAM]		= TARGET_EISNAM,
587     [EREMOTEIO]		= TARGET_EREMOTEIO,
588     [EDQUOT]            = TARGET_EDQUOT,
589     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
590     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
591     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
592     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
593     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
594     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
595     [EALREADY]		= TARGET_EALREADY,
596     [EINPROGRESS]	= TARGET_EINPROGRESS,
597     [ESTALE]		= TARGET_ESTALE,
598     [ECANCELED]		= TARGET_ECANCELED,
599     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
600     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
601 #ifdef ENOKEY
602     [ENOKEY]		= TARGET_ENOKEY,
603 #endif
604 #ifdef EKEYEXPIRED
605     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
606 #endif
607 #ifdef EKEYREVOKED
608     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
609 #endif
610 #ifdef EKEYREJECTED
611     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
612 #endif
613 #ifdef EOWNERDEAD
614     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
615 #endif
616 #ifdef ENOTRECOVERABLE
617     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
618 #endif
619 #ifdef ENOMSG
620     [ENOMSG]            = TARGET_ENOMSG,
621 #endif
622 #ifdef ERFKILL
623     [ERFKILL]           = TARGET_ERFKILL,
624 #endif
625 #ifdef EHWPOISON
626     [EHWPOISON]         = TARGET_EHWPOISON,
627 #endif
628 };
629 
630 static inline int host_to_target_errno(int err)
631 {
632     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
633         host_to_target_errno_table[err]) {
634         return host_to_target_errno_table[err];
635     }
636     return err;
637 }
638 
639 static inline int target_to_host_errno(int err)
640 {
641     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
642         target_to_host_errno_table[err]) {
643         return target_to_host_errno_table[err];
644     }
645     return err;
646 }
647 
648 static inline abi_long get_errno(abi_long ret)
649 {
650     if (ret == -1)
651         return -host_to_target_errno(errno);
652     else
653         return ret;
654 }
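/* Example: get_errno(read(fd, buf, count)) returns the byte count on
 * success, or -TARGET_Exxx (e.g. -TARGET_EBADF) when the host call fails
 * and sets errno. */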
655 
656 const char *target_strerror(int err)
657 {
658     if (err == TARGET_ERESTARTSYS) {
659         return "To be restarted";
660     }
661     if (err == TARGET_QEMU_ESIGRETURN) {
662         return "Successful exit from sigreturn";
663     }
664 
665     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
666         return NULL;
667     }
668     return strerror(target_to_host_errno(err));
669 }
670 
671 #define safe_syscall0(type, name) \
672 static type safe_##name(void) \
673 { \
674     return safe_syscall(__NR_##name); \
675 }
676 
677 #define safe_syscall1(type, name, type1, arg1) \
678 static type safe_##name(type1 arg1) \
679 { \
680     return safe_syscall(__NR_##name, arg1); \
681 }
682 
683 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
684 static type safe_##name(type1 arg1, type2 arg2) \
685 { \
686     return safe_syscall(__NR_##name, arg1, arg2); \
687 }
688 
689 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
690 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
691 { \
692     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
693 }
694 
695 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
696     type4, arg4) \
697 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
698 { \
699     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
700 }
701 
702 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
703     type4, arg4, type5, arg5) \
704 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
705     type5 arg5) \
706 { \
707     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
708 }
709 
710 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
711     type4, arg4, type5, arg5, type6, arg6) \
712 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
713     type5 arg5, type6 arg6) \
714 { \
715     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
716 }
717 
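/* The instantiations below are plain pass-throughs: for example,
 * safe_read(fd, buf, count) issues safe_syscall(__NR_read, fd, buf, count).
 * Callers in this file typically wrap the result in get_errno(), e.g.
 * get_errno(safe_pselect6(...)) in do_select() below, to convert a host
 * errno into a target errno. */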
718 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
719 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
720 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
721               int, flags, mode_t, mode)
722 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
723               struct rusage *, rusage)
724 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
725               int, options, struct rusage *, rusage)
726 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
727 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
728               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
729 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
730               struct timespec *, tsp, const sigset_t *, sigmask,
731               size_t, sigsetsize)
732 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
733               int, maxevents, int, timeout, const sigset_t *, sigmask,
734               size_t, sigsetsize)
735 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
736               const struct timespec *,timeout,int *,uaddr2,int,val3)
737 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
738 safe_syscall2(int, kill, pid_t, pid, int, sig)
739 safe_syscall2(int, tkill, int, tid, int, sig)
740 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
741 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
742 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
743 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
744               unsigned long, pos_l, unsigned long, pos_h)
745 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
746               unsigned long, pos_l, unsigned long, pos_h)
747 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
748               socklen_t, addrlen)
749 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
750               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
751 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
752               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
753 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
754 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
755 safe_syscall2(int, flock, int, fd, int, operation)
756 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
757               const struct timespec *, uts, size_t, sigsetsize)
758 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
759               int, flags)
760 safe_syscall2(int, nanosleep, const struct timespec *, req,
761               struct timespec *, rem)
762 #ifdef TARGET_NR_clock_nanosleep
763 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
764               const struct timespec *, req, struct timespec *, rem)
765 #endif
766 #if !defined(__NR_msgsnd) || !defined(__NR_msgrcv) || !defined(__NR_semtimedop)
767 /* This host kernel architecture uses a single ipc syscall; fake up
768  * wrappers for the sub-operations to hide this implementation detail.
769  * Annoyingly we can't include linux/ipc.h to get the constant definitions
770  * for the call parameter because some structs in there conflict with the
771  * sys/ipc.h ones. So we just define them here, and rely on them being
772  * the same for all host architectures.
773  */
774 #define Q_SEMTIMEDOP 4
775 #define Q_MSGSND 11
776 #define Q_MSGRCV 12
777 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
778 
779 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
780               void *, ptr, long, fifth)
781 #endif
782 #ifdef __NR_msgsnd
783 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
784               int, flags)
785 #else
786 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
787 {
788     return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
789 }
790 #endif
791 #ifdef __NR_msgrcv
792 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
793               long, msgtype, int, flags)
794 #else
795 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
796 {
797     return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
798 }
799 #endif
800 #ifdef __NR_semtimedop
801 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
802               unsigned, nsops, const struct timespec *, timeout)
803 #else
804 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
805                            const struct timespec *timeout)
806 {
807     return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
808                     (long)timeout);
809 }
810 #endif
811 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
812 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
813               size_t, len, unsigned, prio, const struct timespec *, timeout)
814 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
815               size_t, len, unsigned *, prio, const struct timespec *, timeout)
816 #endif
817 /* We do ioctl like this rather than via safe_syscall3 to preserve the
818  * "third argument might be integer or pointer or not present" behaviour of
819  * the libc function.
820  */
821 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
822 /* Similarly for fcntl. Note that callers must always:
823  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
824  *  use the flock64 struct rather than unsuffixed flock
825  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
826  */
827 #ifdef __NR_fcntl64
828 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
829 #else
830 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
831 #endif
832 
833 static inline int host_to_target_sock_type(int host_type)
834 {
835     int target_type;
836 
837     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
838     case SOCK_DGRAM:
839         target_type = TARGET_SOCK_DGRAM;
840         break;
841     case SOCK_STREAM:
842         target_type = TARGET_SOCK_STREAM;
843         break;
844     default:
845         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
846         break;
847     }
848 
849 #if defined(SOCK_CLOEXEC)
850     if (host_type & SOCK_CLOEXEC) {
851         target_type |= TARGET_SOCK_CLOEXEC;
852     }
853 #endif
854 
855 #if defined(SOCK_NONBLOCK)
856     if (host_type & SOCK_NONBLOCK) {
857         target_type |= TARGET_SOCK_NONBLOCK;
858     }
859 #endif
860 
861     return target_type;
862 }
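/* Example: a host type of (SOCK_STREAM | SOCK_NONBLOCK) is returned as
 * (TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK) for the guest, when the host
 * defines SOCK_NONBLOCK. */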
863 
864 static abi_ulong target_brk;
865 static abi_ulong target_original_brk;
866 static abi_ulong brk_page;
867 
868 void target_set_brk(abi_ulong new_brk)
869 {
870     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
871     brk_page = HOST_PAGE_ALIGN(target_brk);
872 }
873 
874 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
875 #define DEBUGF_BRK(message, args...)
876 
877 /* do_brk() must return target values and target errnos. */
878 abi_long do_brk(abi_ulong new_brk)
879 {
880     abi_long mapped_addr;
881     abi_ulong new_alloc_size;
882 
883     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
884 
885     if (!new_brk) {
886         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
887         return target_brk;
888     }
889     if (new_brk < target_original_brk) {
890         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
891                    target_brk);
892         return target_brk;
893     }
894 
895     /* If the new brk is less than the highest page reserved to the
896      * target heap allocation, set it and we're almost done...  */
897     if (new_brk <= brk_page) {
898         /* Heap contents are initialized to zero, as for anonymous
899          * mapped pages.  */
900         if (new_brk > target_brk) {
901             memset(g2h(target_brk), 0, new_brk - target_brk);
902         }
903         target_brk = new_brk;
904         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
905         return target_brk;
906     }
907 
908     /* We need to allocate more memory after the brk... Note that
909      * we don't use MAP_FIXED because that will map over the top of
910      * any existing mapping (like the one with the host libc or qemu
911      * itself); instead we treat "mapped but at wrong address" as
912      * a failure and unmap again.
913      */
914     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
915     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
916                                         PROT_READ|PROT_WRITE,
917                                         MAP_ANON|MAP_PRIVATE, 0, 0));
918 
919     if (mapped_addr == brk_page) {
920         /* Heap contents are initialized to zero, as for anonymous
921          * mapped pages.  Technically the new pages are already
922          * initialized to zero since they *are* anonymous mapped
923          * pages, however we have to take care with the contents that
924          * come from the remaining part of the previous page: it may
925  * contain garbage data due to previous heap usage (grown
926  * then shrunk).  */
927         memset(g2h(target_brk), 0, brk_page - target_brk);
928 
929         target_brk = new_brk;
930         brk_page = HOST_PAGE_ALIGN(target_brk);
931         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
932             target_brk);
933         return target_brk;
934     } else if (mapped_addr != -1) {
935         /* Mapped but at wrong address, meaning there wasn't actually
936          * enough space for this brk.
937          */
938         target_munmap(mapped_addr, new_alloc_size);
939         mapped_addr = -1;
940         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
941     }
942     else {
943         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
944     }
945 
946 #if defined(TARGET_ALPHA)
947     /* We (partially) emulate OSF/1 on Alpha, which requires we
948        return a proper errno, not an unchanged brk value.  */
949     return -TARGET_ENOMEM;
950 #endif
951     /* For everything else, return the previous break. */
952     return target_brk;
953 }
954 
955 static inline abi_long copy_from_user_fdset(fd_set *fds,
956                                             abi_ulong target_fds_addr,
957                                             int n)
958 {
959     int i, nw, j, k;
960     abi_ulong b, *target_fds;
961 
962     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
963     if (!(target_fds = lock_user(VERIFY_READ,
964                                  target_fds_addr,
965                                  sizeof(abi_ulong) * nw,
966                                  1)))
967         return -TARGET_EFAULT;
968 
969     FD_ZERO(fds);
970     k = 0;
971     for (i = 0; i < nw; i++) {
972         /* grab the abi_ulong */
973         __get_user(b, &target_fds[i]);
974         for (j = 0; j < TARGET_ABI_BITS; j++) {
975             /* check the bit inside the abi_ulong */
976             if ((b >> j) & 1)
977                 FD_SET(k, fds);
978             k++;
979         }
980     }
981 
982     unlock_user(target_fds, target_fds_addr, 0);
983 
984     return 0;
985 }
986 
987 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
988                                                  abi_ulong target_fds_addr,
989                                                  int n)
990 {
991     if (target_fds_addr) {
992         if (copy_from_user_fdset(fds, target_fds_addr, n))
993             return -TARGET_EFAULT;
994         *fds_ptr = fds;
995     } else {
996         *fds_ptr = NULL;
997     }
998     return 0;
999 }
1000 
1001 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1002                                           const fd_set *fds,
1003                                           int n)
1004 {
1005     int i, nw, j, k;
1006     abi_long v;
1007     abi_ulong *target_fds;
1008 
1009     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1010     if (!(target_fds = lock_user(VERIFY_WRITE,
1011                                  target_fds_addr,
1012                                  sizeof(abi_ulong) * nw,
1013                                  0)))
1014         return -TARGET_EFAULT;
1015 
1016     k = 0;
1017     for (i = 0; i < nw; i++) {
1018         v = 0;
1019         for (j = 0; j < TARGET_ABI_BITS; j++) {
1020             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1021             k++;
1022         }
1023         __put_user(v, &target_fds[i]);
1024     }
1025 
1026     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1027 
1028     return 0;
1029 }
1030 
1031 #if defined(__alpha__)
1032 #define HOST_HZ 1024
1033 #else
1034 #define HOST_HZ 100
1035 #endif
1036 
1037 static inline abi_long host_to_target_clock_t(long ticks)
1038 {
1039 #if HOST_HZ == TARGET_HZ
1040     return ticks;
1041 #else
1042     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1043 #endif
1044 }
1045 
1046 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1047                                              const struct rusage *rusage)
1048 {
1049     struct target_rusage *target_rusage;
1050 
1051     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1052         return -TARGET_EFAULT;
1053     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1054     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1055     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1056     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1057     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1058     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1059     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1060     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1061     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1062     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1063     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1064     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1065     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1066     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1067     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1068     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1069     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1070     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1071     unlock_user_struct(target_rusage, target_addr, 1);
1072 
1073     return 0;
1074 }
1075 
1076 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1077 {
1078     abi_ulong target_rlim_swap;
1079     rlim_t result;
1080 
1081     target_rlim_swap = tswapal(target_rlim);
1082     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1083         return RLIM_INFINITY;
1084 
1085     result = target_rlim_swap;
1086     if (target_rlim_swap != (rlim_t)result)
1087         return RLIM_INFINITY;
1088 
1089     return result;
1090 }
1091 
1092 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1093 {
1094     abi_ulong target_rlim_swap;
1095     abi_ulong result;
1096 
1097     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1098         target_rlim_swap = TARGET_RLIM_INFINITY;
1099     else
1100         target_rlim_swap = rlim;
1101     result = tswapal(target_rlim_swap);
1102 
1103     return result;
1104 }
1105 
1106 static inline int target_to_host_resource(int code)
1107 {
1108     switch (code) {
1109     case TARGET_RLIMIT_AS:
1110         return RLIMIT_AS;
1111     case TARGET_RLIMIT_CORE:
1112         return RLIMIT_CORE;
1113     case TARGET_RLIMIT_CPU:
1114         return RLIMIT_CPU;
1115     case TARGET_RLIMIT_DATA:
1116         return RLIMIT_DATA;
1117     case TARGET_RLIMIT_FSIZE:
1118         return RLIMIT_FSIZE;
1119     case TARGET_RLIMIT_LOCKS:
1120         return RLIMIT_LOCKS;
1121     case TARGET_RLIMIT_MEMLOCK:
1122         return RLIMIT_MEMLOCK;
1123     case TARGET_RLIMIT_MSGQUEUE:
1124         return RLIMIT_MSGQUEUE;
1125     case TARGET_RLIMIT_NICE:
1126         return RLIMIT_NICE;
1127     case TARGET_RLIMIT_NOFILE:
1128         return RLIMIT_NOFILE;
1129     case TARGET_RLIMIT_NPROC:
1130         return RLIMIT_NPROC;
1131     case TARGET_RLIMIT_RSS:
1132         return RLIMIT_RSS;
1133     case TARGET_RLIMIT_RTPRIO:
1134         return RLIMIT_RTPRIO;
1135     case TARGET_RLIMIT_SIGPENDING:
1136         return RLIMIT_SIGPENDING;
1137     case TARGET_RLIMIT_STACK:
1138         return RLIMIT_STACK;
1139     default:
1140         return code;
1141     }
1142 }
1143 
1144 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1145                                               abi_ulong target_tv_addr)
1146 {
1147     struct target_timeval *target_tv;
1148 
1149     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1150         return -TARGET_EFAULT;
1151 
1152     __get_user(tv->tv_sec, &target_tv->tv_sec);
1153     __get_user(tv->tv_usec, &target_tv->tv_usec);
1154 
1155     unlock_user_struct(target_tv, target_tv_addr, 0);
1156 
1157     return 0;
1158 }
1159 
1160 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1161                                             const struct timeval *tv)
1162 {
1163     struct target_timeval *target_tv;
1164 
1165     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1166         return -TARGET_EFAULT;
1167 
1168     __put_user(tv->tv_sec, &target_tv->tv_sec);
1169     __put_user(tv->tv_usec, &target_tv->tv_usec);
1170 
1171     unlock_user_struct(target_tv, target_tv_addr, 1);
1172 
1173     return 0;
1174 }
1175 
1176 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1177                                                abi_ulong target_tz_addr)
1178 {
1179     struct target_timezone *target_tz;
1180 
1181     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1182         return -TARGET_EFAULT;
1183     }
1184 
1185     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1186     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1187 
1188     unlock_user_struct(target_tz, target_tz_addr, 0);
1189 
1190     return 0;
1191 }
1192 
1193 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1194 #include <mqueue.h>
1195 
1196 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1197                                               abi_ulong target_mq_attr_addr)
1198 {
1199     struct target_mq_attr *target_mq_attr;
1200 
1201     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1202                           target_mq_attr_addr, 1))
1203         return -TARGET_EFAULT;
1204 
1205     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1206     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1207     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1208     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1209 
1210     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1211 
1212     return 0;
1213 }
1214 
1215 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1216                                             const struct mq_attr *attr)
1217 {
1218     struct target_mq_attr *target_mq_attr;
1219 
1220     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1221                           target_mq_attr_addr, 0))
1222         return -TARGET_EFAULT;
1223 
1224     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1225     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1226     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1227     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1228 
1229     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1230 
1231     return 0;
1232 }
1233 #endif
1234 
1235 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1236 /* do_select() must return target values and target errnos. */
1237 static abi_long do_select(int n,
1238                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1239                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1240 {
1241     fd_set rfds, wfds, efds;
1242     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1243     struct timeval tv;
1244     struct timespec ts, *ts_ptr;
1245     abi_long ret;
1246 
1247     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1248     if (ret) {
1249         return ret;
1250     }
1251     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1252     if (ret) {
1253         return ret;
1254     }
1255     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1256     if (ret) {
1257         return ret;
1258     }
1259 
1260     if (target_tv_addr) {
1261         if (copy_from_user_timeval(&tv, target_tv_addr))
1262             return -TARGET_EFAULT;
1263         ts.tv_sec = tv.tv_sec;
1264         ts.tv_nsec = tv.tv_usec * 1000;
1265         ts_ptr = &ts;
1266     } else {
1267         ts_ptr = NULL;
1268     }
1269 
1270     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1271                                   ts_ptr, NULL));
1272 
1273     if (!is_error(ret)) {
1274         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1275             return -TARGET_EFAULT;
1276         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1277             return -TARGET_EFAULT;
1278         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1279             return -TARGET_EFAULT;
1280 
1281         if (target_tv_addr) {
1282             tv.tv_sec = ts.tv_sec;
1283             tv.tv_usec = ts.tv_nsec / 1000;
1284             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1285                 return -TARGET_EFAULT;
1286             }
1287         }
1288     }
1289 
1290     return ret;
1291 }
1292 
1293 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1294 static abi_long do_old_select(abi_ulong arg1)
1295 {
1296     struct target_sel_arg_struct *sel;
1297     abi_ulong inp, outp, exp, tvp;
1298     long nsel;
1299 
1300     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1301         return -TARGET_EFAULT;
1302     }
1303 
1304     nsel = tswapal(sel->n);
1305     inp = tswapal(sel->inp);
1306     outp = tswapal(sel->outp);
1307     exp = tswapal(sel->exp);
1308     tvp = tswapal(sel->tvp);
1309 
1310     unlock_user_struct(sel, arg1, 0);
1311 
1312     return do_select(nsel, inp, outp, exp, tvp);
1313 }
1314 #endif
1315 #endif
1316 
1317 static abi_long do_pipe2(int host_pipe[], int flags)
1318 {
1319 #ifdef CONFIG_PIPE2
1320     return pipe2(host_pipe, flags);
1321 #else
1322     return -ENOSYS;
1323 #endif
1324 }
1325 
1326 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1327                         int flags, int is_pipe2)
1328 {
1329     int host_pipe[2];
1330     abi_long ret;
1331     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1332 
1333     if (is_error(ret))
1334         return get_errno(ret);
1335 
1336     /* Several targets have special calling conventions for the original
1337        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1338     if (!is_pipe2) {
1339 #if defined(TARGET_ALPHA)
1340         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1341         return host_pipe[0];
1342 #elif defined(TARGET_MIPS)
1343         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1344         return host_pipe[0];
1345 #elif defined(TARGET_SH4)
1346         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1347         return host_pipe[0];
1348 #elif defined(TARGET_SPARC)
1349         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1350         return host_pipe[0];
1351 #endif
1352     }
1353 
1354     if (put_user_s32(host_pipe[0], pipedes)
1355         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1356         return -TARGET_EFAULT;
1357     return get_errno(ret);
1358 }
1359 
1360 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1361                                               abi_ulong target_addr,
1362                                               socklen_t len)
1363 {
1364     struct target_ip_mreqn *target_smreqn;
1365 
1366     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1367     if (!target_smreqn)
1368         return -TARGET_EFAULT;
1369     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1370     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1371     if (len == sizeof(struct target_ip_mreqn))
1372         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1373     unlock_user(target_smreqn, target_addr, 0);
1374 
1375     return 0;
1376 }
1377 
1378 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1379                                                abi_ulong target_addr,
1380                                                socklen_t len)
1381 {
1382     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1383     sa_family_t sa_family;
1384     struct target_sockaddr *target_saddr;
1385 
1386     if (fd_trans_target_to_host_addr(fd)) {
1387         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1388     }
1389 
1390     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1391     if (!target_saddr)
1392         return -TARGET_EFAULT;
1393 
1394     sa_family = tswap16(target_saddr->sa_family);
1395 
1396     /* Oops. The caller might send an incomplete sun_path; sun_path
1397      * must be terminated by \0 (see the manual page), but
1398      * unfortunately it is quite common to specify sockaddr_un
1399      * length as "strlen(x->sun_path)" while it should be
1400      * "strlen(...) + 1". We'll fix that here if needed.
1401      * The Linux kernel applies a similar fixup.
1402      */
1403 
1404     if (sa_family == AF_UNIX) {
1405         if (len < unix_maxlen && len > 0) {
1406             char *cp = (char*)target_saddr;
1407 
1408             if ( cp[len-1] && !cp[len] )
1409                 len++;
1410         }
1411         if (len > unix_maxlen)
1412             len = unix_maxlen;
1413     }
1414 
1415     memcpy(addr, target_saddr, len);
1416     addr->sa_family = sa_family;
1417     if (sa_family == AF_NETLINK) {
1418         struct sockaddr_nl *nladdr;
1419 
1420         nladdr = (struct sockaddr_nl *)addr;
1421         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1422         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1423     } else if (sa_family == AF_PACKET) {
1424         struct target_sockaddr_ll *lladdr;
1425 
1426         lladdr = (struct target_sockaddr_ll *)addr;
1427         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1428         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1429     }
1430     unlock_user(target_saddr, target_addr, 0);
1431 
1432     return 0;
1433 }
1434 
1435 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1436                                                struct sockaddr *addr,
1437                                                socklen_t len)
1438 {
1439     struct target_sockaddr *target_saddr;
1440 
1441     if (len == 0) {
1442         return 0;
1443     }
1444     assert(addr);
1445 
1446     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1447     if (!target_saddr)
1448         return -TARGET_EFAULT;
1449     memcpy(target_saddr, addr, len);
1450     if (len >= offsetof(struct target_sockaddr, sa_family) +
1451         sizeof(target_saddr->sa_family)) {
1452         target_saddr->sa_family = tswap16(addr->sa_family);
1453     }
1454     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1455         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1456         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1457         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1458     } else if (addr->sa_family == AF_PACKET) {
1459         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1460         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1461         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1462     } else if (addr->sa_family == AF_INET6 &&
1463                len >= sizeof(struct target_sockaddr_in6)) {
1464         struct target_sockaddr_in6 *target_in6 =
1465                (struct target_sockaddr_in6 *)target_saddr;
1466         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1467     }
1468     unlock_user(target_saddr, target_addr, len);
1469 
1470     return 0;
1471 }
1472 
1473 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1474                                            struct target_msghdr *target_msgh)
1475 {
1476     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1477     abi_long msg_controllen;
1478     abi_ulong target_cmsg_addr;
1479     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1480     socklen_t space = 0;
1481 
1482     msg_controllen = tswapal(target_msgh->msg_controllen);
1483     if (msg_controllen < sizeof (struct target_cmsghdr))
1484         goto the_end;
1485     target_cmsg_addr = tswapal(target_msgh->msg_control);
1486     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1487     target_cmsg_start = target_cmsg;
1488     if (!target_cmsg)
1489         return -TARGET_EFAULT;
1490 
1491     while (cmsg && target_cmsg) {
1492         void *data = CMSG_DATA(cmsg);
1493         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1494 
1495         int len = tswapal(target_cmsg->cmsg_len)
1496             - sizeof(struct target_cmsghdr);
1497 
1498         space += CMSG_SPACE(len);
1499         if (space > msgh->msg_controllen) {
1500             space -= CMSG_SPACE(len);
1501             /* This is a QEMU bug, since we allocated the payload
1502              * area ourselves (unlike overflow in host-to-target
1503              * conversion, which is just the guest giving us a buffer
1504              * that's too small). It can't happen for the payload types
1505              * we currently support; if it becomes an issue in future
1506              * we would need to improve our allocation strategy to
1507              * something more intelligent than "twice the size of the
1508              * target buffer we're reading from".
1509              */
1510             gemu_log("Host cmsg overflow\n");
1511             break;
1512         }
1513 
1514         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1515             cmsg->cmsg_level = SOL_SOCKET;
1516         } else {
1517             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1518         }
1519         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1520         cmsg->cmsg_len = CMSG_LEN(len);
1521 
1522         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1523             int *fd = (int *)data;
1524             int *target_fd = (int *)target_data;
1525             int i, numfds = len / sizeof(int);
1526 
1527             for (i = 0; i < numfds; i++) {
1528                 __get_user(fd[i], target_fd + i);
1529             }
1530         } else if (cmsg->cmsg_level == SOL_SOCKET
1531                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1532             struct ucred *cred = (struct ucred *)data;
1533             struct target_ucred *target_cred =
1534                 (struct target_ucred *)target_data;
1535 
1536             __get_user(cred->pid, &target_cred->pid);
1537             __get_user(cred->uid, &target_cred->uid);
1538             __get_user(cred->gid, &target_cred->gid);
1539         } else {
1540             gemu_log("Unsupported ancillary data: %d/%d\n",
1541                                         cmsg->cmsg_level, cmsg->cmsg_type);
1542             memcpy(data, target_data, len);
1543         }
1544 
1545         cmsg = CMSG_NXTHDR(msgh, cmsg);
1546         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1547                                          target_cmsg_start);
1548     }
1549     unlock_user(target_cmsg, target_cmsg_addr, 0);
1550  the_end:
1551     msgh->msg_controllen = space;
1552     return 0;
1553 }
1554 
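/* Convert host ancillary data back into the guest's control buffer.
 * Payloads whose size differs between host and target (e.g. the
 * struct timeval carried by SO_TIMESTAMP) are resized, and truncation
 * caused by a too-small guest buffer is reported via MSG_CTRUNC,
 * mirroring the kernel's put_cmsg() behaviour.
 */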
1555 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1556                                            struct msghdr *msgh)
1557 {
1558     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1559     abi_long msg_controllen;
1560     abi_ulong target_cmsg_addr;
1561     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1562     socklen_t space = 0;
1563 
1564     msg_controllen = tswapal(target_msgh->msg_controllen);
1565     if (msg_controllen < sizeof (struct target_cmsghdr))
1566         goto the_end;
1567     target_cmsg_addr = tswapal(target_msgh->msg_control);
1568     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1569     target_cmsg_start = target_cmsg;
1570     if (!target_cmsg)
1571         return -TARGET_EFAULT;
1572 
1573     while (cmsg && target_cmsg) {
1574         void *data = CMSG_DATA(cmsg);
1575         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1576 
1577         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1578         int tgt_len, tgt_space;
1579 
1580         /* We never copy a half-header but may copy half-data;
1581          * this is Linux's behaviour in put_cmsg(). Note that
1582          * truncation here is a guest problem (which we report
1583          * to the guest via the CTRUNC bit), unlike truncation
1584          * in target_to_host_cmsg, which is a QEMU bug.
1585          */
1586         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1587             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1588             break;
1589         }
1590 
1591         if (cmsg->cmsg_level == SOL_SOCKET) {
1592             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1593         } else {
1594             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1595         }
1596         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1597 
1598         /* Payload types which need a different size of payload on
1599          * the target must adjust tgt_len here.
1600          */
1601         tgt_len = len;
1602         switch (cmsg->cmsg_level) {
1603         case SOL_SOCKET:
1604             switch (cmsg->cmsg_type) {
1605             case SO_TIMESTAMP:
1606                 tgt_len = sizeof(struct target_timeval);
1607                 break;
1608             default:
1609                 break;
1610             }
1611             break;
1612         default:
1613             break;
1614         }
1615 
1616         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1617             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1618             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1619         }
1620 
1621         /* We must now copy-and-convert len bytes of payload
1622          * into tgt_len bytes of destination space. Bear in mind
1623          * that in both source and destination we may be dealing
1624          * with a truncated value!
1625          */
1626         switch (cmsg->cmsg_level) {
1627         case SOL_SOCKET:
1628             switch (cmsg->cmsg_type) {
1629             case SCM_RIGHTS:
1630             {
1631                 int *fd = (int *)data;
1632                 int *target_fd = (int *)target_data;
1633                 int i, numfds = tgt_len / sizeof(int);
1634 
1635                 for (i = 0; i < numfds; i++) {
1636                     __put_user(fd[i], target_fd + i);
1637                 }
1638                 break;
1639             }
1640             case SO_TIMESTAMP:
1641             {
1642                 struct timeval *tv = (struct timeval *)data;
1643                 struct target_timeval *target_tv =
1644                     (struct target_timeval *)target_data;
1645 
1646                 if (len != sizeof(struct timeval) ||
1647                     tgt_len != sizeof(struct target_timeval)) {
1648                     goto unimplemented;
1649                 }
1650 
1651                 /* copy struct timeval to target */
1652                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1653                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1654                 break;
1655             }
1656             case SCM_CREDENTIALS:
1657             {
1658                 struct ucred *cred = (struct ucred *)data;
1659                 struct target_ucred *target_cred =
1660                     (struct target_ucred *)target_data;
1661 
1662                 __put_user(cred->pid, &target_cred->pid);
1663                 __put_user(cred->uid, &target_cred->uid);
1664                 __put_user(cred->gid, &target_cred->gid);
1665                 break;
1666             }
1667             default:
1668                 goto unimplemented;
1669             }
1670             break;
1671 
1672         case SOL_IP:
1673             switch (cmsg->cmsg_type) {
1674             case IP_TTL:
1675             {
1676                 uint32_t *v = (uint32_t *)data;
1677                 uint32_t *t_int = (uint32_t *)target_data;
1678 
1679                 if (len != sizeof(uint32_t) ||
1680                     tgt_len != sizeof(uint32_t)) {
1681                     goto unimplemented;
1682                 }
1683                 __put_user(*v, t_int);
1684                 break;
1685             }
1686             case IP_RECVERR:
1687             {
1688                 struct errhdr_t {
1689                    struct sock_extended_err ee;
1690                    struct sockaddr_in offender;
1691                 };
1692                 struct errhdr_t *errh = (struct errhdr_t *)data;
1693                 struct errhdr_t *target_errh =
1694                     (struct errhdr_t *)target_data;
1695 
1696                 if (len != sizeof(struct errhdr_t) ||
1697                     tgt_len != sizeof(struct errhdr_t)) {
1698                     goto unimplemented;
1699                 }
1700                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1701                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1702                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1703                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1704                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1705                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1706                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1707                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1708                     (void *) &errh->offender, sizeof(errh->offender));
1709                 break;
1710             }
1711             default:
1712                 goto unimplemented;
1713             }
1714             break;
1715 
1716         case SOL_IPV6:
1717             switch (cmsg->cmsg_type) {
1718             case IPV6_HOPLIMIT:
1719             {
1720                 uint32_t *v = (uint32_t *)data;
1721                 uint32_t *t_int = (uint32_t *)target_data;
1722 
1723                 if (len != sizeof(uint32_t) ||
1724                     tgt_len != sizeof(uint32_t)) {
1725                     goto unimplemented;
1726                 }
1727                 __put_user(*v, t_int);
1728                 break;
1729             }
1730             case IPV6_RECVERR:
1731             {
1732                 struct errhdr6_t {
1733                    struct sock_extended_err ee;
1734                    struct sockaddr_in6 offender;
1735                 };
1736                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1737                 struct errhdr6_t *target_errh =
1738                     (struct errhdr6_t *)target_data;
1739 
1740                 if (len != sizeof(struct errhdr6_t) ||
1741                     tgt_len != sizeof(struct errhdr6_t)) {
1742                     goto unimplemented;
1743                 }
1744                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1745                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1746                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1747                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1748                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1749                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1750                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1751                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1752                     (void *) &errh->offender, sizeof(errh->offender));
1753                 break;
1754             }
1755             default:
1756                 goto unimplemented;
1757             }
1758             break;
1759 
1760         default:
1761         unimplemented:
1762             gemu_log("Unsupported ancillary data: %d/%d\n",
1763                                         cmsg->cmsg_level, cmsg->cmsg_type);
1764             memcpy(target_data, data, MIN(len, tgt_len));
1765             if (tgt_len > len) {
1766                 memset(target_data + len, 0, tgt_len - len);
1767             }
1768         }
1769 
1770         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1771         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1772         if (msg_controllen < tgt_space) {
1773             tgt_space = msg_controllen;
1774         }
1775         msg_controllen -= tgt_space;
1776         space += tgt_space;
1777         cmsg = CMSG_NXTHDR(msgh, cmsg);
1778         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1779                                          target_cmsg_start);
1780     }
1781     unlock_user(target_cmsg, target_cmsg_addr, space);
1782  the_end:
1783     target_msgh->msg_controllen = tswapal(space);
1784     return 0;
1785 }
1786 
1787 /* do_setsockopt() Must return target values and target errnos. */
1788 static abi_long do_setsockopt(int sockfd, int level, int optname,
1789                               abi_ulong optval_addr, socklen_t optlen)
1790 {
1791     abi_long ret;
1792     int val;
1793     struct ip_mreqn *ip_mreq;
1794     struct ip_mreq_source *ip_mreq_source;
1795 
1796     switch(level) {
1797     case SOL_TCP:
1798         /* TCP options all take an 'int' value.  */
1799         if (optlen < sizeof(uint32_t))
1800             return -TARGET_EINVAL;
1801 
1802         if (get_user_u32(val, optval_addr))
1803             return -TARGET_EFAULT;
1804         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1805         break;
1806     case SOL_IP:
1807         switch(optname) {
1808         case IP_TOS:
1809         case IP_TTL:
1810         case IP_HDRINCL:
1811         case IP_ROUTER_ALERT:
1812         case IP_RECVOPTS:
1813         case IP_RETOPTS:
1814         case IP_PKTINFO:
1815         case IP_MTU_DISCOVER:
1816         case IP_RECVERR:
1817         case IP_RECVTTL:
1818         case IP_RECVTOS:
1819 #ifdef IP_FREEBIND
1820         case IP_FREEBIND:
1821 #endif
1822         case IP_MULTICAST_TTL:
1823         case IP_MULTICAST_LOOP:
1824             val = 0;
1825             if (optlen >= sizeof(uint32_t)) {
1826                 if (get_user_u32(val, optval_addr))
1827                     return -TARGET_EFAULT;
1828             } else if (optlen >= 1) {
1829                 if (get_user_u8(val, optval_addr))
1830                     return -TARGET_EFAULT;
1831             }
1832             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1833             break;
1834         case IP_ADD_MEMBERSHIP:
1835         case IP_DROP_MEMBERSHIP:
1836             if (optlen < sizeof (struct target_ip_mreq) ||
1837                 optlen > sizeof (struct target_ip_mreqn))
1838                 return -TARGET_EINVAL;
1839 
1840             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1841             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1842             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1843             break;
1844 
1845         case IP_BLOCK_SOURCE:
1846         case IP_UNBLOCK_SOURCE:
1847         case IP_ADD_SOURCE_MEMBERSHIP:
1848         case IP_DROP_SOURCE_MEMBERSHIP:
1849             if (optlen != sizeof (struct target_ip_mreq_source))
1850                 return -TARGET_EINVAL;
1851 
1852             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1853             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1854             unlock_user (ip_mreq_source, optval_addr, 0);
1855             break;
1856 
1857         default:
1858             goto unimplemented;
1859         }
1860         break;
1861     case SOL_IPV6:
1862         switch (optname) {
1863         case IPV6_MTU_DISCOVER:
1864         case IPV6_MTU:
1865         case IPV6_V6ONLY:
1866         case IPV6_RECVPKTINFO:
1867         case IPV6_UNICAST_HOPS:
1868         case IPV6_MULTICAST_HOPS:
1869         case IPV6_MULTICAST_LOOP:
1870         case IPV6_RECVERR:
1871         case IPV6_RECVHOPLIMIT:
1872         case IPV6_2292HOPLIMIT:
1873         case IPV6_CHECKSUM:
1874         case IPV6_ADDRFORM:
1875         case IPV6_2292PKTINFO:
1876         case IPV6_RECVTCLASS:
1877         case IPV6_RECVRTHDR:
1878         case IPV6_2292RTHDR:
1879         case IPV6_RECVHOPOPTS:
1880         case IPV6_2292HOPOPTS:
1881         case IPV6_RECVDSTOPTS:
1882         case IPV6_2292DSTOPTS:
1883         case IPV6_TCLASS:
1884 #ifdef IPV6_RECVPATHMTU
1885         case IPV6_RECVPATHMTU:
1886 #endif
1887 #ifdef IPV6_TRANSPARENT
1888         case IPV6_TRANSPARENT:
1889 #endif
1890 #ifdef IPV6_FREEBIND
1891         case IPV6_FREEBIND:
1892 #endif
1893 #ifdef IPV6_RECVORIGDSTADDR
1894         case IPV6_RECVORIGDSTADDR:
1895 #endif
1896             val = 0;
1897             if (optlen < sizeof(uint32_t)) {
1898                 return -TARGET_EINVAL;
1899             }
1900             if (get_user_u32(val, optval_addr)) {
1901                 return -TARGET_EFAULT;
1902             }
1903             ret = get_errno(setsockopt(sockfd, level, optname,
1904                                        &val, sizeof(val)));
1905             break;
1906         case IPV6_PKTINFO:
1907         {
1908             struct in6_pktinfo pki;
1909 
1910             if (optlen < sizeof(pki)) {
1911                 return -TARGET_EINVAL;
1912             }
1913 
1914             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1915                 return -TARGET_EFAULT;
1916             }
1917 
1918             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1919 
1920             ret = get_errno(setsockopt(sockfd, level, optname,
1921                                        &pki, sizeof(pki)));
1922             break;
1923         }
1924         default:
1925             goto unimplemented;
1926         }
1927         break;
1928     case SOL_ICMPV6:
1929         switch (optname) {
1930         case ICMPV6_FILTER:
1931         {
1932             struct icmp6_filter icmp6f;
1933 
1934             if (optlen > sizeof(icmp6f)) {
1935                 optlen = sizeof(icmp6f);
1936             }
1937 
1938             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
1939                 return -TARGET_EFAULT;
1940             }
1941 
1942             for (val = 0; val < 8; val++) {
1943                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
1944             }
1945 
1946             ret = get_errno(setsockopt(sockfd, level, optname,
1947                                        &icmp6f, optlen));
1948             break;
1949         }
1950         default:
1951             goto unimplemented;
1952         }
1953         break;
1954     case SOL_RAW:
1955         switch (optname) {
1956         case ICMP_FILTER:
1957         case IPV6_CHECKSUM:
1958             /* these take a u32 value */
1959             if (optlen < sizeof(uint32_t)) {
1960                 return -TARGET_EINVAL;
1961             }
1962 
1963             if (get_user_u32(val, optval_addr)) {
1964                 return -TARGET_EFAULT;
1965             }
1966             ret = get_errno(setsockopt(sockfd, level, optname,
1967                                        &val, sizeof(val)));
1968             break;
1969 
1970         default:
1971             goto unimplemented;
1972         }
1973         break;
1974     case TARGET_SOL_SOCKET:
1975         switch (optname) {
1976         case TARGET_SO_RCVTIMEO:
1977         {
1978                 struct timeval tv;
1979 
1980                 optname = SO_RCVTIMEO;
1981 
1982 set_timeout:
1983                 if (optlen != sizeof(struct target_timeval)) {
1984                     return -TARGET_EINVAL;
1985                 }
1986 
1987                 if (copy_from_user_timeval(&tv, optval_addr)) {
1988                     return -TARGET_EFAULT;
1989                 }
1990 
1991                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1992                                 &tv, sizeof(tv)));
1993                 return ret;
1994         }
1995         case TARGET_SO_SNDTIMEO:
1996                 optname = SO_SNDTIMEO;
1997                 goto set_timeout;
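        /* SO_ATTACH_FILTER takes a classic BPF program.  The guest's
         * sock_fprog contains a target pointer, and the instructions
         * themselves carry 16/32-bit fields, so the program is copied
         * and byte-swapped instruction by instruction before being
         * handed to the host kernel.
         */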
1998         case TARGET_SO_ATTACH_FILTER:
1999         {
2000                 struct target_sock_fprog *tfprog;
2001                 struct target_sock_filter *tfilter;
2002                 struct sock_fprog fprog;
2003                 struct sock_filter *filter;
2004                 int i;
2005 
2006                 if (optlen != sizeof(*tfprog)) {
2007                     return -TARGET_EINVAL;
2008                 }
2009                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2010                     return -TARGET_EFAULT;
2011                 }
2012                 if (!lock_user_struct(VERIFY_READ, tfilter,
2013                                       tswapal(tfprog->filter), 0)) {
2014                     unlock_user_struct(tfprog, optval_addr, 1);
2015                     return -TARGET_EFAULT;
2016                 }
2017 
2018                 fprog.len = tswap16(tfprog->len);
2019                 filter = g_try_new(struct sock_filter, fprog.len);
2020                 if (filter == NULL) {
2021                     unlock_user_struct(tfilter, tfprog->filter, 1);
2022                     unlock_user_struct(tfprog, optval_addr, 1);
2023                     return -TARGET_ENOMEM;
2024                 }
2025                 for (i = 0; i < fprog.len; i++) {
2026                     filter[i].code = tswap16(tfilter[i].code);
2027                     filter[i].jt = tfilter[i].jt;
2028                     filter[i].jf = tfilter[i].jf;
2029                     filter[i].k = tswap32(tfilter[i].k);
2030                 }
2031                 fprog.filter = filter;
2032 
2033                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2034                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2035                 g_free(filter);
2036 
2037                 unlock_user_struct(tfilter, tfprog->filter, 1);
2038                 unlock_user_struct(tfprog, optval_addr, 1);
2039                 return ret;
2040         }
2041         case TARGET_SO_BINDTODEVICE:
2042         {
2043                 char *dev_ifname, *addr_ifname;
2044 
2045                 if (optlen > IFNAMSIZ - 1) {
2046                     optlen = IFNAMSIZ - 1;
2047                 }
2048                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2049                 if (!dev_ifname) {
2050                     return -TARGET_EFAULT;
2051                 }
2052                 optname = SO_BINDTODEVICE;
2053                 addr_ifname = alloca(IFNAMSIZ);
2054                 memcpy(addr_ifname, dev_ifname, optlen);
2055                 addr_ifname[optlen] = 0;
2056                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2057                                            addr_ifname, optlen));
2058                 unlock_user(dev_ifname, optval_addr, 0);
2059                 return ret;
2060         }
2061         case TARGET_SO_LINGER:
2062         {
2063                 struct linger lg;
2064                 struct target_linger *tlg;
2065 
2066                 if (optlen != sizeof(struct target_linger)) {
2067                     return -TARGET_EINVAL;
2068                 }
2069                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2070                     return -TARGET_EFAULT;
2071                 }
2072                 __get_user(lg.l_onoff, &tlg->l_onoff);
2073                 __get_user(lg.l_linger, &tlg->l_linger);
2074                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2075                                 &lg, sizeof(lg)));
2076                 unlock_user_struct(tlg, optval_addr, 0);
2077                 return ret;
2078         }
2079             /* Options with 'int' argument.  */
2080         case TARGET_SO_DEBUG:
2081                 optname = SO_DEBUG;
2082                 break;
2083         case TARGET_SO_REUSEADDR:
2084                 optname = SO_REUSEADDR;
2085                 break;
2086 #ifdef SO_REUSEPORT
2087         case TARGET_SO_REUSEPORT:
2088                 optname = SO_REUSEPORT;
2089                 break;
2090 #endif
2091         case TARGET_SO_TYPE:
2092                 optname = SO_TYPE;
2093                 break;
2094         case TARGET_SO_ERROR:
2095                 optname = SO_ERROR;
2096                 break;
2097         case TARGET_SO_DONTROUTE:
2098                 optname = SO_DONTROUTE;
2099                 break;
2100         case TARGET_SO_BROADCAST:
2101                 optname = SO_BROADCAST;
2102                 break;
2103         case TARGET_SO_SNDBUF:
2104                 optname = SO_SNDBUF;
2105                 break;
2106         case TARGET_SO_SNDBUFFORCE:
2107                 optname = SO_SNDBUFFORCE;
2108                 break;
2109         case TARGET_SO_RCVBUF:
2110                 optname = SO_RCVBUF;
2111                 break;
2112         case TARGET_SO_RCVBUFFORCE:
2113                 optname = SO_RCVBUFFORCE;
2114                 break;
2115         case TARGET_SO_KEEPALIVE:
2116                 optname = SO_KEEPALIVE;
2117                 break;
2118         case TARGET_SO_OOBINLINE:
2119                 optname = SO_OOBINLINE;
2120                 break;
2121         case TARGET_SO_NO_CHECK:
2122                 optname = SO_NO_CHECK;
2123                 break;
2124         case TARGET_SO_PRIORITY:
2125                 optname = SO_PRIORITY;
2126                 break;
2127 #ifdef SO_BSDCOMPAT
2128         case TARGET_SO_BSDCOMPAT:
2129                 optname = SO_BSDCOMPAT;
2130                 break;
2131 #endif
2132         case TARGET_SO_PASSCRED:
2133                 optname = SO_PASSCRED;
2134                 break;
2135         case TARGET_SO_PASSSEC:
2136                 optname = SO_PASSSEC;
2137                 break;
2138         case TARGET_SO_TIMESTAMP:
2139                 optname = SO_TIMESTAMP;
2140                 break;
2141         case TARGET_SO_RCVLOWAT:
2142                 optname = SO_RCVLOWAT;
2143                 break;
2144         default:
2145             goto unimplemented;
2146         }
2147         if (optlen < sizeof(uint32_t))
2148             return -TARGET_EINVAL;
2149 
2150         if (get_user_u32(val, optval_addr))
2151             return -TARGET_EFAULT;
2152         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2153         break;
2154     default:
2155     unimplemented:
2156         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2157         ret = -TARGET_ENOPROTOOPT;
2158     }
2159     return ret;
2160 }
2161 
2162 /* do_getsockopt() Must return target values and target errnos. */
2163 static abi_long do_getsockopt(int sockfd, int level, int optname,
2164                               abi_ulong optval_addr, abi_ulong optlen)
2165 {
2166     abi_long ret;
2167     int len, val;
2168     socklen_t lv;
2169 
2170     switch(level) {
2171     case TARGET_SOL_SOCKET:
2172         level = SOL_SOCKET;
2173         switch (optname) {
2174         /* These don't just return a single integer */
2175         case TARGET_SO_RCVTIMEO:
2176         case TARGET_SO_SNDTIMEO:
2177         case TARGET_SO_PEERNAME:
2178             goto unimplemented;
2179         case TARGET_SO_PEERCRED: {
2180             struct ucred cr;
2181             socklen_t crlen;
2182             struct target_ucred *tcr;
2183 
2184             if (get_user_u32(len, optlen)) {
2185                 return -TARGET_EFAULT;
2186             }
2187             if (len < 0) {
2188                 return -TARGET_EINVAL;
2189             }
2190 
2191             crlen = sizeof(cr);
2192             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2193                                        &cr, &crlen));
2194             if (ret < 0) {
2195                 return ret;
2196             }
2197             if (len > crlen) {
2198                 len = crlen;
2199             }
2200             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2201                 return -TARGET_EFAULT;
2202             }
2203             __put_user(cr.pid, &tcr->pid);
2204             __put_user(cr.uid, &tcr->uid);
2205             __put_user(cr.gid, &tcr->gid);
2206             unlock_user_struct(tcr, optval_addr, 1);
2207             if (put_user_u32(len, optlen)) {
2208                 return -TARGET_EFAULT;
2209             }
2210             break;
2211         }
2212         case TARGET_SO_LINGER:
2213         {
2214             struct linger lg;
2215             socklen_t lglen;
2216             struct target_linger *tlg;
2217 
2218             if (get_user_u32(len, optlen)) {
2219                 return -TARGET_EFAULT;
2220             }
2221             if (len < 0) {
2222                 return -TARGET_EINVAL;
2223             }
2224 
2225             lglen = sizeof(lg);
2226             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2227                                        &lg, &lglen));
2228             if (ret < 0) {
2229                 return ret;
2230             }
2231             if (len > lglen) {
2232                 len = lglen;
2233             }
2234             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2235                 return -TARGET_EFAULT;
2236             }
2237             __put_user(lg.l_onoff, &tlg->l_onoff);
2238             __put_user(lg.l_linger, &tlg->l_linger);
2239             unlock_user_struct(tlg, optval_addr, 1);
2240             if (put_user_u32(len, optlen)) {
2241                 return -TARGET_EFAULT;
2242             }
2243             break;
2244         }
2245         /* Options with 'int' argument.  */
2246         case TARGET_SO_DEBUG:
2247             optname = SO_DEBUG;
2248             goto int_case;
2249         case TARGET_SO_REUSEADDR:
2250             optname = SO_REUSEADDR;
2251             goto int_case;
2252 #ifdef SO_REUSEPORT
2253         case TARGET_SO_REUSEPORT:
2254             optname = SO_REUSEPORT;
2255             goto int_case;
2256 #endif
2257         case TARGET_SO_TYPE:
2258             optname = SO_TYPE;
2259             goto int_case;
2260         case TARGET_SO_ERROR:
2261             optname = SO_ERROR;
2262             goto int_case;
2263         case TARGET_SO_DONTROUTE:
2264             optname = SO_DONTROUTE;
2265             goto int_case;
2266         case TARGET_SO_BROADCAST:
2267             optname = SO_BROADCAST;
2268             goto int_case;
2269         case TARGET_SO_SNDBUF:
2270             optname = SO_SNDBUF;
2271             goto int_case;
2272         case TARGET_SO_RCVBUF:
2273             optname = SO_RCVBUF;
2274             goto int_case;
2275         case TARGET_SO_KEEPALIVE:
2276             optname = SO_KEEPALIVE;
2277             goto int_case;
2278         case TARGET_SO_OOBINLINE:
2279             optname = SO_OOBINLINE;
2280             goto int_case;
2281         case TARGET_SO_NO_CHECK:
2282             optname = SO_NO_CHECK;
2283             goto int_case;
2284         case TARGET_SO_PRIORITY:
2285             optname = SO_PRIORITY;
2286             goto int_case;
2287 #ifdef SO_BSDCOMPAT
2288         case TARGET_SO_BSDCOMPAT:
2289             optname = SO_BSDCOMPAT;
2290             goto int_case;
2291 #endif
2292         case TARGET_SO_PASSCRED:
2293             optname = SO_PASSCRED;
2294             goto int_case;
2295         case TARGET_SO_TIMESTAMP:
2296             optname = SO_TIMESTAMP;
2297             goto int_case;
2298         case TARGET_SO_RCVLOWAT:
2299             optname = SO_RCVLOWAT;
2300             goto int_case;
2301         case TARGET_SO_ACCEPTCONN:
2302             optname = SO_ACCEPTCONN;
2303             goto int_case;
2304         default:
2305             goto int_case;
2306         }
2307         break;
2308     case SOL_TCP:
2309         /* TCP options all take an 'int' value.  */
2310     int_case:
2311         if (get_user_u32(len, optlen))
2312             return -TARGET_EFAULT;
2313         if (len < 0)
2314             return -TARGET_EINVAL;
2315         lv = sizeof(lv);
2316         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2317         if (ret < 0)
2318             return ret;
2319         if (optname == SO_TYPE) {
2320             val = host_to_target_sock_type(val);
2321         }
2322         if (len > lv)
2323             len = lv;
2324         if (len == 4) {
2325             if (put_user_u32(val, optval_addr))
2326                 return -TARGET_EFAULT;
2327         } else {
2328             if (put_user_u8(val, optval_addr))
2329                 return -TARGET_EFAULT;
2330         }
2331         if (put_user_u32(len, optlen))
2332             return -TARGET_EFAULT;
2333         break;
2334     case SOL_IP:
2335         switch(optname) {
2336         case IP_TOS:
2337         case IP_TTL:
2338         case IP_HDRINCL:
2339         case IP_ROUTER_ALERT:
2340         case IP_RECVOPTS:
2341         case IP_RETOPTS:
2342         case IP_PKTINFO:
2343         case IP_MTU_DISCOVER:
2344         case IP_RECVERR:
2345         case IP_RECVTOS:
2346 #ifdef IP_FREEBIND
2347         case IP_FREEBIND:
2348 #endif
2349         case IP_MULTICAST_TTL:
2350         case IP_MULTICAST_LOOP:
2351             if (get_user_u32(len, optlen))
2352                 return -TARGET_EFAULT;
2353             if (len < 0)
2354                 return -TARGET_EINVAL;
2355             lv = sizeof(lv);
2356             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2357             if (ret < 0)
2358                 return ret;
2359             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2360                 len = 1;
2361                 if (put_user_u32(len, optlen)
2362                     || put_user_u8(val, optval_addr))
2363                     return -TARGET_EFAULT;
2364             } else {
2365                 if (len > sizeof(int))
2366                     len = sizeof(int);
2367                 if (put_user_u32(len, optlen)
2368                     || put_user_u32(val, optval_addr))
2369                     return -TARGET_EFAULT;
2370             }
2371             break;
2372         default:
2373             ret = -TARGET_ENOPROTOOPT;
2374             break;
2375         }
2376         break;
2377     case SOL_IPV6:
2378         switch (optname) {
2379         case IPV6_MTU_DISCOVER:
2380         case IPV6_MTU:
2381         case IPV6_V6ONLY:
2382         case IPV6_RECVPKTINFO:
2383         case IPV6_UNICAST_HOPS:
2384         case IPV6_MULTICAST_HOPS:
2385         case IPV6_MULTICAST_LOOP:
2386         case IPV6_RECVERR:
2387         case IPV6_RECVHOPLIMIT:
2388         case IPV6_2292HOPLIMIT:
2389         case IPV6_CHECKSUM:
2390         case IPV6_ADDRFORM:
2391         case IPV6_2292PKTINFO:
2392         case IPV6_RECVTCLASS:
2393         case IPV6_RECVRTHDR:
2394         case IPV6_2292RTHDR:
2395         case IPV6_RECVHOPOPTS:
2396         case IPV6_2292HOPOPTS:
2397         case IPV6_RECVDSTOPTS:
2398         case IPV6_2292DSTOPTS:
2399         case IPV6_TCLASS:
2400 #ifdef IPV6_RECVPATHMTU
2401         case IPV6_RECVPATHMTU:
2402 #endif
2403 #ifdef IPV6_TRANSPARENT
2404         case IPV6_TRANSPARENT:
2405 #endif
2406 #ifdef IPV6_FREEBIND
2407         case IPV6_FREEBIND:
2408 #endif
2409 #ifdef IPV6_RECVORIGDSTADDR
2410         case IPV6_RECVORIGDSTADDR:
2411 #endif
2412             if (get_user_u32(len, optlen))
2413                 return -TARGET_EFAULT;
2414             if (len < 0)
2415                 return -TARGET_EINVAL;
2416             lv = sizeof(lv);
2417             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2418             if (ret < 0)
2419                 return ret;
2420             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2421                 len = 1;
2422                 if (put_user_u32(len, optlen)
2423                     || put_user_u8(val, optval_addr))
2424                     return -TARGET_EFAULT;
2425             } else {
2426                 if (len > sizeof(int))
2427                     len = sizeof(int);
2428                 if (put_user_u32(len, optlen)
2429                     || put_user_u32(val, optval_addr))
2430                     return -TARGET_EFAULT;
2431             }
2432             break;
2433         default:
2434             ret = -TARGET_ENOPROTOOPT;
2435             break;
2436         }
2437         break;
2438     default:
2439     unimplemented:
2440         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2441                  level, optname);
2442         ret = -TARGET_EOPNOTSUPP;
2443         break;
2444     }
2445     return ret;
2446 }
2447 
2448 /* Convert target low/high pair representing file offset into the host
2449  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2450  * as the kernel doesn't handle them either.
2451  */
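/* Example: with a 32-bit target and a 64-bit host, tlow=0x89abcdef and
 * thigh=0x01234567 combine into off=0x0123456789abcdef; *hlow receives
 * the full offset and *hhigh ends up 0.  With a 32-bit host the same
 * pair yields *hlow=0x89abcdef and *hhigh=0x01234567.
 */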
2452 static void target_to_host_low_high(abi_ulong tlow,
2453                                     abi_ulong thigh,
2454                                     unsigned long *hlow,
2455                                     unsigned long *hhigh)
2456 {
2457     uint64_t off = tlow |
2458         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2459         TARGET_LONG_BITS / 2;
2460 
2461     *hlow = off;
2462     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2463 }
2464 
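/* Validate a guest iovec array and lock each buffer into host memory,
 * returning a host struct iovec array (or NULL with errno set).  A bad
 * address in the first buffer is a fault; bad addresses later in the
 * vector degrade to zero-length entries so that a partial transfer can
 * still take place.
 */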
2465 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2466                                 abi_ulong count, int copy)
2467 {
2468     struct target_iovec *target_vec;
2469     struct iovec *vec;
2470     abi_ulong total_len, max_len;
2471     int i;
2472     int err = 0;
2473     bool bad_address = false;
2474 
2475     if (count == 0) {
2476         errno = 0;
2477         return NULL;
2478     }
2479     if (count > IOV_MAX) {
2480         errno = EINVAL;
2481         return NULL;
2482     }
2483 
2484     vec = g_try_new0(struct iovec, count);
2485     if (vec == NULL) {
2486         errno = ENOMEM;
2487         return NULL;
2488     }
2489 
2490     target_vec = lock_user(VERIFY_READ, target_addr,
2491                            count * sizeof(struct target_iovec), 1);
2492     if (target_vec == NULL) {
2493         err = EFAULT;
2494         goto fail2;
2495     }
2496 
2497     /* ??? If host page size > target page size, this will result in a
2498        value larger than what we can actually support.  */
2499     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2500     total_len = 0;
2501 
2502     for (i = 0; i < count; i++) {
2503         abi_ulong base = tswapal(target_vec[i].iov_base);
2504         abi_long len = tswapal(target_vec[i].iov_len);
2505 
2506         if (len < 0) {
2507             err = EINVAL;
2508             goto fail;
2509         } else if (len == 0) {
2510             /* Zero length pointer is ignored.  */
2511             vec[i].iov_base = 0;
2512         } else {
2513             vec[i].iov_base = lock_user(type, base, len, copy);
2514             /* If the first buffer pointer is bad, this is a fault.  But
2515              * subsequent bad buffers will result in a partial write; this
2516              * is realized by filling the vector with null pointers and
2517              * zero lengths. */
2518             if (!vec[i].iov_base) {
2519                 if (i == 0) {
2520                     err = EFAULT;
2521                     goto fail;
2522                 } else {
2523                     bad_address = true;
2524                 }
2525             }
2526             if (bad_address) {
2527                 len = 0;
2528             }
2529             if (len > max_len - total_len) {
2530                 len = max_len - total_len;
2531             }
2532         }
2533         vec[i].iov_len = len;
2534         total_len += len;
2535     }
2536 
2537     unlock_user(target_vec, target_addr, 0);
2538     return vec;
2539 
2540  fail:
2541     while (--i >= 0) {
2542         if (tswapal(target_vec[i].iov_len) > 0) {
2543             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2544         }
2545     }
2546     unlock_user(target_vec, target_addr, 0);
2547  fail2:
2548     g_free(vec);
2549     errno = err;
2550     return NULL;
2551 }
2552 
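/* Release the buffers locked by lock_iovec(), copying data back to the
 * guest when 'copy' is set, and free the host iovec array.
 */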
2553 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2554                          abi_ulong count, int copy)
2555 {
2556     struct target_iovec *target_vec;
2557     int i;
2558 
2559     target_vec = lock_user(VERIFY_READ, target_addr,
2560                            count * sizeof(struct target_iovec), 1);
2561     if (target_vec) {
2562         for (i = 0; i < count; i++) {
2563             abi_ulong base = tswapal(target_vec[i].iov_base);
2564             abi_long len = tswapal(target_vec[i].iov_len);
2565             if (len < 0) {
2566                 break;
2567             }
2568             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2569         }
2570         unlock_user(target_vec, target_addr, 0);
2571     }
2572 
2573     g_free(vec);
2574 }
2575 
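/* Translate the guest's socket type argument, which may have
 * TARGET_SOCK_CLOEXEC/TARGET_SOCK_NONBLOCK OR'ed into it, to the host
 * equivalents.  Flags the host cannot express are rejected here,
 * except that a missing SOCK_NONBLOCK can be emulated after socket
 * creation via O_NONBLOCK (see sock_flags_fixup()).
 */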
2576 static inline int target_to_host_sock_type(int *type)
2577 {
2578     int host_type = 0;
2579     int target_type = *type;
2580 
2581     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2582     case TARGET_SOCK_DGRAM:
2583         host_type = SOCK_DGRAM;
2584         break;
2585     case TARGET_SOCK_STREAM:
2586         host_type = SOCK_STREAM;
2587         break;
2588     default:
2589         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2590         break;
2591     }
2592     if (target_type & TARGET_SOCK_CLOEXEC) {
2593 #if defined(SOCK_CLOEXEC)
2594         host_type |= SOCK_CLOEXEC;
2595 #else
2596         return -TARGET_EINVAL;
2597 #endif
2598     }
2599     if (target_type & TARGET_SOCK_NONBLOCK) {
2600 #if defined(SOCK_NONBLOCK)
2601         host_type |= SOCK_NONBLOCK;
2602 #elif !defined(O_NONBLOCK)
2603         return -TARGET_EINVAL;
2604 #endif
2605     }
2606     *type = host_type;
2607     return 0;
2608 }
2609 
2610 /* Try to emulate socket type flags after socket creation.  */
2611 static int sock_flags_fixup(int fd, int target_type)
2612 {
2613 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2614     if (target_type & TARGET_SOCK_NONBLOCK) {
2615         int flags = fcntl(fd, F_GETFL);
2616         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2617             close(fd);
2618             return -TARGET_EINVAL;
2619         }
2620     }
2621 #endif
2622     return fd;
2623 }
2624 
2625 /* do_socket() Must return target values and target errnos. */
2626 static abi_long do_socket(int domain, int type, int protocol)
2627 {
2628     int target_type = type;
2629     int ret;
2630 
2631     ret = target_to_host_sock_type(&type);
2632     if (ret) {
2633         return ret;
2634     }
2635 
2636     if (domain == PF_NETLINK && !(
2637 #ifdef CONFIG_RTNETLINK
2638          protocol == NETLINK_ROUTE ||
2639 #endif
2640          protocol == NETLINK_KOBJECT_UEVENT ||
2641          protocol == NETLINK_AUDIT)) {
2642         return -EPFNOSUPPORT;
2643     }
2644 
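    /* For packet sockets the protocol number is expected in network
     * byte order.  The guest already applied htons() with its own
     * endianness, so tswap16() re-expresses the value in the form the
     * host kernel expects.
     */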
2645     if (domain == AF_PACKET ||
2646         (domain == AF_INET && type == SOCK_PACKET)) {
2647         protocol = tswap16(protocol);
2648     }
2649 
2650     ret = get_errno(socket(domain, type, protocol));
2651     if (ret >= 0) {
2652         ret = sock_flags_fixup(ret, target_type);
2653         if (type == SOCK_PACKET) {
2654             /* Handle an obsolete case: if the socket type is
2655              * SOCK_PACKET, the socket is bound by interface name.
2656              */
2657             fd_trans_register(ret, &target_packet_trans);
2658         } else if (domain == PF_NETLINK) {
2659             switch (protocol) {
2660 #ifdef CONFIG_RTNETLINK
2661             case NETLINK_ROUTE:
2662                 fd_trans_register(ret, &target_netlink_route_trans);
2663                 break;
2664 #endif
2665             case NETLINK_KOBJECT_UEVENT:
2666                 /* nothing to do: messages are strings */
2667                 break;
2668             case NETLINK_AUDIT:
2669                 fd_trans_register(ret, &target_netlink_audit_trans);
2670                 break;
2671             default:
2672                 g_assert_not_reached();
2673             }
2674         }
2675     }
2676     return ret;
2677 }
2678 
2679 /* do_bind() Must return target values and target errnos. */
2680 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2681                         socklen_t addrlen)
2682 {
2683     void *addr;
2684     abi_long ret;
2685 
2686     if ((int)addrlen < 0) {
2687         return -TARGET_EINVAL;
2688     }
2689 
2690     addr = alloca(addrlen+1);
2691 
2692     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2693     if (ret)
2694         return ret;
2695 
2696     return get_errno(bind(sockfd, addr, addrlen));
2697 }
2698 
2699 /* do_connect() Must return target values and target errnos. */
2700 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2701                            socklen_t addrlen)
2702 {
2703     void *addr;
2704     abi_long ret;
2705 
2706     if ((int)addrlen < 0) {
2707         return -TARGET_EINVAL;
2708     }
2709 
2710     addr = alloca(addrlen+1);
2711 
2712     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2713     if (ret)
2714         return ret;
2715 
2716     return get_errno(safe_connect(sockfd, addr, addrlen));
2717 }
2718 
2719 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2720 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2721                                       int flags, int send)
2722 {
2723     abi_long ret, len;
2724     struct msghdr msg;
2725     abi_ulong count;
2726     struct iovec *vec;
2727     abi_ulong target_vec;
2728 
2729     if (msgp->msg_name) {
2730         msg.msg_namelen = tswap32(msgp->msg_namelen);
2731         msg.msg_name = alloca(msg.msg_namelen+1);
2732         ret = target_to_host_sockaddr(fd, msg.msg_name,
2733                                       tswapal(msgp->msg_name),
2734                                       msg.msg_namelen);
2735         if (ret == -TARGET_EFAULT) {
2736             /* For connected sockets msg_name and msg_namelen must
2737              * be ignored, so returning EFAULT immediately is wrong.
2738              * Instead, pass a bad msg_name to the host kernel, and
2739              * let it decide whether to return EFAULT or not.
2740              */
2741             msg.msg_name = (void *)-1;
2742         } else if (ret) {
2743             goto out2;
2744         }
2745     } else {
2746         msg.msg_name = NULL;
2747         msg.msg_namelen = 0;
2748     }
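    /* The host control buffer is allocated at twice the guest's
     * msg_controllen: host cmsg headers and alignment can be larger
     * than the target's, and target_to_host_cmsg() checks this area
     * for overflow (see the "Host cmsg overflow" warning above).
     */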
2749     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2750     msg.msg_control = alloca(msg.msg_controllen);
2751     memset(msg.msg_control, 0, msg.msg_controllen);
2752 
2753     msg.msg_flags = tswap32(msgp->msg_flags);
2754 
2755     count = tswapal(msgp->msg_iovlen);
2756     target_vec = tswapal(msgp->msg_iov);
2757 
2758     if (count > IOV_MAX) {
2759         /* sendmsg/recvmsg return a different errno for this condition than
2760          * readv/writev, so we must catch it here before lock_iovec() does.
2761          */
2762         ret = -TARGET_EMSGSIZE;
2763         goto out2;
2764     }
2765 
2766     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2767                      target_vec, count, send);
2768     if (vec == NULL) {
2769         ret = -host_to_target_errno(errno);
2770         goto out2;
2771     }
2772     msg.msg_iovlen = count;
2773     msg.msg_iov = vec;
2774 
2775     if (send) {
2776         if (fd_trans_target_to_host_data(fd)) {
2777             void *host_msg;
2778 
2779             host_msg = g_malloc(msg.msg_iov->iov_len);
2780             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2781             ret = fd_trans_target_to_host_data(fd)(host_msg,
2782                                                    msg.msg_iov->iov_len);
2783             if (ret >= 0) {
2784                 msg.msg_iov->iov_base = host_msg;
2785                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2786             }
2787             g_free(host_msg);
2788         } else {
2789             ret = target_to_host_cmsg(&msg, msgp);
2790             if (ret == 0) {
2791                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2792             }
2793         }
2794     } else {
2795         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2796         if (!is_error(ret)) {
2797             len = ret;
2798             if (fd_trans_host_to_target_data(fd)) {
2799                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2800                                                MIN(msg.msg_iov->iov_len, len));
2801             } else {
2802                 ret = host_to_target_cmsg(msgp, &msg);
2803             }
2804             if (!is_error(ret)) {
2805                 msgp->msg_namelen = tswap32(msg.msg_namelen);
2806                 msgp->msg_flags = tswap32(msg.msg_flags);
2807                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2808                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2809                                     msg.msg_name, msg.msg_namelen);
2810                     if (ret) {
2811                         goto out;
2812                     }
2813                 }
2814 
2815                 ret = len;
2816             }
2817         }
2818     }
2819 
2820 out:
2821     unlock_iovec(vec, target_vec, count, !send);
2822 out2:
2823     return ret;
2824 }
2825 
2826 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2827                                int flags, int send)
2828 {
2829     abi_long ret;
2830     struct target_msghdr *msgp;
2831 
2832     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2833                           msgp,
2834                           target_msg,
2835                           send ? 1 : 0)) {
2836         return -TARGET_EFAULT;
2837     }
2838     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2839     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2840     return ret;
2841 }
2842 
2843 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2844  * so it might not have this *mmsg-specific flag either.
2845  */
2846 #ifndef MSG_WAITFORONE
2847 #define MSG_WAITFORONE 0x10000
2848 #endif
2849 
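/* do_sendrecvmmsg() implements sendmmsg/recvmmsg by looping over the
 * guest's mmsghdr vector and reusing do_sendrecvmsg_locked() for each
 * entry.  It returns the number of messages processed if any
 * succeeded, otherwise the first error.
 */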
2850 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2851                                 unsigned int vlen, unsigned int flags,
2852                                 int send)
2853 {
2854     struct target_mmsghdr *mmsgp;
2855     abi_long ret = 0;
2856     int i;
2857 
2858     if (vlen > UIO_MAXIOV) {
2859         vlen = UIO_MAXIOV;
2860     }
2861 
2862     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2863     if (!mmsgp) {
2864         return -TARGET_EFAULT;
2865     }
2866 
2867     for (i = 0; i < vlen; i++) {
2868         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2869         if (is_error(ret)) {
2870             break;
2871         }
2872         mmsgp[i].msg_len = tswap32(ret);
2873         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2874         if (flags & MSG_WAITFORONE) {
2875             flags |= MSG_DONTWAIT;
2876         }
2877     }
2878 
2879     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2880 
2881     /* Return number of datagrams sent if we sent any at all;
2882      * otherwise return the error.
2883      */
2884     if (i) {
2885         return i;
2886     }
2887     return ret;
2888 }
2889 
2890 /* do_accept4() Must return target values and target errnos. */
2891 static abi_long do_accept4(int fd, abi_ulong target_addr,
2892                            abi_ulong target_addrlen_addr, int flags)
2893 {
2894     socklen_t addrlen, ret_addrlen;
2895     void *addr;
2896     abi_long ret;
2897     int host_flags;
2898 
2899     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2900 
2901     if (target_addr == 0) {
2902         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2903     }
2904 
2905     /* linux returns EINVAL if addrlen pointer is invalid */
2906     if (get_user_u32(addrlen, target_addrlen_addr))
2907         return -TARGET_EINVAL;
2908 
2909     if ((int)addrlen < 0) {
2910         return -TARGET_EINVAL;
2911     }
2912 
2913     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2914         return -TARGET_EINVAL;
2915 
2916     addr = alloca(addrlen);
2917 
2918     ret_addrlen = addrlen;
2919     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
2920     if (!is_error(ret)) {
2921         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2922         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2923             ret = -TARGET_EFAULT;
2924         }
2925     }
2926     return ret;
2927 }
2928 
2929 /* do_getpeername() Must return target values and target errnos. */
2930 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2931                                abi_ulong target_addrlen_addr)
2932 {
2933     socklen_t addrlen, ret_addrlen;
2934     void *addr;
2935     abi_long ret;
2936 
2937     if (get_user_u32(addrlen, target_addrlen_addr))
2938         return -TARGET_EFAULT;
2939 
2940     if ((int)addrlen < 0) {
2941         return -TARGET_EINVAL;
2942     }
2943 
2944     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2945         return -TARGET_EFAULT;
2946 
2947     addr = alloca(addrlen);
2948 
2949     ret_addrlen = addrlen;
2950     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
2951     if (!is_error(ret)) {
2952         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2953         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2954             ret = -TARGET_EFAULT;
2955         }
2956     }
2957     return ret;
2958 }
2959 
2960 /* do_getsockname() Must return target values and target errnos. */
2961 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2962                                abi_ulong target_addrlen_addr)
2963 {
2964     socklen_t addrlen, ret_addrlen;
2965     void *addr;
2966     abi_long ret;
2967 
2968     if (get_user_u32(addrlen, target_addrlen_addr))
2969         return -TARGET_EFAULT;
2970 
2971     if ((int)addrlen < 0) {
2972         return -TARGET_EINVAL;
2973     }
2974 
2975     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2976         return -TARGET_EFAULT;
2977 
2978     addr = alloca(addrlen);
2979 
2980     ret_addrlen = addrlen;
2981     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
2982     if (!is_error(ret)) {
2983         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2984         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2985             ret = -TARGET_EFAULT;
2986         }
2987     }
2988     return ret;
2989 }
2990 
2991 /* do_socketpair() Must return target values and target errnos. */
2992 static abi_long do_socketpair(int domain, int type, int protocol,
2993                               abi_ulong target_tab_addr)
2994 {
2995     int tab[2];
2996     abi_long ret;
2997 
2998     target_to_host_sock_type(&type);
2999 
3000     ret = get_errno(socketpair(domain, type, protocol, tab));
3001     if (!is_error(ret)) {
3002         if (put_user_s32(tab[0], target_tab_addr)
3003             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3004             ret = -TARGET_EFAULT;
3005     }
3006     return ret;
3007 }
3008 
3009 /* do_sendto() Must return target values and target errnos. */
3010 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3011                           abi_ulong target_addr, socklen_t addrlen)
3012 {
3013     void *addr;
3014     void *host_msg;
3015     void *copy_msg = NULL;
3016     abi_long ret;
3017 
3018     if ((int)addrlen < 0) {
3019         return -TARGET_EINVAL;
3020     }
3021 
3022     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3023     if (!host_msg)
3024         return -TARGET_EFAULT;
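    /* If this fd has a data translator registered (e.g. a netlink
     * socket whose messages need byte-swapping), run it on a private
     * copy so the guest's send buffer is not modified in place.
     */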
3025     if (fd_trans_target_to_host_data(fd)) {
3026         copy_msg = host_msg;
3027         host_msg = g_malloc(len);
3028         memcpy(host_msg, copy_msg, len);
3029         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3030         if (ret < 0) {
3031             goto fail;
3032         }
3033     }
3034     if (target_addr) {
3035         addr = alloca(addrlen+1);
3036         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3037         if (ret) {
3038             goto fail;
3039         }
3040         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3041     } else {
3042         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3043     }
3044 fail:
3045     if (copy_msg) {
3046         g_free(host_msg);
3047         host_msg = copy_msg;
3048     }
3049     unlock_user(host_msg, msg, 0);
3050     return ret;
3051 }
3052 
3053 /* do_recvfrom() Must return target values and target errnos. */
3054 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3055                             abi_ulong target_addr,
3056                             abi_ulong target_addrlen)
3057 {
3058     socklen_t addrlen, ret_addrlen;
3059     void *addr;
3060     void *host_msg;
3061     abi_long ret;
3062 
3063     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3064     if (!host_msg)
3065         return -TARGET_EFAULT;
3066     if (target_addr) {
3067         if (get_user_u32(addrlen, target_addrlen)) {
3068             ret = -TARGET_EFAULT;
3069             goto fail;
3070         }
3071         if ((int)addrlen < 0) {
3072             ret = -TARGET_EINVAL;
3073             goto fail;
3074         }
3075         addr = alloca(addrlen);
3076         ret_addrlen = addrlen;
3077         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3078                                       addr, &ret_addrlen));
3079     } else {
3080         addr = NULL; /* To keep compiler quiet.  */
3081         addrlen = 0; /* To keep compiler quiet.  */
3082         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3083     }
3084     if (!is_error(ret)) {
3085         if (fd_trans_host_to_target_data(fd)) {
3086             abi_long trans;
3087             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3088             if (is_error(trans)) {
3089                 ret = trans;
3090                 goto fail;
3091             }
3092         }
3093         if (target_addr) {
3094             host_to_target_sockaddr(target_addr, addr,
3095                                     MIN(addrlen, ret_addrlen));
3096             if (put_user_u32(ret_addrlen, target_addrlen)) {
3097                 ret = -TARGET_EFAULT;
3098                 goto fail;
3099             }
3100         }
3101         unlock_user(host_msg, msg, len);
3102     } else {
3103 fail:
3104         unlock_user(host_msg, msg, 0);
3105     }
3106     return ret;
3107 }
3108 
3109 #ifdef TARGET_NR_socketcall
3110 /* do_socketcall() must return target values and target errnos. */
3111 static abi_long do_socketcall(int num, abi_ulong vptr)
3112 {
3113     static const unsigned nargs[] = { /* number of arguments per operation */
3114         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3115         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3116         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3117         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3118         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3119         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3120         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3121         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3122         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3123         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3124         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3125         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3126         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3127         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3128         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3129         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3130         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3131         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3132         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3133         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3134     };
3135     abi_long a[6]; /* max 6 args */
3136     unsigned i;
3137 
3138     /* check the range of the first argument num */
3139     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3140     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3141         return -TARGET_EINVAL;
3142     }
3143     /* ensure we have space for args */
3144     if (nargs[num] > ARRAY_SIZE(a)) {
3145         return -TARGET_EINVAL;
3146     }
3147     /* collect the arguments in a[] according to nargs[] */
3148     for (i = 0; i < nargs[num]; ++i) {
3149         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3150             return -TARGET_EFAULT;
3151         }
3152     }
3153     /* now when we have the args, invoke the appropriate underlying function */
3154     switch (num) {
3155     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3156         return do_socket(a[0], a[1], a[2]);
3157     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3158         return do_bind(a[0], a[1], a[2]);
3159     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3160         return do_connect(a[0], a[1], a[2]);
3161     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3162         return get_errno(listen(a[0], a[1]));
3163     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3164         return do_accept4(a[0], a[1], a[2], 0);
3165     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3166         return do_getsockname(a[0], a[1], a[2]);
3167     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3168         return do_getpeername(a[0], a[1], a[2]);
3169     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3170         return do_socketpair(a[0], a[1], a[2], a[3]);
3171     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3172         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3173     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3174         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3175     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3176         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3177     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3178         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3179     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3180         return get_errno(shutdown(a[0], a[1]));
3181     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3182         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3183     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3184         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3185     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3186         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3187     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3188         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3189     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3190         return do_accept4(a[0], a[1], a[2], a[3]);
3191     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3192         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3193     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3194         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3195     default:
3196         gemu_log("Unsupported socketcall: %d\n", num);
3197         return -TARGET_EINVAL;
3198     }
3199 }
3200 #endif
3201 
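/*
 * Book-keeping for guest shmat() attachments: do_shmat() records the guest
 * address and segment size here so that do_shmdt() can later clear the page
 * flags for the whole segment (shmdt() itself is only given the address).
 */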
3202 #define N_SHM_REGIONS	32
3203 
3204 static struct shm_region {
3205     abi_ulong start;
3206     abi_ulong size;
3207     bool in_use;
3208 } shm_regions[N_SHM_REGIONS];
3209 
3210 #ifndef TARGET_SEMID64_DS
3211 /* asm-generic version of this struct */
3212 struct target_semid64_ds
3213 {
3214   struct target_ipc_perm sem_perm;
3215   abi_ulong sem_otime;
3216 #if TARGET_ABI_BITS == 32
3217   abi_ulong __unused1;
3218 #endif
3219   abi_ulong sem_ctime;
3220 #if TARGET_ABI_BITS == 32
3221   abi_ulong __unused2;
3222 #endif
3223   abi_ulong sem_nsems;
3224   abi_ulong __unused3;
3225   abi_ulong __unused4;
3226 };
3227 #endif
3228 
3229 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3230                                                abi_ulong target_addr)
3231 {
3232     struct target_ipc_perm *target_ip;
3233     struct target_semid64_ds *target_sd;
3234 
3235     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3236         return -TARGET_EFAULT;
3237     target_ip = &(target_sd->sem_perm);
3238     host_ip->__key = tswap32(target_ip->__key);
3239     host_ip->uid = tswap32(target_ip->uid);
3240     host_ip->gid = tswap32(target_ip->gid);
3241     host_ip->cuid = tswap32(target_ip->cuid);
3242     host_ip->cgid = tswap32(target_ip->cgid);
3243 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3244     host_ip->mode = tswap32(target_ip->mode);
3245 #else
3246     host_ip->mode = tswap16(target_ip->mode);
3247 #endif
3248 #if defined(TARGET_PPC)
3249     host_ip->__seq = tswap32(target_ip->__seq);
3250 #else
3251     host_ip->__seq = tswap16(target_ip->__seq);
3252 #endif
3253     unlock_user_struct(target_sd, target_addr, 0);
3254     return 0;
3255 }
3256 
3257 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3258                                                struct ipc_perm *host_ip)
3259 {
3260     struct target_ipc_perm *target_ip;
3261     struct target_semid64_ds *target_sd;
3262 
3263     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3264         return -TARGET_EFAULT;
3265     target_ip = &(target_sd->sem_perm);
3266     target_ip->__key = tswap32(host_ip->__key);
3267     target_ip->uid = tswap32(host_ip->uid);
3268     target_ip->gid = tswap32(host_ip->gid);
3269     target_ip->cuid = tswap32(host_ip->cuid);
3270     target_ip->cgid = tswap32(host_ip->cgid);
3271 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3272     target_ip->mode = tswap32(host_ip->mode);
3273 #else
3274     target_ip->mode = tswap16(host_ip->mode);
3275 #endif
3276 #if defined(TARGET_PPC)
3277     target_ip->__seq = tswap32(host_ip->__seq);
3278 #else
3279     target_ip->__seq = tswap16(host_ip->__seq);
3280 #endif
3281     unlock_user_struct(target_sd, target_addr, 1);
3282     return 0;
3283 }
3284 
3285 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3286                                                abi_ulong target_addr)
3287 {
3288     struct target_semid64_ds *target_sd;
3289 
3290     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3291         return -TARGET_EFAULT;
3292     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3293         return -TARGET_EFAULT;
3294     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3295     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3296     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3297     unlock_user_struct(target_sd, target_addr, 0);
3298     return 0;
3299 }
3300 
3301 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3302                                                struct semid_ds *host_sd)
3303 {
3304     struct target_semid64_ds *target_sd;
3305 
3306     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3307         return -TARGET_EFAULT;
3308     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3309         return -TARGET_EFAULT;
3310     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3311     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3312     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3313     unlock_user_struct(target_sd, target_addr, 1);
3314     return 0;
3315 }
3316 
3317 struct target_seminfo {
3318     int semmap;
3319     int semmni;
3320     int semmns;
3321     int semmnu;
3322     int semmsl;
3323     int semopm;
3324     int semume;
3325     int semusz;
3326     int semvmx;
3327     int semaem;
3328 };
3329 
3330 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3331                                               struct seminfo *host_seminfo)
3332 {
3333     struct target_seminfo *target_seminfo;
3334     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3335         return -TARGET_EFAULT;
3336     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3337     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3338     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3339     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3340     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3341     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3342     __put_user(host_seminfo->semume, &target_seminfo->semume);
3343     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3344     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3345     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3346     unlock_user_struct(target_seminfo, target_addr, 1);
3347     return 0;
3348 }
3349 
3350 union semun {
3351 	int val;
3352 	struct semid_ds *buf;
3353 	unsigned short *array;
3354 	struct seminfo *__buf;
3355 };
3356 
3357 union target_semun {
3358 	int val;
3359 	abi_ulong buf;
3360 	abi_ulong array;
3361 	abi_ulong __buf;
3362 };
3363 
3364 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3365                                                abi_ulong target_addr)
3366 {
3367     int nsems;
3368     unsigned short *array;
3369     union semun semun;
3370     struct semid_ds semid_ds;
3371     int i, ret;
3372 
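    /*
     * GETALL/SETALL do not carry an explicit array length, so query the
     * semaphore set with IPC_STAT first to learn how many values to copy.
     */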
3373     semun.buf = &semid_ds;
3374 
3375     ret = semctl(semid, 0, IPC_STAT, semun);
3376     if (ret == -1)
3377         return get_errno(ret);
3378 
3379     nsems = semid_ds.sem_nsems;
3380 
3381     *host_array = g_try_new(unsigned short, nsems);
3382     if (!*host_array) {
3383         return -TARGET_ENOMEM;
3384     }
3385     array = lock_user(VERIFY_READ, target_addr,
3386                       nsems*sizeof(unsigned short), 1);
3387     if (!array) {
3388         g_free(*host_array);
3389         return -TARGET_EFAULT;
3390     }
3391 
3392     for(i=0; i<nsems; i++) {
3393         __get_user((*host_array)[i], &array[i]);
3394     }
3395     unlock_user(array, target_addr, 0);
3396 
3397     return 0;
3398 }
3399 
3400 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3401                                                unsigned short **host_array)
3402 {
3403     int nsems;
3404     unsigned short *array;
3405     union semun semun;
3406     struct semid_ds semid_ds;
3407     int i, ret;
3408 
3409     semun.buf = &semid_ds;
3410 
3411     ret = semctl(semid, 0, IPC_STAT, semun);
3412     if (ret == -1)
3413         return get_errno(ret);
3414 
3415     nsems = semid_ds.sem_nsems;
3416 
3417     array = lock_user(VERIFY_WRITE, target_addr,
3418                       nsems*sizeof(unsigned short), 0);
3419     if (!array)
3420         return -TARGET_EFAULT;
3421 
3422     for(i=0; i<nsems; i++) {
3423         __put_user((*host_array)[i], &array[i]);
3424     }
3425     g_free(*host_array);
3426     unlock_user(array, target_addr, 1);
3427 
3428     return 0;
3429 }
3430 
3431 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3432                                  abi_ulong target_arg)
3433 {
3434     union target_semun target_su = { .buf = target_arg };
3435     union semun arg;
3436     struct semid_ds dsarg;
3437     unsigned short *array = NULL;
3438     struct seminfo seminfo;
3439     abi_long ret = -TARGET_EINVAL;
3440     abi_long err;
3441     cmd &= 0xff;
3442 
3443     switch( cmd ) {
3444 	case GETVAL:
3445 	case SETVAL:
3446             /* In 64-bit cross-endian situations, we will erroneously pick up
3447              * the wrong half of the union for the "val" element.  To rectify
3448              * this, the entire 8-byte structure is byteswapped, followed by
3449              * a swap of the 4-byte val field. In other cases, the data is
3450              * already in proper host byte order. */
3451 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3452 		target_su.buf = tswapal(target_su.buf);
3453 		arg.val = tswap32(target_su.val);
3454 	    } else {
3455 		arg.val = target_su.val;
3456 	    }
3457             ret = get_errno(semctl(semid, semnum, cmd, arg));
3458             break;
3459 	case GETALL:
3460 	case SETALL:
3461             err = target_to_host_semarray(semid, &array, target_su.array);
3462             if (err)
3463                 return err;
3464             arg.array = array;
3465             ret = get_errno(semctl(semid, semnum, cmd, arg));
3466             err = host_to_target_semarray(semid, target_su.array, &array);
3467             if (err)
3468                 return err;
3469             break;
3470 	case IPC_STAT:
3471 	case IPC_SET:
3472 	case SEM_STAT:
3473             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3474             if (err)
3475                 return err;
3476             arg.buf = &dsarg;
3477             ret = get_errno(semctl(semid, semnum, cmd, arg));
3478             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3479             if (err)
3480                 return err;
3481             break;
3482 	case IPC_INFO:
3483 	case SEM_INFO:
3484             arg.__buf = &seminfo;
3485             ret = get_errno(semctl(semid, semnum, cmd, arg));
3486             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3487             if (err)
3488                 return err;
3489             break;
3490 	case IPC_RMID:
3491 	case GETPID:
3492 	case GETNCNT:
3493 	case GETZCNT:
3494             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3495             break;
3496     }
3497 
3498     return ret;
3499 }
3500 
3501 struct target_sembuf {
3502     unsigned short sem_num;
3503     short sem_op;
3504     short sem_flg;
3505 };
3506 
3507 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3508                                              abi_ulong target_addr,
3509                                              unsigned nsops)
3510 {
3511     struct target_sembuf *target_sembuf;
3512     int i;
3513 
3514     target_sembuf = lock_user(VERIFY_READ, target_addr,
3515                               nsops*sizeof(struct target_sembuf), 1);
3516     if (!target_sembuf)
3517         return -TARGET_EFAULT;
3518 
3519     for(i=0; i<nsops; i++) {
3520         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3521         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3522         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3523     }
3524 
3525     unlock_user(target_sembuf, target_addr, 0);
3526 
3527     return 0;
3528 }
3529 
3530 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3531 {
3532     struct sembuf sops[nsops];
3533 
3534     if (target_to_host_sembuf(sops, ptr, nsops))
3535         return -TARGET_EFAULT;
3536 
3537     return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3538 }
3539 
3540 struct target_msqid_ds
3541 {
3542     struct target_ipc_perm msg_perm;
3543     abi_ulong msg_stime;
3544 #if TARGET_ABI_BITS == 32
3545     abi_ulong __unused1;
3546 #endif
3547     abi_ulong msg_rtime;
3548 #if TARGET_ABI_BITS == 32
3549     abi_ulong __unused2;
3550 #endif
3551     abi_ulong msg_ctime;
3552 #if TARGET_ABI_BITS == 32
3553     abi_ulong __unused3;
3554 #endif
3555     abi_ulong __msg_cbytes;
3556     abi_ulong msg_qnum;
3557     abi_ulong msg_qbytes;
3558     abi_ulong msg_lspid;
3559     abi_ulong msg_lrpid;
3560     abi_ulong __unused4;
3561     abi_ulong __unused5;
3562 };
3563 
3564 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3565                                                abi_ulong target_addr)
3566 {
3567     struct target_msqid_ds *target_md;
3568 
3569     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3570         return -TARGET_EFAULT;
3571     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3572         return -TARGET_EFAULT;
3573     host_md->msg_stime = tswapal(target_md->msg_stime);
3574     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3575     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3576     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3577     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3578     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3579     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3580     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3581     unlock_user_struct(target_md, target_addr, 0);
3582     return 0;
3583 }
3584 
3585 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3586                                                struct msqid_ds *host_md)
3587 {
3588     struct target_msqid_ds *target_md;
3589 
3590     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3591         return -TARGET_EFAULT;
3592     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3593         return -TARGET_EFAULT;
3594     target_md->msg_stime = tswapal(host_md->msg_stime);
3595     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3596     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3597     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3598     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3599     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3600     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3601     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3602     unlock_user_struct(target_md, target_addr, 1);
3603     return 0;
3604 }
3605 
3606 struct target_msginfo {
3607     int msgpool;
3608     int msgmap;
3609     int msgmax;
3610     int msgmnb;
3611     int msgmni;
3612     int msgssz;
3613     int msgtql;
3614     unsigned short int msgseg;
3615 };
3616 
3617 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3618                                               struct msginfo *host_msginfo)
3619 {
3620     struct target_msginfo *target_msginfo;
3621     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3622         return -TARGET_EFAULT;
3623     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3624     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3625     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3626     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3627     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3628     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3629     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3630     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3631     unlock_user_struct(target_msginfo, target_addr, 1);
3632     return 0;
3633 }
3634 
3635 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3636 {
3637     struct msqid_ds dsarg;
3638     struct msginfo msginfo;
3639     abi_long ret = -TARGET_EINVAL;
3640 
3641     cmd &= 0xff;
3642 
3643     switch (cmd) {
3644     case IPC_STAT:
3645     case IPC_SET:
3646     case MSG_STAT:
3647         if (target_to_host_msqid_ds(&dsarg,ptr))
3648             return -TARGET_EFAULT;
3649         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3650         if (host_to_target_msqid_ds(ptr,&dsarg))
3651             return -TARGET_EFAULT;
3652         break;
3653     case IPC_RMID:
3654         ret = get_errno(msgctl(msgid, cmd, NULL));
3655         break;
3656     case IPC_INFO:
3657     case MSG_INFO:
3658         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3659         if (host_to_target_msginfo(ptr, &msginfo))
3660             return -TARGET_EFAULT;
3661         break;
3662     }
3663 
3664     return ret;
3665 }
3666 
3667 struct target_msgbuf {
3668     abi_long mtype;
3669     char	mtext[1];
3670 };
3671 
3672 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3673                                  ssize_t msgsz, int msgflg)
3674 {
3675     struct target_msgbuf *target_mb;
3676     struct msgbuf *host_mb;
3677     abi_long ret = 0;
3678 
3679     if (msgsz < 0) {
3680         return -TARGET_EINVAL;
3681     }
3682 
3683     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3684         return -TARGET_EFAULT;
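    /* The host msgbuf begins with a long mtype followed by the message text,
     * hence the extra sizeof(long) on top of msgsz. */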
3685     host_mb = g_try_malloc(msgsz + sizeof(long));
3686     if (!host_mb) {
3687         unlock_user_struct(target_mb, msgp, 0);
3688         return -TARGET_ENOMEM;
3689     }
3690     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3691     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3692     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3693     g_free(host_mb);
3694     unlock_user_struct(target_mb, msgp, 0);
3695 
3696     return ret;
3697 }
3698 
3699 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3700                                  ssize_t msgsz, abi_long msgtyp,
3701                                  int msgflg)
3702 {
3703     struct target_msgbuf *target_mb;
3704     char *target_mtext;
3705     struct msgbuf *host_mb;
3706     abi_long ret = 0;
3707 
3708     if (msgsz < 0) {
3709         return -TARGET_EINVAL;
3710     }
3711 
3712     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3713         return -TARGET_EFAULT;
3714 
3715     host_mb = g_try_malloc(msgsz + sizeof(long));
3716     if (!host_mb) {
3717         ret = -TARGET_ENOMEM;
3718         goto end;
3719     }
3720     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3721 
3722     if (ret > 0) {
3723         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3724         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3725         if (!target_mtext) {
3726             ret = -TARGET_EFAULT;
3727             goto end;
3728         }
3729         memcpy(target_mb->mtext, host_mb->mtext, ret);
3730         unlock_user(target_mtext, target_mtext_addr, ret);
3731     }
3732 
3733     target_mb->mtype = tswapal(host_mb->mtype);
3734 
3735 end:
3736     if (target_mb)
3737         unlock_user_struct(target_mb, msgp, 1);
3738     g_free(host_mb);
3739     return ret;
3740 }
3741 
3742 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3743                                                abi_ulong target_addr)
3744 {
3745     struct target_shmid_ds *target_sd;
3746 
3747     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3748         return -TARGET_EFAULT;
3749     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3750         return -TARGET_EFAULT;
3751     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3752     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3753     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3754     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3755     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3756     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3757     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3758     unlock_user_struct(target_sd, target_addr, 0);
3759     return 0;
3760 }
3761 
3762 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3763                                                struct shmid_ds *host_sd)
3764 {
3765     struct target_shmid_ds *target_sd;
3766 
3767     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3768         return -TARGET_EFAULT;
3769     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3770         return -TARGET_EFAULT;
3771     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3772     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3773     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3774     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3775     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3776     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3777     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3778     unlock_user_struct(target_sd, target_addr, 1);
3779     return 0;
3780 }
3781 
3782 struct  target_shminfo {
3783     abi_ulong shmmax;
3784     abi_ulong shmmin;
3785     abi_ulong shmmni;
3786     abi_ulong shmseg;
3787     abi_ulong shmall;
3788 };
3789 
3790 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3791                                               struct shminfo *host_shminfo)
3792 {
3793     struct target_shminfo *target_shminfo;
3794     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3795         return -TARGET_EFAULT;
3796     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3797     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3798     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3799     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3800     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3801     unlock_user_struct(target_shminfo, target_addr, 1);
3802     return 0;
3803 }
3804 
3805 struct target_shm_info {
3806     int used_ids;
3807     abi_ulong shm_tot;
3808     abi_ulong shm_rss;
3809     abi_ulong shm_swp;
3810     abi_ulong swap_attempts;
3811     abi_ulong swap_successes;
3812 };
3813 
3814 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3815                                                struct shm_info *host_shm_info)
3816 {
3817     struct target_shm_info *target_shm_info;
3818     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3819         return -TARGET_EFAULT;
3820     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3821     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3822     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3823     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3824     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3825     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3826     unlock_user_struct(target_shm_info, target_addr, 1);
3827     return 0;
3828 }
3829 
3830 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3831 {
3832     struct shmid_ds dsarg;
3833     struct shminfo shminfo;
3834     struct shm_info shm_info;
3835     abi_long ret = -TARGET_EINVAL;
3836 
3837     cmd &= 0xff;
3838 
3839     switch(cmd) {
3840     case IPC_STAT:
3841     case IPC_SET:
3842     case SHM_STAT:
3843         if (target_to_host_shmid_ds(&dsarg, buf))
3844             return -TARGET_EFAULT;
3845         ret = get_errno(shmctl(shmid, cmd, &dsarg));
3846         if (host_to_target_shmid_ds(buf, &dsarg))
3847             return -TARGET_EFAULT;
3848         break;
3849     case IPC_INFO:
3850         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3851         if (host_to_target_shminfo(buf, &shminfo))
3852             return -TARGET_EFAULT;
3853         break;
3854     case SHM_INFO:
3855         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3856         if (host_to_target_shm_info(buf, &shm_info))
3857             return -TARGET_EFAULT;
3858         break;
3859     case IPC_RMID:
3860     case SHM_LOCK:
3861     case SHM_UNLOCK:
3862         ret = get_errno(shmctl(shmid, cmd, NULL));
3863         break;
3864     }
3865 
3866     return ret;
3867 }
3868 
3869 #ifndef TARGET_FORCE_SHMLBA
3870 /* For most architectures, SHMLBA is the same as the page size;
3871  * some architectures have larger values, in which case they should
3872  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3873  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3874  * and defining its own value for SHMLBA.
3875  *
3876  * The kernel also permits SHMLBA to be set by the architecture to a
3877  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3878  * this means that addresses are rounded to the large size if
3879  * SHM_RND is set but addresses not aligned to that size are not rejected
3880  * as long as they are at least page-aligned. Since the only architecture
3881  * which uses this is ia64, this code doesn't provide for that oddity.
3882  */
3883 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
3884 {
3885     return TARGET_PAGE_SIZE;
3886 }
3887 #endif
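/*
 * A rough, hypothetical sketch of what a target with a larger SHMLBA would
 * supply from its own headers instead of the generic helper above (the
 * 4 * TARGET_PAGE_SIZE value below is purely illustrative):
 *
 *   #define TARGET_FORCE_SHMLBA 1
 *
 *   static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *   {
 *       return 4 * TARGET_PAGE_SIZE;
 *   }
 */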
3888 
3889 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
3890                                  int shmid, abi_ulong shmaddr, int shmflg)
3891 {
3892     abi_long raddr;
3893     void *host_raddr;
3894     struct shmid_ds shm_info;
3895     int i, ret;
3896     abi_ulong shmlba;
3897 
3898     /* find out the length of the shared memory segment */
3899     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3900     if (is_error(ret)) {
3901         /* can't get length, bail out */
3902         return ret;
3903     }
3904 
3905     shmlba = target_shmlba(cpu_env);
3906 
3907     if (shmaddr & (shmlba - 1)) {
3908         if (shmflg & SHM_RND) {
3909             shmaddr &= ~(shmlba - 1);
3910         } else {
3911             return -TARGET_EINVAL;
3912         }
3913     }
3914     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
3915         return -TARGET_EINVAL;
3916     }
3917 
3918     mmap_lock();
3919 
3920     if (shmaddr)
3921         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3922     else {
3923         abi_ulong mmap_start;
3924 
3925         /* In order to use the host shmat, we need to honor host SHMLBA.  */
3926         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
3927 
3928         if (mmap_start == -1) {
3929             errno = ENOMEM;
3930             host_raddr = (void *)-1;
3931         } else
3932             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3933     }
3934 
3935     if (host_raddr == (void *)-1) {
3936         mmap_unlock();
3937         return get_errno((long)host_raddr);
3938     }
3939     raddr = h2g((unsigned long)host_raddr);
3940 
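    /* Record the new mapping in the guest page flags so the rest of the
     * emulator treats the range as valid (read-only under SHM_RDONLY). */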
3941     page_set_flags(raddr, raddr + shm_info.shm_segsz,
3942                    PAGE_VALID | PAGE_READ |
3943                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3944 
3945     for (i = 0; i < N_SHM_REGIONS; i++) {
3946         if (!shm_regions[i].in_use) {
3947             shm_regions[i].in_use = true;
3948             shm_regions[i].start = raddr;
3949             shm_regions[i].size = shm_info.shm_segsz;
3950             break;
3951         }
3952     }
3953 
3954     mmap_unlock();
3955     return raddr;
3956 
3957 }
3958 
3959 static inline abi_long do_shmdt(abi_ulong shmaddr)
3960 {
3961     int i;
3962     abi_long rv;
3963 
3964     mmap_lock();
3965 
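    /* Look up the recorded attachment: shmdt() only takes an address, so the
     * segment size needed to reset the page flags comes from shm_regions[]. */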
3966     for (i = 0; i < N_SHM_REGIONS; ++i) {
3967         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3968             shm_regions[i].in_use = false;
3969             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3970             break;
3971         }
3972     }
3973     rv = get_errno(shmdt(g2h(shmaddr)));
3974 
3975     mmap_unlock();
3976 
3977     return rv;
3978 }
3979 
3980 #ifdef TARGET_NR_ipc
3981 /* ??? This only works with linear mappings.  */
3982 /* do_ipc() must return target values and target errnos. */
3983 static abi_long do_ipc(CPUArchState *cpu_env,
3984                        unsigned int call, abi_long first,
3985                        abi_long second, abi_long third,
3986                        abi_long ptr, abi_long fifth)
3987 {
3988     int version;
3989     abi_long ret = 0;
3990 
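    /* The ipc() call number multiplexes the operation in the low 16 bits and
     * a backward-compatibility version in the high 16 bits; the version
     * selects between old- and new-style msgrcv/shmat argument passing. */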
3991     version = call >> 16;
3992     call &= 0xffff;
3993 
3994     switch (call) {
3995     case IPCOP_semop:
3996         ret = do_semop(first, ptr, second);
3997         break;
3998 
3999     case IPCOP_semget:
4000         ret = get_errno(semget(first, second, third));
4001         break;
4002 
4003     case IPCOP_semctl: {
4004         /* The semun argument to semctl is passed by value, so dereference the
4005          * ptr argument. */
4006         abi_ulong atptr;
4007         get_user_ual(atptr, ptr);
4008         ret = do_semctl(first, second, third, atptr);
4009         break;
4010     }
4011 
4012     case IPCOP_msgget:
4013         ret = get_errno(msgget(first, second));
4014         break;
4015 
4016     case IPCOP_msgsnd:
4017         ret = do_msgsnd(first, ptr, second, third);
4018         break;
4019 
4020     case IPCOP_msgctl:
4021         ret = do_msgctl(first, second, ptr);
4022         break;
4023 
4024     case IPCOP_msgrcv:
4025         switch (version) {
4026         case 0:
4027             {
4028                 struct target_ipc_kludge {
4029                     abi_long msgp;
4030                     abi_long msgtyp;
4031                 } *tmp;
4032 
4033                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4034                     ret = -TARGET_EFAULT;
4035                     break;
4036                 }
4037 
4038                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4039 
4040                 unlock_user_struct(tmp, ptr, 0);
4041                 break;
4042             }
4043         default:
4044             ret = do_msgrcv(first, ptr, second, fifth, third);
4045         }
4046         break;
4047 
4048     case IPCOP_shmat:
4049         switch (version) {
4050         default:
4051         {
4052             abi_ulong raddr;
4053             raddr = do_shmat(cpu_env, first, ptr, second);
4054             if (is_error(raddr))
4055                 return get_errno(raddr);
4056             if (put_user_ual(raddr, third))
4057                 return -TARGET_EFAULT;
4058             break;
4059         }
4060         case 1:
4061             ret = -TARGET_EINVAL;
4062             break;
4063         }
4064 	break;
4065     case IPCOP_shmdt:
4066         ret = do_shmdt(ptr);
4067 	break;
4068 
4069     case IPCOP_shmget:
4070 	/* IPC_* flag values are the same on all linux platforms */
4071 	ret = get_errno(shmget(first, second, third));
4072 	break;
4073 
4074 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4075     case IPCOP_shmctl:
4076         ret = do_shmctl(first, second, ptr);
4077         break;
4078     default:
4079 	gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4080 	ret = -TARGET_ENOSYS;
4081 	break;
4082     }
4083     return ret;
4084 }
4085 #endif
4086 
4087 /* kernel structure types definitions */
4088 
4089 #define STRUCT(name, ...) STRUCT_ ## name,
4090 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4091 enum {
4092 #include "syscall_types.h"
4093 STRUCT_MAX
4094 };
4095 #undef STRUCT
4096 #undef STRUCT_SPECIAL
4097 
4098 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4099 #define STRUCT_SPECIAL(name)
4100 #include "syscall_types.h"
4101 #undef STRUCT
4102 #undef STRUCT_SPECIAL
4103 
4104 typedef struct IOCTLEntry IOCTLEntry;
4105 
4106 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4107                              int fd, int cmd, abi_long arg);
4108 
4109 struct IOCTLEntry {
4110     int target_cmd;
4111     unsigned int host_cmd;
4112     const char *name;
4113     int access;
4114     do_ioctl_fn *do_ioctl;
4115     const argtype arg_type[5];
4116 };
4117 
4118 #define IOC_R 0x0001
4119 #define IOC_W 0x0002
4120 #define IOC_RW (IOC_R | IOC_W)
4121 
4122 #define MAX_STRUCT_SIZE 4096
4123 
4124 #ifdef CONFIG_FIEMAP
4125 /* So fiemap access checks don't overflow on 32 bit systems.
4126  * This is very slightly smaller than the limit imposed by
4127  * the underlying kernel.
4128  */
4129 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4130                             / sizeof(struct fiemap_extent))
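/*
 * With fm_extent_count capped at FIEMAP_MAX_EXTENTS, the later computation
 *   sizeof(*fm) + fm_extent_count * sizeof(struct fiemap_extent)
 * stays within UINT_MAX, so the 32-bit outbufsz below cannot wrap around.
 */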
4131 
4132 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4133                                        int fd, int cmd, abi_long arg)
4134 {
4135     /* The parameter for this ioctl is a struct fiemap followed
4136      * by an array of struct fiemap_extent whose size is set
4137      * in fiemap->fm_extent_count. The array is filled in by the
4138      * ioctl.
4139      */
4140     int target_size_in, target_size_out;
4141     struct fiemap *fm;
4142     const argtype *arg_type = ie->arg_type;
4143     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4144     void *argptr, *p;
4145     abi_long ret;
4146     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4147     uint32_t outbufsz;
4148     int free_fm = 0;
4149 
4150     assert(arg_type[0] == TYPE_PTR);
4151     assert(ie->access == IOC_RW);
4152     arg_type++;
4153     target_size_in = thunk_type_size(arg_type, 0);
4154     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4155     if (!argptr) {
4156         return -TARGET_EFAULT;
4157     }
4158     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4159     unlock_user(argptr, arg, 0);
4160     fm = (struct fiemap *)buf_temp;
4161     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4162         return -TARGET_EINVAL;
4163     }
4164 
4165     outbufsz = sizeof (*fm) +
4166         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4167 
4168     if (outbufsz > MAX_STRUCT_SIZE) {
4169         /* We can't fit all the extents into the fixed size buffer.
4170          * Allocate one that is large enough and use it instead.
4171          */
4172         fm = g_try_malloc(outbufsz);
4173         if (!fm) {
4174             return -TARGET_ENOMEM;
4175         }
4176         memcpy(fm, buf_temp, sizeof(struct fiemap));
4177         free_fm = 1;
4178     }
4179     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4180     if (!is_error(ret)) {
4181         target_size_out = target_size_in;
4182         /* An extent_count of 0 means we were only counting the extents
4183          * so there are no structs to copy
4184          */
4185         if (fm->fm_extent_count != 0) {
4186             target_size_out += fm->fm_mapped_extents * extent_size;
4187         }
4188         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4189         if (!argptr) {
4190             ret = -TARGET_EFAULT;
4191         } else {
4192             /* Convert the struct fiemap */
4193             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4194             if (fm->fm_extent_count != 0) {
4195                 p = argptr + target_size_in;
4196                 /* ...and then all the struct fiemap_extents */
4197                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4198                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4199                                   THUNK_TARGET);
4200                     p += extent_size;
4201                 }
4202             }
4203             unlock_user(argptr, arg, target_size_out);
4204         }
4205     }
4206     if (free_fm) {
4207         g_free(fm);
4208     }
4209     return ret;
4210 }
4211 #endif
4212 
4213 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4214                                 int fd, int cmd, abi_long arg)
4215 {
4216     const argtype *arg_type = ie->arg_type;
4217     int target_size;
4218     void *argptr;
4219     int ret;
4220     struct ifconf *host_ifconf;
4221     uint32_t outbufsz;
4222     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4223     int target_ifreq_size;
4224     int nb_ifreq;
4225     int free_buf = 0;
4226     int i;
4227     int target_ifc_len;
4228     abi_long target_ifc_buf;
4229     int host_ifc_len;
4230     char *host_ifc_buf;
4231 
4232     assert(arg_type[0] == TYPE_PTR);
4233     assert(ie->access == IOC_RW);
4234 
4235     arg_type++;
4236     target_size = thunk_type_size(arg_type, 0);
4237 
4238     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4239     if (!argptr)
4240         return -TARGET_EFAULT;
4241     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4242     unlock_user(argptr, arg, 0);
4243 
4244     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4245     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4246     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4247 
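    /*
     * A NULL ifc_buf asks SIOCGIFCONF only for the required buffer length,
     * so in that case there is no ifreq array to marshal; otherwise size a
     * host buffer for the same number of entries the guest provided room for.
     */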
4248     if (target_ifc_buf != 0) {
4249         target_ifc_len = host_ifconf->ifc_len;
4250         nb_ifreq = target_ifc_len / target_ifreq_size;
4251         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4252 
4253         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4254         if (outbufsz > MAX_STRUCT_SIZE) {
4255             /*
4256              * We can't fit all the ifreq entries into the fixed size buffer.
4257              * Allocate one that is large enough and use it instead.
4258              */
4259             host_ifconf = malloc(outbufsz);
4260             if (!host_ifconf) {
4261                 return -TARGET_ENOMEM;
4262             }
4263             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4264             free_buf = 1;
4265         }
4266         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4267 
4268         host_ifconf->ifc_len = host_ifc_len;
4269     } else {
4270       host_ifc_buf = NULL;
4271     }
4272     host_ifconf->ifc_buf = host_ifc_buf;
4273 
4274     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4275     if (!is_error(ret)) {
4276 	/* convert host ifc_len to target ifc_len */
4277 
4278         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4279         target_ifc_len = nb_ifreq * target_ifreq_size;
4280         host_ifconf->ifc_len = target_ifc_len;
4281 
4282 	/* restore target ifc_buf */
4283 
4284         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4285 
4286 	/* copy struct ifconf to target user */
4287 
4288         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4289         if (!argptr)
4290             return -TARGET_EFAULT;
4291         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4292         unlock_user(argptr, arg, target_size);
4293 
4294         if (target_ifc_buf != 0) {
4295             /* copy ifreq[] to target user */
4296             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4297             for (i = 0; i < nb_ifreq ; i++) {
4298                 thunk_convert(argptr + i * target_ifreq_size,
4299                               host_ifc_buf + i * sizeof(struct ifreq),
4300                               ifreq_arg_type, THUNK_TARGET);
4301             }
4302             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4303         }
4304     }
4305 
4306     if (free_buf) {
4307         free(host_ifconf);
4308     }
4309 
4310     return ret;
4311 }
4312 
4313 #if defined(CONFIG_USBFS)
4314 #if HOST_LONG_BITS > 64
4315 #error USBDEVFS thunks do not support >64 bit hosts yet.
4316 #endif
4317 struct live_urb {
4318     uint64_t target_urb_adr;
4319     uint64_t target_buf_adr;
4320     char *target_buf_ptr;
4321     struct usbdevfs_urb host_urb;
4322 };
4323 
4324 static GHashTable *usbdevfs_urb_hashtable(void)
4325 {
4326     static GHashTable *urb_hashtable;
4327 
4328     if (!urb_hashtable) {
4329         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4330     }
4331     return urb_hashtable;
4332 }
4333 
4334 static void urb_hashtable_insert(struct live_urb *urb)
4335 {
4336     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4337     g_hash_table_insert(urb_hashtable, urb, urb);
4338 }
4339 
4340 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4341 {
4342     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4343     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4344 }
4345 
4346 static void urb_hashtable_remove(struct live_urb *urb)
4347 {
4348     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4349     g_hash_table_remove(urb_hashtable, urb);
4350 }
4351 
4352 static abi_long
4353 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4354                           int fd, int cmd, abi_long arg)
4355 {
4356     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4357     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4358     struct live_urb *lurb;
4359     void *argptr;
4360     uint64_t hurb;
4361     int target_size;
4362     uintptr_t target_urb_adr;
4363     abi_long ret;
4364 
4365     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4366 
4367     memset(buf_temp, 0, sizeof(uint64_t));
4368     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4369     if (is_error(ret)) {
4370         return ret;
4371     }
4372 
4373     memcpy(&hurb, buf_temp, sizeof(uint64_t));
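    /*
     * The kernel hands back the pointer we submitted, which was
     * &lurb->host_urb; subtract the member offset (a container_of-style
     * computation) to recover the wrapping live_urb and its metadata.
     */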
4374     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4375     if (!lurb->target_urb_adr) {
4376         return -TARGET_EFAULT;
4377     }
4378     urb_hashtable_remove(lurb);
4379     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4380         lurb->host_urb.buffer_length);
4381     lurb->target_buf_ptr = NULL;
4382 
4383     /* restore the guest buffer pointer */
4384     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4385 
4386     /* update the guest urb struct */
4387     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4388     if (!argptr) {
4389         g_free(lurb);
4390         return -TARGET_EFAULT;
4391     }
4392     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4393     unlock_user(argptr, lurb->target_urb_adr, target_size);
4394 
4395     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4396     /* write back the urb handle */
4397     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4398     if (!argptr) {
4399         g_free(lurb);
4400         return -TARGET_EFAULT;
4401     }
4402 
4403     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4404     target_urb_adr = lurb->target_urb_adr;
4405     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4406     unlock_user(argptr, arg, target_size);
4407 
4408     g_free(lurb);
4409     return ret;
4410 }
4411 
4412 static abi_long
4413 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4414                              uint8_t *buf_temp __attribute__((unused)),
4415                              int fd, int cmd, abi_long arg)
4416 {
4417     struct live_urb *lurb;
4418 
4419     /* map target address back to host URB with metadata. */
4420     lurb = urb_hashtable_lookup(arg);
4421     if (!lurb) {
4422         return -TARGET_EFAULT;
4423     }
4424     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4425 }
4426 
4427 static abi_long
4428 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4429                             int fd, int cmd, abi_long arg)
4430 {
4431     const argtype *arg_type = ie->arg_type;
4432     int target_size;
4433     abi_long ret;
4434     void *argptr;
4435     int rw_dir;
4436     struct live_urb *lurb;
4437 
4438     /*
4439      * each submitted URB needs to map to a unique ID for the
4440      * kernel, and that unique ID needs to be a pointer to
4441      * host memory.  Hence, we need to malloc for each URB.
4442      * Isochronous transfers have a variable-length struct.
4443      */
4444     arg_type++;
4445     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4446 
4447     /* construct host copy of urb and metadata */
4448     lurb = g_try_malloc0(sizeof(struct live_urb));
4449     if (!lurb) {
4450         return -TARGET_ENOMEM;
4451     }
4452 
4453     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4454     if (!argptr) {
4455         g_free(lurb);
4456         return -TARGET_EFAULT;
4457     }
4458     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4459     unlock_user(argptr, arg, 0);
4460 
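    /*
     * After thunk_convert() the buffer field still holds the guest address;
     * remember it (and the guest URB address used as the hashtable key)
     * before buffer is rewritten to point at locked host memory below.
     */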
4461     lurb->target_urb_adr = arg;
4462     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4463 
4464     /* buffer space used depends on endpoint type so lock the entire buffer */
4465     /* control type urbs should check the buffer contents for true direction */
4466     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4467     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4468         lurb->host_urb.buffer_length, 1);
4469     if (lurb->target_buf_ptr == NULL) {
4470         g_free(lurb);
4471         return -TARGET_EFAULT;
4472     }
4473 
4474     /* update buffer pointer in host copy */
4475     lurb->host_urb.buffer = lurb->target_buf_ptr;
4476 
4477     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4478     if (is_error(ret)) {
4479         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4480         g_free(lurb);
4481     } else {
4482         urb_hashtable_insert(lurb);
4483     }
4484 
4485     return ret;
4486 }
4487 #endif /* CONFIG_USBFS */
4488 
4489 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4490                             int cmd, abi_long arg)
4491 {
4492     void *argptr;
4493     struct dm_ioctl *host_dm;
4494     abi_long guest_data;
4495     uint32_t guest_data_size;
4496     int target_size;
4497     const argtype *arg_type = ie->arg_type;
4498     abi_long ret;
4499     void *big_buf = NULL;
4500     char *host_data;
4501 
4502     arg_type++;
4503     target_size = thunk_type_size(arg_type, 0);
4504     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4505     if (!argptr) {
4506         ret = -TARGET_EFAULT;
4507         goto out;
4508     }
4509     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4510     unlock_user(argptr, arg, 0);
4511 
4512     /* buf_temp is too small, so fetch things into a bigger buffer */
4513     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4514     memcpy(big_buf, buf_temp, target_size);
4515     buf_temp = big_buf;
4516     host_dm = big_buf;
4517 
4518     guest_data = arg + host_dm->data_start;
4519     if ((guest_data - arg) < 0) {
4520         ret = -TARGET_EINVAL;
4521         goto out;
4522     }
4523     guest_data_size = host_dm->data_size - host_dm->data_start;
4524     host_data = (char*)host_dm + host_dm->data_start;
4525 
4526     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4527     if (!argptr) {
4528         ret = -TARGET_EFAULT;
4529         goto out;
4530     }
4531 
4532     switch (ie->host_cmd) {
4533     case DM_REMOVE_ALL:
4534     case DM_LIST_DEVICES:
4535     case DM_DEV_CREATE:
4536     case DM_DEV_REMOVE:
4537     case DM_DEV_SUSPEND:
4538     case DM_DEV_STATUS:
4539     case DM_DEV_WAIT:
4540     case DM_TABLE_STATUS:
4541     case DM_TABLE_CLEAR:
4542     case DM_TABLE_DEPS:
4543     case DM_LIST_VERSIONS:
4544         /* no input data */
4545         break;
4546     case DM_DEV_RENAME:
4547     case DM_DEV_SET_GEOMETRY:
4548         /* data contains only strings */
4549         memcpy(host_data, argptr, guest_data_size);
4550         break;
4551     case DM_TARGET_MSG:
4552         memcpy(host_data, argptr, guest_data_size);
4553         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4554         break;
4555     case DM_TABLE_LOAD:
4556     {
4557         void *gspec = argptr;
4558         void *cur_data = host_data;
4559         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4560         int spec_size = thunk_type_size(arg_type, 0);
4561         int i;
4562 
4563         for (i = 0; i < host_dm->target_count; i++) {
4564             struct dm_target_spec *spec = cur_data;
4565             uint32_t next;
4566             int slen;
4567 
4568             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4569             slen = strlen((char*)gspec + spec_size) + 1;
4570             next = spec->next;
4571             spec->next = sizeof(*spec) + slen;
4572             strcpy((char*)&spec[1], gspec + spec_size);
4573             gspec += next;
4574             cur_data += spec->next;
4575         }
4576         break;
4577     }
4578     default:
4579         ret = -TARGET_EINVAL;
4580         unlock_user(argptr, guest_data, 0);
4581         goto out;
4582     }
4583     unlock_user(argptr, guest_data, 0);
4584 
4585     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4586     if (!is_error(ret)) {
4587         guest_data = arg + host_dm->data_start;
4588         guest_data_size = host_dm->data_size - host_dm->data_start;
4589         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4590         switch (ie->host_cmd) {
4591         case DM_REMOVE_ALL:
4592         case DM_DEV_CREATE:
4593         case DM_DEV_REMOVE:
4594         case DM_DEV_RENAME:
4595         case DM_DEV_SUSPEND:
4596         case DM_DEV_STATUS:
4597         case DM_TABLE_LOAD:
4598         case DM_TABLE_CLEAR:
4599         case DM_TARGET_MSG:
4600         case DM_DEV_SET_GEOMETRY:
4601             /* no return data */
4602             break;
4603         case DM_LIST_DEVICES:
4604         {
4605             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4606             uint32_t remaining_data = guest_data_size;
4607             void *cur_data = argptr;
4608             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4609             int nl_size = 12; /* can't use thunk_size due to alignment */
4610 
4611             while (1) {
4612                 uint32_t next = nl->next;
4613                 if (next) {
4614                     nl->next = nl_size + (strlen(nl->name) + 1);
4615                 }
4616                 if (remaining_data < nl->next) {
4617                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4618                     break;
4619                 }
4620                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4621                 strcpy(cur_data + nl_size, nl->name);
4622                 cur_data += nl->next;
4623                 remaining_data -= nl->next;
4624                 if (!next) {
4625                     break;
4626                 }
4627                 nl = (void*)nl + next;
4628             }
4629             break;
4630         }
4631         case DM_DEV_WAIT:
4632         case DM_TABLE_STATUS:
4633         {
4634             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4635             void *cur_data = argptr;
4636             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4637             int spec_size = thunk_type_size(arg_type, 0);
4638             int i;
4639 
4640             for (i = 0; i < host_dm->target_count; i++) {
4641                 uint32_t next = spec->next;
4642                 int slen = strlen((char*)&spec[1]) + 1;
4643                 spec->next = (cur_data - argptr) + spec_size + slen;
4644                 if (guest_data_size < spec->next) {
4645                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4646                     break;
4647                 }
4648                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4649                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4650                 cur_data = argptr + spec->next;
4651                 spec = (void*)host_dm + host_dm->data_start + next;
4652             }
4653             break;
4654         }
4655         case DM_TABLE_DEPS:
4656         {
4657             void *hdata = (void*)host_dm + host_dm->data_start;
4658             int count = *(uint32_t*)hdata;
4659             uint64_t *hdev = hdata + 8;
4660             uint64_t *gdev = argptr + 8;
4661             int i;
4662 
4663             *(uint32_t*)argptr = tswap32(count);
4664             for (i = 0; i < count; i++) {
4665                 *gdev = tswap64(*hdev);
4666                 gdev++;
4667                 hdev++;
4668             }
4669             break;
4670         }
4671         case DM_LIST_VERSIONS:
4672         {
4673             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4674             uint32_t remaining_data = guest_data_size;
4675             void *cur_data = argptr;
4676             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4677             int vers_size = thunk_type_size(arg_type, 0);
4678 
4679             while (1) {
4680                 uint32_t next = vers->next;
4681                 if (next) {
4682                     vers->next = vers_size + (strlen(vers->name) + 1);
4683                 }
4684                 if (remaining_data < vers->next) {
4685                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4686                     break;
4687                 }
4688                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4689                 strcpy(cur_data + vers_size, vers->name);
4690                 cur_data += vers->next;
4691                 remaining_data -= vers->next;
4692                 if (!next) {
4693                     break;
4694                 }
4695                 vers = (void*)vers + next;
4696             }
4697             break;
4698         }
4699         default:
4700             unlock_user(argptr, guest_data, 0);
4701             ret = -TARGET_EINVAL;
4702             goto out;
4703         }
4704         unlock_user(argptr, guest_data, guest_data_size);
4705 
4706         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4707         if (!argptr) {
4708             ret = -TARGET_EFAULT;
4709             goto out;
4710         }
4711         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4712         unlock_user(argptr, arg, target_size);
4713     }
4714 out:
4715     g_free(big_buf);
4716     return ret;
4717 }
4718 
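/*
 * BLKPG's struct blkpg_ioctl_arg carries a 'data' member that is itself a
 * guest pointer, which the generic thunk machinery cannot follow: convert
 * the outer structure, fetch and convert the struct blkpg_partition payload
 * separately, and point the host structure at the local copy before the call.
 */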
4719 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4720                                int cmd, abi_long arg)
4721 {
4722     void *argptr;
4723     int target_size;
4724     const argtype *arg_type = ie->arg_type;
4725     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4726     abi_long ret;
4727 
4728     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4729     struct blkpg_partition host_part;
4730 
4731     /* Read and convert blkpg */
4732     arg_type++;
4733     target_size = thunk_type_size(arg_type, 0);
4734     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4735     if (!argptr) {
4736         ret = -TARGET_EFAULT;
4737         goto out;
4738     }
4739     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4740     unlock_user(argptr, arg, 0);
4741 
4742     switch (host_blkpg->op) {
4743     case BLKPG_ADD_PARTITION:
4744     case BLKPG_DEL_PARTITION:
4745         /* payload is struct blkpg_partition */
4746         break;
4747     default:
4748         /* Unknown opcode */
4749         ret = -TARGET_EINVAL;
4750         goto out;
4751     }
4752 
4753     /* Read and convert blkpg->data */
4754     arg = (abi_long)(uintptr_t)host_blkpg->data;
4755     target_size = thunk_type_size(part_arg_type, 0);
4756     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4757     if (!argptr) {
4758         ret = -TARGET_EFAULT;
4759         goto out;
4760     }
4761     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4762     unlock_user(argptr, arg, 0);
4763 
4764     /* Swizzle the data pointer to our local copy and call! */
4765     host_blkpg->data = &host_part;
4766     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4767 
4768 out:
4769     return ret;
4770 }
4771 
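/*
 * Handler for routing-table ioctls that take a struct rtentry: the rt_dev
 * member is a string pointer, so the structure is converted field by field
 * and the guest device name is locked into host memory around the ioctl.
 */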
4772 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4773                                 int fd, int cmd, abi_long arg)
4774 {
4775     const argtype *arg_type = ie->arg_type;
4776     const StructEntry *se;
4777     const argtype *field_types;
4778     const int *dst_offsets, *src_offsets;
4779     int target_size;
4780     void *argptr;
4781     abi_ulong *target_rt_dev_ptr = NULL;
4782     unsigned long *host_rt_dev_ptr = NULL;
4783     abi_long ret;
4784     int i;
4785 
4786     assert(ie->access == IOC_W);
4787     assert(*arg_type == TYPE_PTR);
4788     arg_type++;
4789     assert(*arg_type == TYPE_STRUCT);
4790     target_size = thunk_type_size(arg_type, 0);
4791     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4792     if (!argptr) {
4793         return -TARGET_EFAULT;
4794     }
4795     arg_type++;
4796     assert(*arg_type == (int)STRUCT_rtentry);
4797     se = struct_entries + *arg_type++;
4798     assert(se->convert[0] == NULL);
4799     /* Convert the struct here so that we can catch the rt_dev string */
4800     field_types = se->field_types;
4801     dst_offsets = se->field_offsets[THUNK_HOST];
4802     src_offsets = se->field_offsets[THUNK_TARGET];
4803     for (i = 0; i < se->nb_fields; i++) {
4804         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4805             assert(*field_types == TYPE_PTRVOID);
4806             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4807             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4808             if (*target_rt_dev_ptr != 0) {
4809                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4810                                                   tswapal(*target_rt_dev_ptr));
4811                 if (!*host_rt_dev_ptr) {
4812                     unlock_user(argptr, arg, 0);
4813                     return -TARGET_EFAULT;
4814                 }
4815             } else {
4816                 *host_rt_dev_ptr = 0;
4817             }
4818             field_types++;
4819             continue;
4820         }
4821         field_types = thunk_convert(buf_temp + dst_offsets[i],
4822                                     argptr + src_offsets[i],
4823                                     field_types, THUNK_HOST);
4824     }
4825     unlock_user(argptr, arg, 0);
4826 
4827     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4828 
4829     assert(host_rt_dev_ptr != NULL);
4830     assert(target_rt_dev_ptr != NULL);
4831     if (*host_rt_dev_ptr != 0) {
4832         unlock_user((void *)*host_rt_dev_ptr,
4833                     *target_rt_dev_ptr, 0);
4834     }
4835     return ret;
4836 }
4837 
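/* KDSIGACCEPT: translate the target signal number to the host signal. */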
4838 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4839                                      int fd, int cmd, abi_long arg)
4840 {
4841     int sig = target_to_host_signal(arg);
4842     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4843 }
4844 
4845 #ifdef TIOCGPTPEER
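/* TIOCGPTPEER: translate the target open flags to host flags. */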
4846 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
4847                                      int fd, int cmd, abi_long arg)
4848 {
4849     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
4850     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
4851 }
4852 #endif
4853 
4854 static IOCTLEntry ioctl_entries[] = {
4855 #define IOCTL(cmd, access, ...) \
4856     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
4857 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
4858     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
4859 #define IOCTL_IGNORE(cmd) \
4860     { TARGET_ ## cmd, 0, #cmd },
4861 #include "ioctls.h"
4862     { 0, 0, },
4863 };
4864 
4865 /* ??? Implement proper locking for ioctls.  */
4866 /* do_ioctl() must return target values and target errnos. */
4867 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4868 {
4869     const IOCTLEntry *ie;
4870     const argtype *arg_type;
4871     abi_long ret;
4872     uint8_t buf_temp[MAX_STRUCT_SIZE];
4873     int target_size;
4874     void *argptr;
4875 
4876     ie = ioctl_entries;
4877     for(;;) {
4878         if (ie->target_cmd == 0) {
4879             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4880             return -TARGET_ENOSYS;
4881         }
4882         if (ie->target_cmd == cmd)
4883             break;
4884         ie++;
4885     }
4886     arg_type = ie->arg_type;
4887     if (ie->do_ioctl) {
4888         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4889     } else if (!ie->host_cmd) {
4890         /* Some architectures define BSD ioctls in their headers
4891            that are not implemented in Linux.  */
4892         return -TARGET_ENOSYS;
4893     }
4894 
4895     switch(arg_type[0]) {
4896     case TYPE_NULL:
4897         /* no argument */
4898         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
4899         break;
4900     case TYPE_PTRVOID:
4901     case TYPE_INT:
4902         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
4903         break;
4904     case TYPE_PTR:
4905         arg_type++;
4906         target_size = thunk_type_size(arg_type, 0);
4907         switch(ie->access) {
4908         case IOC_R:
4909             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4910             if (!is_error(ret)) {
4911                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4912                 if (!argptr)
4913                     return -TARGET_EFAULT;
4914                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4915                 unlock_user(argptr, arg, target_size);
4916             }
4917             break;
4918         case IOC_W:
4919             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4920             if (!argptr)
4921                 return -TARGET_EFAULT;
4922             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4923             unlock_user(argptr, arg, 0);
4924             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4925             break;
4926         default:
4927         case IOC_RW:
4928             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4929             if (!argptr)
4930                 return -TARGET_EFAULT;
4931             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4932             unlock_user(argptr, arg, 0);
4933             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4934             if (!is_error(ret)) {
4935                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4936                 if (!argptr)
4937                     return -TARGET_EFAULT;
4938                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4939                 unlock_user(argptr, arg, target_size);
4940             }
4941             break;
4942         }
4943         break;
4944     default:
4945         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4946                  (long)cmd, arg_type[0]);
4947         ret = -TARGET_ENOSYS;
4948         break;
4949     }
4950     return ret;
4951 }
4952 
4953 static const bitmask_transtbl iflag_tbl[] = {
4954         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4955         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4956         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4957         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4958         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4959         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4960         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4961         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4962         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4963         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4964         { TARGET_IXON, TARGET_IXON, IXON, IXON },
4965         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4966         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4967         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4968         { 0, 0, 0, 0 }
4969 };
4970 
4971 static const bitmask_transtbl oflag_tbl[] = {
4972 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4973 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4974 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4975 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4976 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4977 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4978 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4979 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4980 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4981 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4982 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4983 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4984 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4985 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4986 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4987 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4988 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4989 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4990 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4991 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4992 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4993 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4994 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4995 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4996 	{ 0, 0, 0, 0 }
4997 };
4998 
4999 static const bitmask_transtbl cflag_tbl[] = {
5000 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5001 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5002 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5003 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5004 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5005 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5006 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5007 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5008 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5009 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5010 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5011 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5012 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5013 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5014 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5015 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5016 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5017 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5018 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5019 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5020 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5021 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5022 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5023 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5024 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5025 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5026 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5027 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5028 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5029 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5030 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5031 	{ 0, 0, 0, 0 }
5032 };
5033 
5034 static const bitmask_transtbl lflag_tbl[] = {
5035 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5036 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5037 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5038 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5039 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5040 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5041 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5042 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5043 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5044 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5045 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5046 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5047 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5048 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5049 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5050 	{ 0, 0, 0, 0 }
5051 };
5052 
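/*
 * Conversion between target and host struct termios: the four flag words
 * go through the bitmask tables above, and the c_cc control characters are
 * remapped index by index.
 */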
5053 static void target_to_host_termios (void *dst, const void *src)
5054 {
5055     struct host_termios *host = dst;
5056     const struct target_termios *target = src;
5057 
5058     host->c_iflag =
5059         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5060     host->c_oflag =
5061         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5062     host->c_cflag =
5063         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5064     host->c_lflag =
5065         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5066     host->c_line = target->c_line;
5067 
5068     memset(host->c_cc, 0, sizeof(host->c_cc));
5069     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5070     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5071     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5072     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5073     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5074     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5075     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5076     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5077     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5078     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5079     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5080     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5081     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5082     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5083     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5084     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5085     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5086 }
5087 
5088 static void host_to_target_termios (void *dst, const void *src)
5089 {
5090     struct target_termios *target = dst;
5091     const struct host_termios *host = src;
5092 
5093     target->c_iflag =
5094         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5095     target->c_oflag =
5096         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5097     target->c_cflag =
5098         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5099     target->c_lflag =
5100         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5101     target->c_line = host->c_line;
5102 
5103     memset(target->c_cc, 0, sizeof(target->c_cc));
5104     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5105     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5106     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5107     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5108     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5109     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5110     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5111     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5112     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5113     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5114     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5115     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5116     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5117     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5118     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5119     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5120     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5121 }
5122 
5123 static const StructEntry struct_termios_def = {
5124     .convert = { host_to_target_termios, target_to_host_termios },
5125     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5126     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5127 };
5128 
5129 static bitmask_transtbl mmap_flags_tbl[] = {
5130     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5131     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5132     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5133     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5134       MAP_ANONYMOUS, MAP_ANONYMOUS },
5135     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5136       MAP_GROWSDOWN, MAP_GROWSDOWN },
5137     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5138       MAP_DENYWRITE, MAP_DENYWRITE },
5139     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5140       MAP_EXECUTABLE, MAP_EXECUTABLE },
5141     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5142     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5143       MAP_NORESERVE, MAP_NORESERVE },
5144     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5145     /* MAP_STACK has been ignored by the kernel for quite some time.
5146        Recognize it for the target, but do not pass it through to
5147        the host.  */
5148     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5149     { 0, 0, 0, 0 }
5150 };
5151 
5152 #if defined(TARGET_I386)
5153 
5154 /* NOTE: there is really only one LDT shared by all the threads */
5155 static uint8_t *ldt_table;
5156 
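/*
 * modify_ldt(func = 0): copy up to bytecount bytes of the current LDT back
 * to the guest and return the number of bytes copied.
 */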
5157 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5158 {
5159     int size;
5160     void *p;
5161 
5162     if (!ldt_table)
5163         return 0;
5164     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5165     if (size > bytecount)
5166         size = bytecount;
5167     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5168     if (!p)
5169         return -TARGET_EFAULT;
5170     /* ??? Should this be byteswapped?  */
5171     memcpy(p, ldt_table, size);
5172     unlock_user(p, ptr, size);
5173     return size;
5174 }
5175 
5176 /* XXX: add locking support */
5177 static abi_long write_ldt(CPUX86State *env,
5178                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5179 {
5180     struct target_modify_ldt_ldt_s ldt_info;
5181     struct target_modify_ldt_ldt_s *target_ldt_info;
5182     int seg_32bit, contents, read_exec_only, limit_in_pages;
5183     int seg_not_present, useable, lm;
5184     uint32_t *lp, entry_1, entry_2;
5185 
5186     if (bytecount != sizeof(ldt_info))
5187         return -TARGET_EINVAL;
5188     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5189         return -TARGET_EFAULT;
5190     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5191     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5192     ldt_info.limit = tswap32(target_ldt_info->limit);
5193     ldt_info.flags = tswap32(target_ldt_info->flags);
5194     unlock_user_struct(target_ldt_info, ptr, 0);
5195 
5196     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5197         return -TARGET_EINVAL;
5198     seg_32bit = ldt_info.flags & 1;
5199     contents = (ldt_info.flags >> 1) & 3;
5200     read_exec_only = (ldt_info.flags >> 3) & 1;
5201     limit_in_pages = (ldt_info.flags >> 4) & 1;
5202     seg_not_present = (ldt_info.flags >> 5) & 1;
5203     useable = (ldt_info.flags >> 6) & 1;
5204 #ifdef TARGET_ABI32
5205     lm = 0;
5206 #else
5207     lm = (ldt_info.flags >> 7) & 1;
5208 #endif
5209     if (contents == 3) {
5210         if (oldmode)
5211             return -TARGET_EINVAL;
5212         if (seg_not_present == 0)
5213             return -TARGET_EINVAL;
5214     }
5215     /* allocate the LDT */
5216     if (!ldt_table) {
5217         env->ldt.base = target_mmap(0,
5218                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5219                                     PROT_READ|PROT_WRITE,
5220                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5221         if (env->ldt.base == -1)
5222             return -TARGET_ENOMEM;
5223         memset(g2h(env->ldt.base), 0,
5224                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5225         env->ldt.limit = 0xffff;
5226         ldt_table = g2h(env->ldt.base);
5227     }
5228 
5229     /* NOTE: same code as Linux kernel */
5230     /* Allow LDTs to be cleared by the user. */
5231     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5232         if (oldmode ||
5233             (contents == 0		&&
5234              read_exec_only == 1	&&
5235              seg_32bit == 0		&&
5236              limit_in_pages == 0	&&
5237              seg_not_present == 1	&&
5238              useable == 0 )) {
5239             entry_1 = 0;
5240             entry_2 = 0;
5241             goto install;
5242         }
5243     }
5244 
5245     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5246         (ldt_info.limit & 0x0ffff);
5247     entry_2 = (ldt_info.base_addr & 0xff000000) |
5248         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5249         (ldt_info.limit & 0xf0000) |
5250         ((read_exec_only ^ 1) << 9) |
5251         (contents << 10) |
5252         ((seg_not_present ^ 1) << 15) |
5253         (seg_32bit << 22) |
5254         (limit_in_pages << 23) |
5255         (lm << 21) |
5256         0x7000;
5257     if (!oldmode)
5258         entry_2 |= (useable << 20);
5259 
5260     /* Install the new entry ...  */
5261 install:
5262     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5263     lp[0] = tswap32(entry_1);
5264     lp[1] = tswap32(entry_2);
5265     return 0;
5266 }
5267 
5268 /* specific and weird i386 syscalls */
5269 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5270                               unsigned long bytecount)
5271 {
5272     abi_long ret;
5273 
5274     switch (func) {
5275     case 0:
5276         ret = read_ldt(ptr, bytecount);
5277         break;
5278     case 1:
5279         ret = write_ldt(env, ptr, bytecount, 1);
5280         break;
5281     case 0x11:
5282         ret = write_ldt(env, ptr, bytecount, 0);
5283         break;
5284     default:
5285         ret = -TARGET_ENOSYS;
5286         break;
5287     }
5288     return ret;
5289 }
5290 
5291 #if defined(TARGET_I386) && defined(TARGET_ABI32)
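/*
 * set_thread_area: if entry_number is -1, pick a free GDT TLS slot and
 * report it back to the guest, then build and install the descriptor using
 * the same encoding as the Linux kernel.
 */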
5292 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5293 {
5294     uint64_t *gdt_table = g2h(env->gdt.base);
5295     struct target_modify_ldt_ldt_s ldt_info;
5296     struct target_modify_ldt_ldt_s *target_ldt_info;
5297     int seg_32bit, contents, read_exec_only, limit_in_pages;
5298     int seg_not_present, useable, lm;
5299     uint32_t *lp, entry_1, entry_2;
5300     int i;
5301 
5302     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5303     if (!target_ldt_info)
5304         return -TARGET_EFAULT;
5305     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5306     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5307     ldt_info.limit = tswap32(target_ldt_info->limit);
5308     ldt_info.flags = tswap32(target_ldt_info->flags);
5309     if (ldt_info.entry_number == -1) {
5310         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5311             if (gdt_table[i] == 0) {
5312                 ldt_info.entry_number = i;
5313                 target_ldt_info->entry_number = tswap32(i);
5314                 break;
5315             }
5316         }
5317     }
5318     unlock_user_struct(target_ldt_info, ptr, 1);
5319 
5320     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5321         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5322            return -TARGET_EINVAL;
5323     seg_32bit = ldt_info.flags & 1;
5324     contents = (ldt_info.flags >> 1) & 3;
5325     read_exec_only = (ldt_info.flags >> 3) & 1;
5326     limit_in_pages = (ldt_info.flags >> 4) & 1;
5327     seg_not_present = (ldt_info.flags >> 5) & 1;
5328     useable = (ldt_info.flags >> 6) & 1;
5329 #ifdef TARGET_ABI32
5330     lm = 0;
5331 #else
5332     lm = (ldt_info.flags >> 7) & 1;
5333 #endif
5334 
5335     if (contents == 3) {
5336         if (seg_not_present == 0)
5337             return -TARGET_EINVAL;
5338     }
5339 
5340     /* NOTE: same code as Linux kernel */
5341     /* Allow LDTs to be cleared by the user. */
5342     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5343         if ((contents == 0             &&
5344              read_exec_only == 1       &&
5345              seg_32bit == 0            &&
5346              limit_in_pages == 0       &&
5347              seg_not_present == 1      &&
5348              useable == 0 )) {
5349             entry_1 = 0;
5350             entry_2 = 0;
5351             goto install;
5352         }
5353     }
5354 
5355     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5356         (ldt_info.limit & 0x0ffff);
5357     entry_2 = (ldt_info.base_addr & 0xff000000) |
5358         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5359         (ldt_info.limit & 0xf0000) |
5360         ((read_exec_only ^ 1) << 9) |
5361         (contents << 10) |
5362         ((seg_not_present ^ 1) << 15) |
5363         (seg_32bit << 22) |
5364         (limit_in_pages << 23) |
5365         (useable << 20) |
5366         (lm << 21) |
5367         0x7000;
5368 
5369     /* Install the new entry ...  */
5370 install:
5371     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5372     lp[0] = tswap32(entry_1);
5373     lp[1] = tswap32(entry_2);
5374     return 0;
5375 }
5376 
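/* get_thread_area: decode the GDT descriptor back into the guest structure. */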
5377 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5378 {
5379     struct target_modify_ldt_ldt_s *target_ldt_info;
5380     uint64_t *gdt_table = g2h(env->gdt.base);
5381     uint32_t base_addr, limit, flags;
5382     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5383     int seg_not_present, useable, lm;
5384     uint32_t *lp, entry_1, entry_2;
5385 
5386     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5387     if (!target_ldt_info)
5388         return -TARGET_EFAULT;
5389     idx = tswap32(target_ldt_info->entry_number);
5390     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5391         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5392         unlock_user_struct(target_ldt_info, ptr, 1);
5393         return -TARGET_EINVAL;
5394     }
5395     lp = (uint32_t *)(gdt_table + idx);
5396     entry_1 = tswap32(lp[0]);
5397     entry_2 = tswap32(lp[1]);
5398 
5399     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5400     contents = (entry_2 >> 10) & 3;
5401     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5402     seg_32bit = (entry_2 >> 22) & 1;
5403     limit_in_pages = (entry_2 >> 23) & 1;
5404     useable = (entry_2 >> 20) & 1;
5405 #ifdef TARGET_ABI32
5406     lm = 0;
5407 #else
5408     lm = (entry_2 >> 21) & 1;
5409 #endif
5410     flags = (seg_32bit << 0) | (contents << 1) |
5411         (read_exec_only << 3) | (limit_in_pages << 4) |
5412         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5413     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5414     base_addr = (entry_1 >> 16) |
5415         (entry_2 & 0xff000000) |
5416         ((entry_2 & 0xff) << 16);
5417     target_ldt_info->base_addr = tswapal(base_addr);
5418     target_ldt_info->limit = tswap32(limit);
5419     target_ldt_info->flags = tswap32(flags);
5420     unlock_user_struct(target_ldt_info, ptr, 1);
5421     return 0;
5422 }
5423 #endif /* TARGET_I386 && TARGET_ABI32 */
5424 
5425 #ifndef TARGET_ABI32
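/*
 * arch_prctl: ARCH_SET_FS/ARCH_SET_GS load a new segment base into the
 * emulated CPU state; ARCH_GET_FS/ARCH_GET_GS write the current base back
 * to guest memory.
 */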
5426 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5427 {
5428     abi_long ret = 0;
5429     abi_ulong val;
5430     int idx;
5431 
5432     switch(code) {
5433     case TARGET_ARCH_SET_GS:
5434     case TARGET_ARCH_SET_FS:
5435         if (code == TARGET_ARCH_SET_GS)
5436             idx = R_GS;
5437         else
5438             idx = R_FS;
5439         cpu_x86_load_seg(env, idx, 0);
5440         env->segs[idx].base = addr;
5441         break;
5442     case TARGET_ARCH_GET_GS:
5443     case TARGET_ARCH_GET_FS:
5444         if (code == TARGET_ARCH_GET_GS)
5445             idx = R_GS;
5446         else
5447             idx = R_FS;
5448         val = env->segs[idx].base;
5449         if (put_user(val, addr, abi_ulong))
5450             ret = -TARGET_EFAULT;
5451         break;
5452     default:
5453         ret = -TARGET_EINVAL;
5454         break;
5455     }
5456     return ret;
5457 }
5458 #endif
5459 
5460 #endif /* defined(TARGET_I386) */
5461 
5462 #define NEW_STACK_SIZE 0x40000
5463 
5464 
5465 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5466 typedef struct {
5467     CPUArchState *env;
5468     pthread_mutex_t mutex;
5469     pthread_cond_t cond;
5470     pthread_t thread;
5471     uint32_t tid;
5472     abi_ulong child_tidptr;
5473     abi_ulong parent_tidptr;
5474     sigset_t sigmask;
5475 } new_thread_info;
5476 
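/*
 * Entry point for threads created by do_fork() with CLONE_VM: register with
 * RCU and TCG, publish the new TID, restore the signal mask, signal the
 * parent that setup is complete, then enter the CPU loop.
 */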
5477 static void *clone_func(void *arg)
5478 {
5479     new_thread_info *info = arg;
5480     CPUArchState *env;
5481     CPUState *cpu;
5482     TaskState *ts;
5483 
5484     rcu_register_thread();
5485     tcg_register_thread();
5486     env = info->env;
5487     cpu = ENV_GET_CPU(env);
5488     thread_cpu = cpu;
5489     ts = (TaskState *)cpu->opaque;
5490     info->tid = sys_gettid();
5491     task_settid(ts);
5492     if (info->child_tidptr)
5493         put_user_u32(info->tid, info->child_tidptr);
5494     if (info->parent_tidptr)
5495         put_user_u32(info->tid, info->parent_tidptr);
5496     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5497     /* Enable signals.  */
5498     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5499     /* Signal to the parent that we're ready.  */
5500     pthread_mutex_lock(&info->mutex);
5501     pthread_cond_broadcast(&info->cond);
5502     pthread_mutex_unlock(&info->mutex);
5503     /* Wait until the parent has finished initializing the TLS state.  */
5504     pthread_mutex_lock(&clone_lock);
5505     pthread_mutex_unlock(&clone_lock);
5506     cpu_loop(env);
5507     /* never exits */
5508     return NULL;
5509 }
5510 
5511 /* do_fork() must return host values and target errnos (unlike most
5512    do_*() functions). */
5513 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5514                    abi_ulong parent_tidptr, target_ulong newtls,
5515                    abi_ulong child_tidptr)
5516 {
5517     CPUState *cpu = ENV_GET_CPU(env);
5518     int ret;
5519     TaskState *ts;
5520     CPUState *new_cpu;
5521     CPUArchState *new_env;
5522     sigset_t sigmask;
5523 
5524     flags &= ~CLONE_IGNORED_FLAGS;
5525 
5526     /* Emulate vfork() with fork() */
5527     if (flags & CLONE_VFORK)
5528         flags &= ~(CLONE_VFORK | CLONE_VM);
5529 
5530     if (flags & CLONE_VM) {
5531         TaskState *parent_ts = (TaskState *)cpu->opaque;
5532         new_thread_info info;
5533         pthread_attr_t attr;
5534 
5535         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5536             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5537             return -TARGET_EINVAL;
5538         }
5539 
5540         ts = g_new0(TaskState, 1);
5541         init_task_state(ts);
5542 
5543         /* Grab a mutex so that thread setup appears atomic.  */
5544         pthread_mutex_lock(&clone_lock);
5545 
5546         /* Create a new CPU instance. */
5547         new_env = cpu_copy(env);
5548         /* Init regs that differ from the parent.  */
5549         cpu_clone_regs(new_env, newsp);
5550         new_cpu = ENV_GET_CPU(new_env);
5551         new_cpu->opaque = ts;
5552         ts->bprm = parent_ts->bprm;
5553         ts->info = parent_ts->info;
5554         ts->signal_mask = parent_ts->signal_mask;
5555 
5556         if (flags & CLONE_CHILD_CLEARTID) {
5557             ts->child_tidptr = child_tidptr;
5558         }
5559 
5560         if (flags & CLONE_SETTLS) {
5561             cpu_set_tls (new_env, newtls);
5562         }
5563 
5564         memset(&info, 0, sizeof(info));
5565         pthread_mutex_init(&info.mutex, NULL);
5566         pthread_mutex_lock(&info.mutex);
5567         pthread_cond_init(&info.cond, NULL);
5568         info.env = new_env;
5569         if (flags & CLONE_CHILD_SETTID) {
5570             info.child_tidptr = child_tidptr;
5571         }
5572         if (flags & CLONE_PARENT_SETTID) {
5573             info.parent_tidptr = parent_tidptr;
5574         }
5575 
5576         ret = pthread_attr_init(&attr);
5577         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5578         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5579         /* It is not safe to deliver signals until the child has finished
5580            initializing, so temporarily block all signals.  */
5581         sigfillset(&sigmask);
5582         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5583         cpu->random_seed = qemu_guest_random_seed_thread_part1();
5584 
5585         /* If this is our first additional thread, we need to ensure we
5586          * generate code for parallel execution and flush old translations.
5587          */
5588         if (!parallel_cpus) {
5589             parallel_cpus = true;
5590             tb_flush(cpu);
5591         }
5592 
5593         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5594         /* TODO: Free new CPU state if thread creation failed.  */
5595 
5596         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5597         pthread_attr_destroy(&attr);
5598         if (ret == 0) {
5599             /* Wait for the child to initialize.  */
5600             pthread_cond_wait(&info.cond, &info.mutex);
5601             ret = info.tid;
5602         } else {
5603             ret = -1;
5604         }
5605         pthread_mutex_unlock(&info.mutex);
5606         pthread_cond_destroy(&info.cond);
5607         pthread_mutex_destroy(&info.mutex);
5608         pthread_mutex_unlock(&clone_lock);
5609     } else {
5610         /* If there is no CLONE_VM, we consider it a fork. */
5611         if (flags & CLONE_INVALID_FORK_FLAGS) {
5612             return -TARGET_EINVAL;
5613         }
5614 
5615         /* We can't support custom termination signals */
5616         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5617             return -TARGET_EINVAL;
5618         }
5619 
5620         if (block_signals()) {
5621             return -TARGET_ERESTARTSYS;
5622         }
5623 
5624         fork_start();
5625         ret = fork();
5626         if (ret == 0) {
5627             /* Child Process.  */
5628             cpu_clone_regs(env, newsp);
5629             fork_end(1);
5630             /* There is a race condition here.  The parent process could
5631                theoretically read the TID in the child process before the child
5632                tid is set.  This would require using either ptrace
5633                (not implemented) or having *_tidptr point at a shared memory
5634                mapping.  We can't repeat the spinlock hack used above because
5635                the child process gets its own copy of the lock.  */
5636             if (flags & CLONE_CHILD_SETTID)
5637                 put_user_u32(sys_gettid(), child_tidptr);
5638             if (flags & CLONE_PARENT_SETTID)
5639                 put_user_u32(sys_gettid(), parent_tidptr);
5640             ts = (TaskState *)cpu->opaque;
5641             if (flags & CLONE_SETTLS)
5642                 cpu_set_tls (env, newtls);
5643             if (flags & CLONE_CHILD_CLEARTID)
5644                 ts->child_tidptr = child_tidptr;
5645         } else {
5646             fork_end(0);
5647         }
5648     }
5649     return ret;
5650 }
5651 
5652 /* Warning: does not handle Linux-specific flags... */
5653 static int target_to_host_fcntl_cmd(int cmd)
5654 {
5655     int ret;
5656 
5657     switch(cmd) {
5658     case TARGET_F_DUPFD:
5659     case TARGET_F_GETFD:
5660     case TARGET_F_SETFD:
5661     case TARGET_F_GETFL:
5662     case TARGET_F_SETFL:
5663         ret = cmd;
5664         break;
5665     case TARGET_F_GETLK:
5666         ret = F_GETLK64;
5667         break;
5668     case TARGET_F_SETLK:
5669         ret = F_SETLK64;
5670         break;
5671     case TARGET_F_SETLKW:
5672         ret = F_SETLKW64;
5673         break;
5674     case TARGET_F_GETOWN:
5675         ret = F_GETOWN;
5676         break;
5677     case TARGET_F_SETOWN:
5678         ret = F_SETOWN;
5679         break;
5680     case TARGET_F_GETSIG:
5681         ret = F_GETSIG;
5682         break;
5683     case TARGET_F_SETSIG:
5684         ret = F_SETSIG;
5685         break;
5686 #if TARGET_ABI_BITS == 32
5687     case TARGET_F_GETLK64:
5688         ret = F_GETLK64;
5689         break;
5690     case TARGET_F_SETLK64:
5691         ret = F_SETLK64;
5692         break;
5693     case TARGET_F_SETLKW64:
5694         ret = F_SETLKW64;
5695         break;
5696 #endif
5697     case TARGET_F_SETLEASE:
5698         ret = F_SETLEASE;
5699         break;
5700     case TARGET_F_GETLEASE:
5701         ret = F_GETLEASE;
5702         break;
5703 #ifdef F_DUPFD_CLOEXEC
5704     case TARGET_F_DUPFD_CLOEXEC:
5705         ret = F_DUPFD_CLOEXEC;
5706         break;
5707 #endif
5708     case TARGET_F_NOTIFY:
5709         ret = F_NOTIFY;
5710         break;
5711 #ifdef F_GETOWN_EX
5712     case TARGET_F_GETOWN_EX:
5713         ret = F_GETOWN_EX;
5714         break;
5715 #endif
5716 #ifdef F_SETOWN_EX
5717     case TARGET_F_SETOWN_EX:
5718         ret = F_SETOWN_EX;
5719         break;
5720 #endif
5721 #ifdef F_SETPIPE_SZ
5722     case TARGET_F_SETPIPE_SZ:
5723         ret = F_SETPIPE_SZ;
5724         break;
5725     case TARGET_F_GETPIPE_SZ:
5726         ret = F_GETPIPE_SZ;
5727         break;
5728 #endif
5729     default:
5730         ret = -TARGET_EINVAL;
5731         break;
5732     }
5733 
5734 #if defined(__powerpc64__)
5735     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which
5736      * the kernel does not support. The glibc fcntl call actually adjusts
5737      * them to 5, 6 and 7 before making the syscall(). Since we make the
5738      * syscall directly, adjust to what the kernel supports.
5739      */
5740     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5741         ret -= F_GETLK64 - 5;
5742     }
5743 #endif
5744 
5745     return ret;
5746 }
5747 
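/* Translate struct flock l_type values between target and host. */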
5748 #define FLOCK_TRANSTBL \
5749     switch (type) { \
5750     TRANSTBL_CONVERT(F_RDLCK); \
5751     TRANSTBL_CONVERT(F_WRLCK); \
5752     TRANSTBL_CONVERT(F_UNLCK); \
5753     TRANSTBL_CONVERT(F_EXLCK); \
5754     TRANSTBL_CONVERT(F_SHLCK); \
5755     }
5756 
5757 static int target_to_host_flock(int type)
5758 {
5759 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5760     FLOCK_TRANSTBL
5761 #undef  TRANSTBL_CONVERT
5762     return -TARGET_EINVAL;
5763 }
5764 
5765 static int host_to_target_flock(int type)
5766 {
5767 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5768     FLOCK_TRANSTBL
5769 #undef  TRANSTBL_CONVERT
5770     /* If we don't know how to convert the value coming
5771      * from the host, copy it to the target field as-is.
5772      */
5773     return type;
5774 }
5775 
5776 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5777                                             abi_ulong target_flock_addr)
5778 {
5779     struct target_flock *target_fl;
5780     int l_type;
5781 
5782     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5783         return -TARGET_EFAULT;
5784     }
5785 
5786     __get_user(l_type, &target_fl->l_type);
5787     l_type = target_to_host_flock(l_type);
5788     if (l_type < 0) {
5789         return l_type;
5790     }
5791     fl->l_type = l_type;
5792     __get_user(fl->l_whence, &target_fl->l_whence);
5793     __get_user(fl->l_start, &target_fl->l_start);
5794     __get_user(fl->l_len, &target_fl->l_len);
5795     __get_user(fl->l_pid, &target_fl->l_pid);
5796     unlock_user_struct(target_fl, target_flock_addr, 0);
5797     return 0;
5798 }
5799 
5800 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5801                                           const struct flock64 *fl)
5802 {
5803     struct target_flock *target_fl;
5804     short l_type;
5805 
5806     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5807         return -TARGET_EFAULT;
5808     }
5809 
5810     l_type = host_to_target_flock(fl->l_type);
5811     __put_user(l_type, &target_fl->l_type);
5812     __put_user(fl->l_whence, &target_fl->l_whence);
5813     __put_user(fl->l_start, &target_fl->l_start);
5814     __put_user(fl->l_len, &target_fl->l_len);
5815     __put_user(fl->l_pid, &target_fl->l_pid);
5816     unlock_user_struct(target_fl, target_flock_addr, 1);
5817     return 0;
5818 }
5819 
5820 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5821 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5822 
5823 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5824 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5825                                                    abi_ulong target_flock_addr)
5826 {
5827     struct target_oabi_flock64 *target_fl;
5828     int l_type;
5829 
5830     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5831         return -TARGET_EFAULT;
5832     }
5833 
5834     __get_user(l_type, &target_fl->l_type);
5835     l_type = target_to_host_flock(l_type);
5836     if (l_type < 0) {
5837         return l_type;
5838     }
5839     fl->l_type = l_type;
5840     __get_user(fl->l_whence, &target_fl->l_whence);
5841     __get_user(fl->l_start, &target_fl->l_start);
5842     __get_user(fl->l_len, &target_fl->l_len);
5843     __get_user(fl->l_pid, &target_fl->l_pid);
5844     unlock_user_struct(target_fl, target_flock_addr, 0);
5845     return 0;
5846 }
5847 
5848 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
5849                                                  const struct flock64 *fl)
5850 {
5851     struct target_oabi_flock64 *target_fl;
5852     short l_type;
5853 
5854     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5855         return -TARGET_EFAULT;
5856     }
5857 
5858     l_type = host_to_target_flock(fl->l_type);
5859     __put_user(l_type, &target_fl->l_type);
5860     __put_user(fl->l_whence, &target_fl->l_whence);
5861     __put_user(fl->l_start, &target_fl->l_start);
5862     __put_user(fl->l_len, &target_fl->l_len);
5863     __put_user(fl->l_pid, &target_fl->l_pid);
5864     unlock_user_struct(target_fl, target_flock_addr, 1);
5865     return 0;
5866 }
5867 #endif
5868 
5869 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
5870                                               abi_ulong target_flock_addr)
5871 {
5872     struct target_flock64 *target_fl;
5873     int l_type;
5874 
5875     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5876         return -TARGET_EFAULT;
5877     }
5878 
5879     __get_user(l_type, &target_fl->l_type);
5880     l_type = target_to_host_flock(l_type);
5881     if (l_type < 0) {
5882         return l_type;
5883     }
5884     fl->l_type = l_type;
5885     __get_user(fl->l_whence, &target_fl->l_whence);
5886     __get_user(fl->l_start, &target_fl->l_start);
5887     __get_user(fl->l_len, &target_fl->l_len);
5888     __get_user(fl->l_pid, &target_fl->l_pid);
5889     unlock_user_struct(target_fl, target_flock_addr, 0);
5890     return 0;
5891 }
5892 
5893 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
5894                                             const struct flock64 *fl)
5895 {
5896     struct target_flock64 *target_fl;
5897     short l_type;
5898 
5899     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5900         return -TARGET_EFAULT;
5901     }
5902 
5903     l_type = host_to_target_flock(fl->l_type);
5904     __put_user(l_type, &target_fl->l_type);
5905     __put_user(fl->l_whence, &target_fl->l_whence);
5906     __put_user(fl->l_start, &target_fl->l_start);
5907     __put_user(fl->l_len, &target_fl->l_len);
5908     __put_user(fl->l_pid, &target_fl->l_pid);
5909     unlock_user_struct(target_fl, target_flock_addr, 1);
5910     return 0;
5911 }
5912 
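/*
 * fcntl: translate the command and any flock/f_owner_ex/flag arguments
 * between target and host representations around the host fcntl() call.
 */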
5913 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
5914 {
5915     struct flock64 fl64;
5916 #ifdef F_GETOWN_EX
5917     struct f_owner_ex fox;
5918     struct target_f_owner_ex *target_fox;
5919 #endif
5920     abi_long ret;
5921     int host_cmd = target_to_host_fcntl_cmd(cmd);
5922 
5923     if (host_cmd == -TARGET_EINVAL)
5924         return host_cmd;
5925 
5926     switch(cmd) {
5927     case TARGET_F_GETLK:
5928         ret = copy_from_user_flock(&fl64, arg);
5929         if (ret) {
5930             return ret;
5931         }
5932         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5933         if (ret == 0) {
5934             ret = copy_to_user_flock(arg, &fl64);
5935         }
5936         break;
5937 
5938     case TARGET_F_SETLK:
5939     case TARGET_F_SETLKW:
5940         ret = copy_from_user_flock(&fl64, arg);
5941         if (ret) {
5942             return ret;
5943         }
5944         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5945         break;
5946 
5947     case TARGET_F_GETLK64:
5948         ret = copy_from_user_flock64(&fl64, arg);
5949         if (ret) {
5950             return ret;
5951         }
5952         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5953         if (ret == 0) {
5954             ret = copy_to_user_flock64(arg, &fl64);
5955         }
5956         break;
5957     case TARGET_F_SETLK64:
5958     case TARGET_F_SETLKW64:
5959         ret = copy_from_user_flock64(&fl64, arg);
5960         if (ret) {
5961             return ret;
5962         }
5963         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5964         break;
5965 
5966     case TARGET_F_GETFL:
5967         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5968         if (ret >= 0) {
5969             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
5970         }
5971         break;
5972 
5973     case TARGET_F_SETFL:
5974         ret = get_errno(safe_fcntl(fd, host_cmd,
5975                                    target_to_host_bitmask(arg,
5976                                                           fcntl_flags_tbl)));
5977         break;
5978 
5979 #ifdef F_GETOWN_EX
5980     case TARGET_F_GETOWN_EX:
5981         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5982         if (ret >= 0) {
5983             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
5984                 return -TARGET_EFAULT;
5985             target_fox->type = tswap32(fox.type);
5986             target_fox->pid = tswap32(fox.pid);
5987             unlock_user_struct(target_fox, arg, 1);
5988         }
5989         break;
5990 #endif
5991 
5992 #ifdef F_SETOWN_EX
5993     case TARGET_F_SETOWN_EX:
5994         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
5995             return -TARGET_EFAULT;
5996         fox.type = tswap32(target_fox->type);
5997         fox.pid = tswap32(target_fox->pid);
5998         unlock_user_struct(target_fox, arg, 0);
5999         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6000         break;
6001 #endif
6002 
6003     case TARGET_F_SETOWN:
6004     case TARGET_F_GETOWN:
6005     case TARGET_F_SETSIG:
6006     case TARGET_F_GETSIG:
6007     case TARGET_F_SETLEASE:
6008     case TARGET_F_GETLEASE:
6009     case TARGET_F_SETPIPE_SZ:
6010     case TARGET_F_GETPIPE_SZ:
6011         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6012         break;
6013 
6014     default:
6015         ret = get_errno(safe_fcntl(fd, cmd, arg));
6016         break;
6017     }
6018     return ret;
6019 }
6020 
6021 #ifdef USE_UID16
6022 
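/*
 * With 16-bit uid_t/gid_t on the target, clamp out-of-range host IDs to the
 * overflow value 65534, and preserve -1 ("no change") when widening.
 */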
6023 static inline int high2lowuid(int uid)
6024 {
6025     if (uid > 65535)
6026         return 65534;
6027     else
6028         return uid;
6029 }
6030 
6031 static inline int high2lowgid(int gid)
6032 {
6033     if (gid > 65535)
6034         return 65534;
6035     else
6036         return gid;
6037 }
6038 
6039 static inline int low2highuid(int uid)
6040 {
6041     if ((int16_t)uid == -1)
6042         return -1;
6043     else
6044         return uid;
6045 }
6046 
6047 static inline int low2highgid(int gid)
6048 {
6049     if ((int16_t)gid == -1)
6050         return -1;
6051     else
6052         return gid;
6053 }
6054 static inline int tswapid(int id)
6055 {
6056     return tswap16(id);
6057 }
6058 
6059 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6060 
6061 #else /* !USE_UID16 */
6062 static inline int high2lowuid(int uid)
6063 {
6064     return uid;
6065 }
6066 static inline int high2lowgid(int gid)
6067 {
6068     return gid;
6069 }
6070 static inline int low2highuid(int uid)
6071 {
6072     return uid;
6073 }
6074 static inline int low2highgid(int gid)
6075 {
6076     return gid;
6077 }
6078 static inline int tswapid(int id)
6079 {
6080     return tswap32(id);
6081 }
6082 
6083 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6084 
6085 #endif /* USE_UID16 */
6086 
6087 /* We must do direct syscalls for setting UID/GID, because we want to
6088  * implement the Linux system call semantics of "change only for this thread",
6089  * not the libc/POSIX semantics of "change for all threads in process".
6090  * (See http://ewontfix.com/17/ for more details.)
6091  * We use the 32-bit version of the syscalls if present; if it is not
6092  * then either the host architecture supports 32-bit UIDs natively with
6093  * the standard syscall, or the 16-bit UID is the best we can do.
6094  */
6095 #ifdef __NR_setuid32
6096 #define __NR_sys_setuid __NR_setuid32
6097 #else
6098 #define __NR_sys_setuid __NR_setuid
6099 #endif
6100 #ifdef __NR_setgid32
6101 #define __NR_sys_setgid __NR_setgid32
6102 #else
6103 #define __NR_sys_setgid __NR_setgid
6104 #endif
6105 #ifdef __NR_setresuid32
6106 #define __NR_sys_setresuid __NR_setresuid32
6107 #else
6108 #define __NR_sys_setresuid __NR_setresuid
6109 #endif
6110 #ifdef __NR_setresgid32
6111 #define __NR_sys_setresgid __NR_setresgid32
6112 #else
6113 #define __NR_sys_setresgid __NR_setresgid
6114 #endif
6115 
6116 _syscall1(int, sys_setuid, uid_t, uid)
6117 _syscall1(int, sys_setgid, gid_t, gid)
6118 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6119 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6120 
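/*
 * One-time setup: register the struct thunk descriptors, build the
 * target-to-host errno table, and patch ioctl numbers whose size field is
 * marked for run-time computation.
 */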
6121 void syscall_init(void)
6122 {
6123     IOCTLEntry *ie;
6124     const argtype *arg_type;
6125     int size;
6126     int i;
6127 
6128     thunk_init(STRUCT_MAX);
6129 
6130 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6131 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6132 #include "syscall_types.h"
6133 #undef STRUCT
6134 #undef STRUCT_SPECIAL
6135 
6136     /* Build target_to_host_errno_table[] from
6137      * host_to_target_errno_table[]. */
6138     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6139         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6140     }
6141 
6142     /* Patch the ioctl size if necessary. We rely on the fact that
6143        no ioctl has all bits set to '1' in the size field. */
6144     ie = ioctl_entries;
6145     while (ie->target_cmd != 0) {
6146         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6147             TARGET_IOC_SIZEMASK) {
6148             arg_type = ie->arg_type;
6149             if (arg_type[0] != TYPE_PTR) {
6150                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6151                         ie->target_cmd);
6152                 exit(1);
6153             }
6154             arg_type++;
6155             size = thunk_type_size(arg_type, 0);
6156             ie->target_cmd = (ie->target_cmd &
6157                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6158                 (size << TARGET_IOC_SIZESHIFT);
6159         }
6160 
6161         /* automatic consistency check if same arch */
6162 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6163     (defined(__x86_64__) && defined(TARGET_X86_64))
6164         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6165             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6166                     ie->name, ie->target_cmd, ie->host_cmd);
6167         }
6168 #endif
6169         ie++;
6170     }
6171 }
6172 
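/*
 * On 32-bit ABIs a 64-bit file offset is passed as a pair of registers whose
 * order depends on target endianness; combine them here.  On 64-bit ABIs the
 * offset already arrives in a single register.
 */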
6173 #if TARGET_ABI_BITS == 32
6174 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6175 {
6176 #ifdef TARGET_WORDS_BIGENDIAN
6177     return ((uint64_t)word0 << 32) | word1;
6178 #else
6179     return ((uint64_t)word1 << 32) | word0;
6180 #endif
6181 }
6182 #else /* TARGET_ABI_BITS == 32 */
6183 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6184 {
6185     return word0;
6186 }
6187 #endif /* TARGET_ABI_BITS != 32 */
6188 
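/*
 * A worked example, kept out of the build, for target_offset64() above:
 * on a 32-bit little-endian guest ABI the low word arrives first, so the
 * second register supplies the high half.  The values are hypothetical.
 */
#if 0
static uint64_t example_offset64(void)
{
    /* Little-endian 32-bit ABI:
     * target_offset64(0x89abcdef, 0x01234567) == 0x0123456789abcdefULL
     */
    return target_offset64(0x89abcdef, 0x01234567);
}
#endif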
6189 #ifdef TARGET_NR_truncate64
6190 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6191                                          abi_long arg2,
6192                                          abi_long arg3,
6193                                          abi_long arg4)
6194 {
6195     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6196         arg2 = arg3;
6197         arg3 = arg4;
6198     }
6199     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6200 }
6201 #endif
6202 
6203 #ifdef TARGET_NR_ftruncate64
6204 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6205                                           abi_long arg2,
6206                                           abi_long arg3,
6207                                           abi_long arg4)
6208 {
6209     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6210         arg2 = arg3;
6211         arg3 = arg4;
6212     }
6213     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6214 }
6215 #endif
6216 
6217 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6218                                                abi_ulong target_addr)
6219 {
6220     struct target_timespec *target_ts;
6221 
6222     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6223         return -TARGET_EFAULT;
6224     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6225     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6226     unlock_user_struct(target_ts, target_addr, 0);
6227     return 0;
6228 }
6229 
6230 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6231                                                struct timespec *host_ts)
6232 {
6233     struct target_timespec *target_ts;
6234 
6235     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6236         return -TARGET_EFAULT;
6237     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6238     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6239     unlock_user_struct(target_ts, target_addr, 1);
6240     return 0;
6241 }
6242 
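/*
 * A minimal sketch, kept out of the build, of the usual round trip through
 * the two timespec helpers above for a nanosleep-style call.  The helper
 * name and the direct call to the host nanosleep() are hypothetical; the
 * real emulation may go through a safe_* wrapper instead.
 */
#if 0
static abi_long example_nanosleep(abi_ulong req_addr, abi_ulong rem_addr)
{
    struct timespec req, rem;
    abi_long ret;

    /* Copy the guest's request into a host struct timespec. */
    if (target_to_host_timespec(&req, req_addr)) {
        return -TARGET_EFAULT;
    }
    ret = get_errno(nanosleep(&req, &rem));
    /* On interruption, report the remaining time back to the guest. */
    if (is_error(ret) && rem_addr && host_to_target_timespec(rem_addr, &rem)) {
        return -TARGET_EFAULT;
    }
    return ret;
}
#endif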
6243 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6244                                                  abi_ulong target_addr)
6245 {
6246     struct target_itimerspec *target_itspec;
6247 
6248     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6249         return -TARGET_EFAULT;
6250     }
6251 
6252     host_itspec->it_interval.tv_sec =
6253                             tswapal(target_itspec->it_interval.tv_sec);
6254     host_itspec->it_interval.tv_nsec =
6255                             tswapal(target_itspec->it_interval.tv_nsec);
6256     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6257     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6258 
6259     unlock_user_struct(target_itspec, target_addr, 1);
6260     return 0;
6261 }
6262 
6263 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6264                                                struct itimerspec *host_its)
6265 {
6266     struct target_itimerspec *target_itspec;
6267 
6268     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6269         return -TARGET_EFAULT;
6270     }
6271 
6272     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6273     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6274 
6275     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6276     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6277 
6278     unlock_user_struct(target_itspec, target_addr, 0);
6279     return 0;
6280 }
6281 
6282 static inline abi_long target_to_host_timex(struct timex *host_tx,
6283                                             abi_long target_addr)
6284 {
6285     struct target_timex *target_tx;
6286 
6287     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6288         return -TARGET_EFAULT;
6289     }
6290 
6291     __get_user(host_tx->modes, &target_tx->modes);
6292     __get_user(host_tx->offset, &target_tx->offset);
6293     __get_user(host_tx->freq, &target_tx->freq);
6294     __get_user(host_tx->maxerror, &target_tx->maxerror);
6295     __get_user(host_tx->esterror, &target_tx->esterror);
6296     __get_user(host_tx->status, &target_tx->status);
6297     __get_user(host_tx->constant, &target_tx->constant);
6298     __get_user(host_tx->precision, &target_tx->precision);
6299     __get_user(host_tx->tolerance, &target_tx->tolerance);
6300     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6301     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6302     __get_user(host_tx->tick, &target_tx->tick);
6303     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6304     __get_user(host_tx->jitter, &target_tx->jitter);
6305     __get_user(host_tx->shift, &target_tx->shift);
6306     __get_user(host_tx->stabil, &target_tx->stabil);
6307     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6308     __get_user(host_tx->calcnt, &target_tx->calcnt);
6309     __get_user(host_tx->errcnt, &target_tx->errcnt);
6310     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6311     __get_user(host_tx->tai, &target_tx->tai);
6312 
6313     unlock_user_struct(target_tx, target_addr, 0);
6314     return 0;
6315 }
6316 
6317 static inline abi_long host_to_target_timex(abi_long target_addr,
6318                                             struct timex *host_tx)
6319 {
6320     struct target_timex *target_tx;
6321 
6322     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6323         return -TARGET_EFAULT;
6324     }
6325 
6326     __put_user(host_tx->modes, &target_tx->modes);
6327     __put_user(host_tx->offset, &target_tx->offset);
6328     __put_user(host_tx->freq, &target_tx->freq);
6329     __put_user(host_tx->maxerror, &target_tx->maxerror);
6330     __put_user(host_tx->esterror, &target_tx->esterror);
6331     __put_user(host_tx->status, &target_tx->status);
6332     __put_user(host_tx->constant, &target_tx->constant);
6333     __put_user(host_tx->precision, &target_tx->precision);
6334     __put_user(host_tx->tolerance, &target_tx->tolerance);
6335     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6336     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6337     __put_user(host_tx->tick, &target_tx->tick);
6338     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6339     __put_user(host_tx->jitter, &target_tx->jitter);
6340     __put_user(host_tx->shift, &target_tx->shift);
6341     __put_user(host_tx->stabil, &target_tx->stabil);
6342     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6343     __put_user(host_tx->calcnt, &target_tx->calcnt);
6344     __put_user(host_tx->errcnt, &target_tx->errcnt);
6345     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6346     __put_user(host_tx->tai, &target_tx->tai);
6347 
6348     unlock_user_struct(target_tx, target_addr, 1);
6349     return 0;
6350 }
6351 
6352 
6353 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6354                                                abi_ulong target_addr)
6355 {
6356     struct target_sigevent *target_sevp;
6357 
6358     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6359         return -TARGET_EFAULT;
6360     }
6361 
6362     /* This union is awkward on 64 bit systems because it has a 32 bit
6363      * integer and a pointer in it; we follow the conversion approach
6364      * used for handling sigval types in signal.c so the guest should get
6365      * the correct value back even if we did a 64 bit byteswap and it's
6366      * using the 32 bit integer.
6367      */
6368     host_sevp->sigev_value.sival_ptr =
6369         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6370     host_sevp->sigev_signo =
6371         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6372     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6373     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6374 
6375     unlock_user_struct(target_sevp, target_addr, 1);
6376     return 0;
6377 }
6378 
6379 #if defined(TARGET_NR_mlockall)
6380 static inline int target_to_host_mlockall_arg(int arg)
6381 {
6382     int result = 0;
6383 
6384     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6385         result |= MCL_CURRENT;
6386     }
6387     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6388         result |= MCL_FUTURE;
6389     }
6390     return result;
6391 }
6392 #endif
6393 
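/*
 * A minimal sketch, kept out of the build, of how the flag translation
 * above would typically be consumed by an mlockall emulation; the helper
 * name is hypothetical.
 */
#if 0
static abi_long example_do_mlockall(abi_long target_flags)
{
    /* Translate TARGET_MLOCKALL_MCL_* bits into host MCL_* bits and hand
     * them straight to the host syscall.
     */
    return get_errno(mlockall(target_to_host_mlockall_arg(target_flags)));
}
#endif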
6394 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6395      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6396      defined(TARGET_NR_newfstatat))
6397 static inline abi_long host_to_target_stat64(void *cpu_env,
6398                                              abi_ulong target_addr,
6399                                              struct stat *host_st)
6400 {
6401 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6402     if (((CPUARMState *)cpu_env)->eabi) {
6403         struct target_eabi_stat64 *target_st;
6404 
6405         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6406             return -TARGET_EFAULT;
6407         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6408         __put_user(host_st->st_dev, &target_st->st_dev);
6409         __put_user(host_st->st_ino, &target_st->st_ino);
6410 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6411         __put_user(host_st->st_ino, &target_st->__st_ino);
6412 #endif
6413         __put_user(host_st->st_mode, &target_st->st_mode);
6414         __put_user(host_st->st_nlink, &target_st->st_nlink);
6415         __put_user(host_st->st_uid, &target_st->st_uid);
6416         __put_user(host_st->st_gid, &target_st->st_gid);
6417         __put_user(host_st->st_rdev, &target_st->st_rdev);
6418         __put_user(host_st->st_size, &target_st->st_size);
6419         __put_user(host_st->st_blksize, &target_st->st_blksize);
6420         __put_user(host_st->st_blocks, &target_st->st_blocks);
6421         __put_user(host_st->st_atime, &target_st->target_st_atime);
6422         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6423         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6424 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6425         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6426         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6427         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6428 #endif
6429         unlock_user_struct(target_st, target_addr, 1);
6430     } else
6431 #endif
6432     {
6433 #if defined(TARGET_HAS_STRUCT_STAT64)
6434         struct target_stat64 *target_st;
6435 #else
6436         struct target_stat *target_st;
6437 #endif
6438 
6439         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6440             return -TARGET_EFAULT;
6441         memset(target_st, 0, sizeof(*target_st));
6442         __put_user(host_st->st_dev, &target_st->st_dev);
6443         __put_user(host_st->st_ino, &target_st->st_ino);
6444 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6445         __put_user(host_st->st_ino, &target_st->__st_ino);
6446 #endif
6447         __put_user(host_st->st_mode, &target_st->st_mode);
6448         __put_user(host_st->st_nlink, &target_st->st_nlink);
6449         __put_user(host_st->st_uid, &target_st->st_uid);
6450         __put_user(host_st->st_gid, &target_st->st_gid);
6451         __put_user(host_st->st_rdev, &target_st->st_rdev);
6452         /* XXX: better use of kernel struct */
6453         __put_user(host_st->st_size, &target_st->st_size);
6454         __put_user(host_st->st_blksize, &target_st->st_blksize);
6455         __put_user(host_st->st_blocks, &target_st->st_blocks);
6456         __put_user(host_st->st_atime, &target_st->target_st_atime);
6457         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6458         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6459 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6460         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6461         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6462         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6463 #endif
6464         unlock_user_struct(target_st, target_addr, 1);
6465     }
6466 
6467     return 0;
6468 }
6469 #endif
6470 
6471 /* ??? Using host futex calls even when target atomic operations
6472    are not really atomic probably breaks things.  However, implementing
6473    futexes locally would make it tricky to support futexes shared between
6474    multiple processes; those are probably useless anyway, because guest
6475    atomic operations won't work either.  */
6476 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6477                     target_ulong uaddr2, int val3)
6478 {
6479     struct timespec ts, *pts;
6480     int base_op;
6481 
6482     /* ??? We assume FUTEX_* constants are the same on both host
6483        and target.  */
6484 #ifdef FUTEX_CMD_MASK
6485     base_op = op & FUTEX_CMD_MASK;
6486 #else
6487     base_op = op;
6488 #endif
6489     switch (base_op) {
6490     case FUTEX_WAIT:
6491     case FUTEX_WAIT_BITSET:
6492         if (timeout) {
6493             pts = &ts;
6494             target_to_host_timespec(pts, timeout);
6495         } else {
6496             pts = NULL;
6497         }
6498         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6499                          pts, NULL, val3));
6500     case FUTEX_WAKE:
6501         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6502     case FUTEX_FD:
6503         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6504     case FUTEX_REQUEUE:
6505     case FUTEX_CMP_REQUEUE:
6506     case FUTEX_WAKE_OP:
6507         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6508            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6509            But the prototype takes a `struct timespec *'; insert casts
6510            to satisfy the compiler.  We do not need to tswap TIMEOUT
6511            since it's not compared to guest memory.  */
6512         pts = (struct timespec *)(uintptr_t) timeout;
6513         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6514                                     g2h(uaddr2),
6515                                     (base_op == FUTEX_CMP_REQUEUE
6516                                      ? tswap32(val3)
6517                                      : val3)));
6518     default:
6519         return -TARGET_ENOSYS;
6520     }
6521 }
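/*
 * A minimal sketch, kept out of the build, of one concrete user of this
 * futex path: thread exit zeroes the CLONE_CHILD_CLEARTID address and then
 * wakes any waiter so a guest pthread_join() can return (see the
 * TARGET_NR_exit handling later in this file).  The helper name is
 * hypothetical.
 */
#if 0
static void example_child_tid_wake(TaskState *ts)
{
    put_user_u32(0, ts->child_tidptr);
    sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
}
#endif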
6522 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6523 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6524                                      abi_long handle, abi_long mount_id,
6525                                      abi_long flags)
6526 {
6527     struct file_handle *target_fh;
6528     struct file_handle *fh;
6529     int mid = 0;
6530     abi_long ret;
6531     char *name;
6532     unsigned int size, total_size;
6533 
6534     if (get_user_s32(size, handle)) {
6535         return -TARGET_EFAULT;
6536     }
6537 
6538     name = lock_user_string(pathname);
6539     if (!name) {
6540         return -TARGET_EFAULT;
6541     }
6542 
6543     total_size = sizeof(struct file_handle) + size;
6544     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6545     if (!target_fh) {
6546         unlock_user(name, pathname, 0);
6547         return -TARGET_EFAULT;
6548     }
6549 
6550     fh = g_malloc0(total_size);
6551     fh->handle_bytes = size;
6552 
6553     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6554     unlock_user(name, pathname, 0);
6555 
6556     /* man name_to_handle_at(2):
6557      * Other than the use of the handle_bytes field, the caller should treat
6558      * the file_handle structure as an opaque data type
6559      */
6560 
6561     memcpy(target_fh, fh, total_size);
6562     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6563     target_fh->handle_type = tswap32(fh->handle_type);
6564     g_free(fh);
6565     unlock_user(target_fh, handle, total_size);
6566 
6567     if (put_user_s32(mid, mount_id)) {
6568         return -TARGET_EFAULT;
6569     }
6570 
6571     return ret;
6572 
6573 }
6574 #endif
6575 
6576 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6577 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6578                                      abi_long flags)
6579 {
6580     struct file_handle *target_fh;
6581     struct file_handle *fh;
6582     unsigned int size, total_size;
6583     abi_long ret;
6584 
6585     if (get_user_s32(size, handle)) {
6586         return -TARGET_EFAULT;
6587     }
6588 
6589     total_size = sizeof(struct file_handle) + size;
6590     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6591     if (!target_fh) {
6592         return -TARGET_EFAULT;
6593     }
6594 
6595     fh = g_memdup(target_fh, total_size);
6596     fh->handle_bytes = size;
6597     fh->handle_type = tswap32(target_fh->handle_type);
6598 
6599     ret = get_errno(open_by_handle_at(mount_fd, fh,
6600                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6601 
6602     g_free(fh);
6603 
6604     unlock_user(target_fh, handle, total_size);
6605 
6606     return ret;
6607 }
6608 #endif
6609 
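/*
 * A minimal sketch, kept out of the build, of the host-side pairing that
 * the two wrappers above emulate: a handle produced by name_to_handle_at()
 * is later redeemed with open_by_handle_at().  The path, flags and helper
 * name are hypothetical.
 */
#if 0
static int example_handle_round_trip(int dirfd, int mount_fd)
{
    char buf[sizeof(struct file_handle) + MAX_HANDLE_SZ];
    struct file_handle *fh = (struct file_handle *)buf;
    int mount_id;

    fh->handle_bytes = MAX_HANDLE_SZ;
    if (name_to_handle_at(dirfd, "some/path", fh, &mount_id, 0) < 0) {
        return -1;
    }
    /* Apart from handle_bytes/handle_type the handle is opaque; pass it
     * back verbatim to reopen the file.
     */
    return open_by_handle_at(mount_fd, fh, O_RDONLY);
}
#endif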
6610 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6611 
6612 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6613 {
6614     int host_flags;
6615     target_sigset_t *target_mask;
6616     sigset_t host_mask;
6617     abi_long ret;
6618 
6619     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6620         return -TARGET_EINVAL;
6621     }
6622     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6623         return -TARGET_EFAULT;
6624     }
6625 
6626     target_to_host_sigset(&host_mask, target_mask);
6627 
6628     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6629 
6630     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6631     if (ret >= 0) {
6632         fd_trans_register(ret, &target_signalfd_trans);
6633     }
6634 
6635     unlock_user_struct(target_mask, mask, 0);
6636 
6637     return ret;
6638 }
6639 #endif
6640 
6641 /* Map host to target signal numbers for the wait family of syscalls.
6642    Assume all other status bits are the same.  */
6643 int host_to_target_waitstatus(int status)
6644 {
6645     if (WIFSIGNALED(status)) {
6646         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6647     }
6648     if (WIFSTOPPED(status)) {
6649         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6650                | (status & 0xff);
6651     }
6652     return status;
6653 }
6654 
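/*
 * A worked example, kept out of the build, of the wait status layout that
 * host_to_target_waitstatus() relies on: for a signalled child the signal
 * number sits in bits 0-6 (with the core-dump flag in bit 7), so only those
 * bits need remapping; for a stopped child the stop signal sits in bits
 * 8-15 above a 0x7f low byte.
 */
#if 0
static void example_waitstatus_layout(int host_status)
{
    if (WIFSIGNALED(host_status)) {
        int sig = WTERMSIG(host_status);    /* bits 0-6 */
        (void)sig;
    } else if (WIFSTOPPED(host_status)) {
        int sig = WSTOPSIG(host_status);    /* bits 8-15 */
        (void)sig;
    }
}
#endif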
6655 static int open_self_cmdline(void *cpu_env, int fd)
6656 {
6657     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6658     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6659     int i;
6660 
6661     for (i = 0; i < bprm->argc; i++) {
6662         size_t len = strlen(bprm->argv[i]) + 1;
6663 
6664         if (write(fd, bprm->argv[i], len) != len) {
6665             return -1;
6666         }
6667     }
6668 
6669     return 0;
6670 }
6671 
6672 static int open_self_maps(void *cpu_env, int fd)
6673 {
6674     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6675     TaskState *ts = cpu->opaque;
6676     FILE *fp;
6677     char *line = NULL;
6678     size_t len = 0;
6679     ssize_t read;
6680 
6681     fp = fopen("/proc/self/maps", "r");
6682     if (fp == NULL) {
6683         return -1;
6684     }
6685 
6686     while ((read = getline(&line, &len, fp)) != -1) {
6687         int fields, dev_maj, dev_min, inode;
6688         uint64_t min, max, offset;
6689         char flag_r, flag_w, flag_x, flag_p;
6690         char path[512] = "";
6691         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6692                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6693                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6694 
6695         if ((fields < 10) || (fields > 11)) {
6696             continue;
6697         }
6698         if (h2g_valid(min)) {
6699             int flags = page_get_flags(h2g(min));
6700             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6701             if (page_check_range(h2g(min), max - min, flags) == -1) {
6702                 continue;
6703             }
6704             if (h2g(min) == ts->info->stack_limit) {
6705                 pstrcpy(path, sizeof(path), "      [stack]");
6706             }
6707             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6708                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6709                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6710                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6711                     path[0] ? "         " : "", path);
6712         }
6713     }
6714 
6715     free(line);
6716     fclose(fp);
6717 
6718     return 0;
6719 }
6720 
6721 static int open_self_stat(void *cpu_env, int fd)
6722 {
6723     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6724     TaskState *ts = cpu->opaque;
6725     abi_ulong start_stack = ts->info->start_stack;
6726     int i;
6727 
6728     for (i = 0; i < 44; i++) {
6729       char buf[128];
6730       int len;
6731       uint64_t val = 0;
6732 
6733       if (i == 0) {
6734         /* pid */
6735         val = getpid();
6736         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6737       } else if (i == 1) {
6738         /* app name */
6739         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6740       } else if (i == 27) {
6741         /* stack bottom */
6742         val = start_stack;
6743         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6744       } else {
6745         /* every remaining field is reported as zero */
6746         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6747       }
6748 
6749       len = strlen(buf);
6750       if (write(fd, buf, len) != len) {
6751           return -1;
6752       }
6753     }
6754 
6755     return 0;
6756 }
6757 
6758 static int open_self_auxv(void *cpu_env, int fd)
6759 {
6760     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6761     TaskState *ts = cpu->opaque;
6762     abi_ulong auxv = ts->info->saved_auxv;
6763     abi_ulong len = ts->info->auxv_len;
6764     char *ptr;
6765 
6766     /*
6767      * The auxiliary vector is stored on the target process's stack;
6768      * read the whole vector and copy it to the file.
6769      */
6770     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6771     if (ptr != NULL) {
6772         while (len > 0) {
6773             ssize_t r;
6774             r = write(fd, ptr, len);
6775             if (r <= 0) {
6776                 break;
6777             }
6778             len -= r;
6779             ptr += r;
6780         }
6781         lseek(fd, 0, SEEK_SET);
6782         unlock_user(ptr, auxv, len);
6783     }
6784 
6785     return 0;
6786 }
6787 
6788 static int is_proc_myself(const char *filename, const char *entry)
6789 {
6790     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6791         filename += strlen("/proc/");
6792         if (!strncmp(filename, "self/", strlen("self/"))) {
6793             filename += strlen("self/");
6794         } else if (*filename >= '1' && *filename <= '9') {
6795             char myself[80];
6796             snprintf(myself, sizeof(myself), "%d/", getpid());
6797             if (!strncmp(filename, myself, strlen(myself))) {
6798                 filename += strlen(myself);
6799             } else {
6800                 return 0;
6801             }
6802         } else {
6803             return 0;
6804         }
6805         if (!strcmp(filename, entry)) {
6806             return 1;
6807         }
6808     }
6809     return 0;
6810 }
6811 
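/*
 * A worked example, kept out of the build, of what is_proc_myself() above
 * accepts: both the "self" spelling and the numeric-PID spelling of the
 * calling process match, anything else does not.  The PID values are
 * hypothetical.
 */
#if 0
static void example_is_proc_myself(void)
{
    /* Both return 1 when the current PID is 1234: */
    is_proc_myself("/proc/self/maps", "maps");
    is_proc_myself("/proc/1234/maps", "maps");
    /* Another process's entry is not intercepted (returns 0): */
    is_proc_myself("/proc/4321/maps", "maps");
}
#endif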
6812 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
6813     defined(TARGET_SPARC) || defined(TARGET_M68K)
6814 static int is_proc(const char *filename, const char *entry)
6815 {
6816     return strcmp(filename, entry) == 0;
6817 }
6818 #endif
6819 
6820 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6821 static int open_net_route(void *cpu_env, int fd)
6822 {
6823     FILE *fp;
6824     char *line = NULL;
6825     size_t len = 0;
6826     ssize_t read;
6827 
6828     fp = fopen("/proc/net/route", "r");
6829     if (fp == NULL) {
6830         return -1;
6831     }
6832 
6833     /* read header */
6834 
6835     read = getline(&line, &len, fp);
6836     dprintf(fd, "%s", line);
6837 
6838     /* read routes */
6839 
6840     while ((read = getline(&line, &len, fp)) != -1) {
6841         char iface[16];
6842         uint32_t dest, gw, mask;
6843         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6844         int fields;
6845 
6846         fields = sscanf(line,
6847                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6848                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6849                         &mask, &mtu, &window, &irtt);
6850         if (fields != 11) {
6851             continue;
6852         }
6853         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6854                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6855                 metric, tswap32(mask), mtu, window, irtt);
6856     }
6857 
6858     free(line);
6859     fclose(fp);
6860 
6861     return 0;
6862 }
6863 #endif
6864 
6865 #if defined(TARGET_SPARC)
6866 static int open_cpuinfo(void *cpu_env, int fd)
6867 {
6868     dprintf(fd, "type\t\t: sun4u\n");
6869     return 0;
6870 }
6871 #endif
6872 
6873 #if defined(TARGET_M68K)
6874 static int open_hardware(void *cpu_env, int fd)
6875 {
6876     dprintf(fd, "Model:\t\tqemu-m68k\n");
6877     return 0;
6878 }
6879 #endif
6880 
6881 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
6882 {
6883     struct fake_open {
6884         const char *filename;
6885         int (*fill)(void *cpu_env, int fd);
6886         int (*cmp)(const char *s1, const char *s2);
6887     };
6888     const struct fake_open *fake_open;
6889     static const struct fake_open fakes[] = {
6890         { "maps", open_self_maps, is_proc_myself },
6891         { "stat", open_self_stat, is_proc_myself },
6892         { "auxv", open_self_auxv, is_proc_myself },
6893         { "cmdline", open_self_cmdline, is_proc_myself },
6894 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6895         { "/proc/net/route", open_net_route, is_proc },
6896 #endif
6897 #if defined(TARGET_SPARC)
6898         { "/proc/cpuinfo", open_cpuinfo, is_proc },
6899 #endif
6900 #if defined(TARGET_M68K)
6901         { "/proc/hardware", open_hardware, is_proc },
6902 #endif
6903         { NULL, NULL, NULL }
6904     };
6905 
6906     if (is_proc_myself(pathname, "exe")) {
6907         int execfd = qemu_getauxval(AT_EXECFD);
6908         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
6909     }
6910 
6911     for (fake_open = fakes; fake_open->filename; fake_open++) {
6912         if (fake_open->cmp(pathname, fake_open->filename)) {
6913             break;
6914         }
6915     }
6916 
6917     if (fake_open->filename) {
6918         const char *tmpdir;
6919         char filename[PATH_MAX];
6920         int fd, r;
6921 
6922         /* create a temporary file to hold the synthesized contents */
6923         tmpdir = getenv("TMPDIR");
6924         if (!tmpdir)
6925             tmpdir = "/tmp";
6926         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
6927         fd = mkstemp(filename);
6928         if (fd < 0) {
6929             return fd;
6930         }
6931         unlink(filename);
6932 
6933         if ((r = fake_open->fill(cpu_env, fd))) {
6934             int e = errno;
6935             close(fd);
6936             errno = e;
6937             return r;
6938         }
6939         lseek(fd, 0, SEEK_SET);
6940 
6941         return fd;
6942     }
6943 
6944     return safe_openat(dirfd, path(pathname), flags, mode);
6945 }
6946 
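/*
 * A minimal sketch, kept out of the build, of what the interception in
 * do_openat() above achieves: opening /proc/self/maps from the guest does
 * not expose the host's file; instead a temporary file is filled by
 * open_self_maps() with guest-adjusted addresses and its descriptor is
 * returned.  The helper name is hypothetical.
 */
#if 0
static int example_open_guest_maps(void *cpu_env)
{
    /* Returns an fd whose contents were synthesized for the guest. */
    return do_openat(cpu_env, AT_FDCWD, "/proc/self/maps", O_RDONLY, 0);
}
#endif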
6947 #define TIMER_MAGIC 0x0caf0000
6948 #define TIMER_MAGIC_MASK 0xffff0000
6949 
6950 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
6951 static target_timer_t get_timer_id(abi_long arg)
6952 {
6953     target_timer_t timerid = arg;
6954 
6955     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6956         return -TARGET_EINVAL;
6957     }
6958 
6959     timerid &= 0xffff;
6960 
6961     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6962         return -TARGET_EINVAL;
6963     }
6964 
6965     return timerid;
6966 }
6967 
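/*
 * A worked example, kept out of the build, of the timer ID encoding that
 * get_timer_id() above checks.  The values are hypothetical and assume the
 * decoded index fits inside the timer table.
 */
#if 0
static void example_timer_id_encoding(void)
{
    /* 0x0caf0003 carries TIMER_MAGIC in its top half, so index 3 is used. */
    target_timer_t ok = get_timer_id(0x0caf0003);
    /* 0x12340003 lacks the magic and is rejected with -TARGET_EINVAL. */
    target_timer_t bad = get_timer_id(0x12340003);
    (void)ok;
    (void)bad;
}
#endif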
6968 static int target_to_host_cpu_mask(unsigned long *host_mask,
6969                                    size_t host_size,
6970                                    abi_ulong target_addr,
6971                                    size_t target_size)
6972 {
6973     unsigned target_bits = sizeof(abi_ulong) * 8;
6974     unsigned host_bits = sizeof(*host_mask) * 8;
6975     abi_ulong *target_mask;
6976     unsigned i, j;
6977 
6978     assert(host_size >= target_size);
6979 
6980     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
6981     if (!target_mask) {
6982         return -TARGET_EFAULT;
6983     }
6984     memset(host_mask, 0, host_size);
6985 
6986     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6987         unsigned bit = i * target_bits;
6988         abi_ulong val;
6989 
6990         __get_user(val, &target_mask[i]);
6991         for (j = 0; j < target_bits; j++, bit++) {
6992             if (val & (1UL << j)) {
6993                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
6994             }
6995         }
6996     }
6997 
6998     unlock_user(target_mask, target_addr, 0);
6999     return 0;
7000 }
7001 
7002 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7003                                    size_t host_size,
7004                                    abi_ulong target_addr,
7005                                    size_t target_size)
7006 {
7007     unsigned target_bits = sizeof(abi_ulong) * 8;
7008     unsigned host_bits = sizeof(*host_mask) * 8;
7009     abi_ulong *target_mask;
7010     unsigned i, j;
7011 
7012     assert(host_size >= target_size);
7013 
7014     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7015     if (!target_mask) {
7016         return -TARGET_EFAULT;
7017     }
7018 
7019     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7020         unsigned bit = i * target_bits;
7021         abi_ulong val = 0;
7022 
7023         for (j = 0; j < target_bits; j++, bit++) {
7024             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7025                 val |= 1UL << j;
7026             }
7027         }
7028         __put_user(val, &target_mask[i]);
7029     }
7030 
7031     unlock_user(target_mask, target_addr, target_size);
7032     return 0;
7033 }
7034 
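/*
 * A worked example, kept out of the build, of the bit remapping done by the
 * two CPU-mask helpers above when a 32-bit guest ABI runs on a 64-bit host:
 * two consecutive guest words fold into one host word.  The values are
 * hypothetical.
 */
#if 0
static void example_cpu_mask_layout(void)
{
    /* Guest abi_ulong words: word[0] = 0x5, word[1] = 0x1 select guest
     * CPUs 0, 2 and 32.  On a 64-bit host they land in a single
     * unsigned long as bits 0, 2 and 32, i.e. 0x100000005UL.
     */
}
#endif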
7035 /* This is an internal helper for do_syscall so that it is easier
7036  * to have a single return point, allowing actions such as logging
7037  * of syscall results to be performed.
7038  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7039  */
7040 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7041                             abi_long arg2, abi_long arg3, abi_long arg4,
7042                             abi_long arg5, abi_long arg6, abi_long arg7,
7043                             abi_long arg8)
7044 {
7045     CPUState *cpu = ENV_GET_CPU(cpu_env);
7046     abi_long ret;
7047 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7048     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7049     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
7050     struct stat st;
7051 #endif
7052 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7053     || defined(TARGET_NR_fstatfs)
7054     struct statfs stfs;
7055 #endif
7056     void *p;
7057 
7058     switch(num) {
7059     case TARGET_NR_exit:
7060         /* In old applications this may be used to implement _exit(2).
7061            However in threaded applications it is used for thread termination,
7062            and _exit_group is used for application termination.
7063            Do thread termination if we have more than one thread.  */
7064 
7065         if (block_signals()) {
7066             return -TARGET_ERESTARTSYS;
7067         }
7068 
7069         cpu_list_lock();
7070 
7071         if (CPU_NEXT(first_cpu)) {
7072             TaskState *ts;
7073 
7074             /* Remove the CPU from the list.  */
7075             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7076 
7077             cpu_list_unlock();
7078 
7079             ts = cpu->opaque;
7080             if (ts->child_tidptr) {
7081                 put_user_u32(0, ts->child_tidptr);
7082                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7083                           NULL, NULL, 0);
7084             }
7085             thread_cpu = NULL;
7086             object_unref(OBJECT(cpu));
7087             g_free(ts);
7088             rcu_unregister_thread();
7089             pthread_exit(NULL);
7090         }
7091 
7092         cpu_list_unlock();
7093         preexit_cleanup(cpu_env, arg1);
7094         _exit(arg1);
7095         return 0; /* avoid warning */
7096     case TARGET_NR_read:
7097         if (arg2 == 0 && arg3 == 0) {
7098             return get_errno(safe_read(arg1, 0, 0));
7099         } else {
7100             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7101                 return -TARGET_EFAULT;
7102             ret = get_errno(safe_read(arg1, p, arg3));
7103             if (ret >= 0 &&
7104                 fd_trans_host_to_target_data(arg1)) {
7105                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7106             }
7107             unlock_user(p, arg2, ret);
7108         }
7109         return ret;
7110     case TARGET_NR_write:
7111         if (arg2 == 0 && arg3 == 0) {
7112             return get_errno(safe_write(arg1, 0, 0));
7113         }
7114         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7115             return -TARGET_EFAULT;
7116         if (fd_trans_target_to_host_data(arg1)) {
7117             void *copy = g_malloc(arg3);
7118             memcpy(copy, p, arg3);
7119             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7120             if (ret >= 0) {
7121                 ret = get_errno(safe_write(arg1, copy, ret));
7122             }
7123             g_free(copy);
7124         } else {
7125             ret = get_errno(safe_write(arg1, p, arg3));
7126         }
7127         unlock_user(p, arg2, 0);
7128         return ret;
7129 
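/*
 * A minimal sketch, kept out of the build, of the shape of the translated
 * write performed in TARGET_NR_write above: the registered fd translator is
 * applied to a private copy of the guest data and only the translated
 * length is written to the host descriptor.  The helper name is
 * hypothetical.
 */
#if 0
static abi_long example_translated_write(int fd, void *copy, abi_long len)
{
    abi_long n = fd_trans_target_to_host_data(fd)(copy, len);
    return n >= 0 ? get_errno(safe_write(fd, copy, n)) : n;
}
#endif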
7130 #ifdef TARGET_NR_open
7131     case TARGET_NR_open:
7132         if (!(p = lock_user_string(arg1)))
7133             return -TARGET_EFAULT;
7134         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7135                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7136                                   arg3));
7137         fd_trans_unregister(ret);
7138         unlock_user(p, arg1, 0);
7139         return ret;
7140 #endif
7141     case TARGET_NR_openat:
7142         if (!(p = lock_user_string(arg2)))
7143             return -TARGET_EFAULT;
7144         ret = get_errno(do_openat(cpu_env, arg1, p,
7145                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7146                                   arg4));
7147         fd_trans_unregister(ret);
7148         unlock_user(p, arg2, 0);
7149         return ret;
7150 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7151     case TARGET_NR_name_to_handle_at:
7152         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7153         return ret;
7154 #endif
7155 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7156     case TARGET_NR_open_by_handle_at:
7157         ret = do_open_by_handle_at(arg1, arg2, arg3);
7158         fd_trans_unregister(ret);
7159         return ret;
7160 #endif
7161     case TARGET_NR_close:
7162         fd_trans_unregister(arg1);
7163         return get_errno(close(arg1));
7164 
7165     case TARGET_NR_brk:
7166         return do_brk(arg1);
7167 #ifdef TARGET_NR_fork
7168     case TARGET_NR_fork:
7169         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7170 #endif
7171 #ifdef TARGET_NR_waitpid
7172     case TARGET_NR_waitpid:
7173         {
7174             int status;
7175             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7176             if (!is_error(ret) && arg2 && ret
7177                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7178                 return -TARGET_EFAULT;
7179         }
7180         return ret;
7181 #endif
7182 #ifdef TARGET_NR_waitid
7183     case TARGET_NR_waitid:
7184         {
7185             siginfo_t info;
7186             info.si_pid = 0;
7187             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7188             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7189                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7190                     return -TARGET_EFAULT;
7191                 host_to_target_siginfo(p, &info);
7192                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7193             }
7194         }
7195         return ret;
7196 #endif
7197 #ifdef TARGET_NR_creat /* not on alpha */
7198     case TARGET_NR_creat:
7199         if (!(p = lock_user_string(arg1)))
7200             return -TARGET_EFAULT;
7201         ret = get_errno(creat(p, arg2));
7202         fd_trans_unregister(ret);
7203         unlock_user(p, arg1, 0);
7204         return ret;
7205 #endif
7206 #ifdef TARGET_NR_link
7207     case TARGET_NR_link:
7208         {
7209             void * p2;
7210             p = lock_user_string(arg1);
7211             p2 = lock_user_string(arg2);
7212             if (!p || !p2)
7213                 ret = -TARGET_EFAULT;
7214             else
7215                 ret = get_errno(link(p, p2));
7216             unlock_user(p2, arg2, 0);
7217             unlock_user(p, arg1, 0);
7218         }
7219         return ret;
7220 #endif
7221 #if defined(TARGET_NR_linkat)
7222     case TARGET_NR_linkat:
7223         {
7224             void * p2 = NULL;
7225             if (!arg2 || !arg4)
7226                 return -TARGET_EFAULT;
7227             p  = lock_user_string(arg2);
7228             p2 = lock_user_string(arg4);
7229             if (!p || !p2)
7230                 ret = -TARGET_EFAULT;
7231             else
7232                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7233             unlock_user(p, arg2, 0);
7234             unlock_user(p2, arg4, 0);
7235         }
7236         return ret;
7237 #endif
7238 #ifdef TARGET_NR_unlink
7239     case TARGET_NR_unlink:
7240         if (!(p = lock_user_string(arg1)))
7241             return -TARGET_EFAULT;
7242         ret = get_errno(unlink(p));
7243         unlock_user(p, arg1, 0);
7244         return ret;
7245 #endif
7246 #if defined(TARGET_NR_unlinkat)
7247     case TARGET_NR_unlinkat:
7248         if (!(p = lock_user_string(arg2)))
7249             return -TARGET_EFAULT;
7250         ret = get_errno(unlinkat(arg1, p, arg3));
7251         unlock_user(p, arg2, 0);
7252         return ret;
7253 #endif
7254     case TARGET_NR_execve:
7255         {
7256             char **argp, **envp;
7257             int argc, envc;
7258             abi_ulong gp;
7259             abi_ulong guest_argp;
7260             abi_ulong guest_envp;
7261             abi_ulong addr;
7262             char **q;
7263             int total_size = 0;
7264 
7265             argc = 0;
7266             guest_argp = arg2;
7267             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7268                 if (get_user_ual(addr, gp))
7269                     return -TARGET_EFAULT;
7270                 if (!addr)
7271                     break;
7272                 argc++;
7273             }
7274             envc = 0;
7275             guest_envp = arg3;
7276             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7277                 if (get_user_ual(addr, gp))
7278                     return -TARGET_EFAULT;
7279                 if (!addr)
7280                     break;
7281                 envc++;
7282             }
7283 
7284             argp = g_new0(char *, argc + 1);
7285             envp = g_new0(char *, envc + 1);
7286 
7287             for (gp = guest_argp, q = argp; gp;
7288                   gp += sizeof(abi_ulong), q++) {
7289                 if (get_user_ual(addr, gp))
7290                     goto execve_efault;
7291                 if (!addr)
7292                     break;
7293                 if (!(*q = lock_user_string(addr)))
7294                     goto execve_efault;
7295                 total_size += strlen(*q) + 1;
7296             }
7297             *q = NULL;
7298 
7299             for (gp = guest_envp, q = envp; gp;
7300                   gp += sizeof(abi_ulong), q++) {
7301                 if (get_user_ual(addr, gp))
7302                     goto execve_efault;
7303                 if (!addr)
7304                     break;
7305                 if (!(*q = lock_user_string(addr)))
7306                     goto execve_efault;
7307                 total_size += strlen(*q) + 1;
7308             }
7309             *q = NULL;
7310 
7311             if (!(p = lock_user_string(arg1)))
7312                 goto execve_efault;
7313             /* Although execve() is not an interruptible syscall it is
7314              * a special case where we must use the safe_syscall wrapper:
7315              * if we allow a signal to happen before we make the host
7316              * syscall then we will 'lose' it, because at the point of
7317              * execve the process leaves QEMU's control. So we use the
7318              * safe syscall wrapper to ensure that we either take the
7319              * signal as a guest signal, or else it does not happen
7320              * before the execve completes and makes it the other
7321              * program's problem.
7322              */
7323             ret = get_errno(safe_execve(p, argp, envp));
7324             unlock_user(p, arg1, 0);
7325 
7326             goto execve_end;
7327 
7328         execve_efault:
7329             ret = -TARGET_EFAULT;
7330 
7331         execve_end:
7332             for (gp = guest_argp, q = argp; *q;
7333                   gp += sizeof(abi_ulong), q++) {
7334                 if (get_user_ual(addr, gp)
7335                     || !addr)
7336                     break;
7337                 unlock_user(*q, addr, 0);
7338             }
7339             for (gp = guest_envp, q = envp; *q;
7340                   gp += sizeof(abi_ulong), q++) {
7341                 if (get_user_ual(addr, gp)
7342                     || !addr)
7343                     break;
7344                 unlock_user(*q, addr, 0);
7345             }
7346 
7347             g_free(argp);
7348             g_free(envp);
7349         }
7350         return ret;
7351     case TARGET_NR_chdir:
7352         if (!(p = lock_user_string(arg1)))
7353             return -TARGET_EFAULT;
7354         ret = get_errno(chdir(p));
7355         unlock_user(p, arg1, 0);
7356         return ret;
7357 #ifdef TARGET_NR_time
7358     case TARGET_NR_time:
7359         {
7360             time_t host_time;
7361             ret = get_errno(time(&host_time));
7362             if (!is_error(ret)
7363                 && arg1
7364                 && put_user_sal(host_time, arg1))
7365                 return -TARGET_EFAULT;
7366         }
7367         return ret;
7368 #endif
7369 #ifdef TARGET_NR_mknod
7370     case TARGET_NR_mknod:
7371         if (!(p = lock_user_string(arg1)))
7372             return -TARGET_EFAULT;
7373         ret = get_errno(mknod(p, arg2, arg3));
7374         unlock_user(p, arg1, 0);
7375         return ret;
7376 #endif
7377 #if defined(TARGET_NR_mknodat)
7378     case TARGET_NR_mknodat:
7379         if (!(p = lock_user_string(arg2)))
7380             return -TARGET_EFAULT;
7381         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7382         unlock_user(p, arg2, 0);
7383         return ret;
7384 #endif
7385 #ifdef TARGET_NR_chmod
7386     case TARGET_NR_chmod:
7387         if (!(p = lock_user_string(arg1)))
7388             return -TARGET_EFAULT;
7389         ret = get_errno(chmod(p, arg2));
7390         unlock_user(p, arg1, 0);
7391         return ret;
7392 #endif
7393 #ifdef TARGET_NR_lseek
7394     case TARGET_NR_lseek:
7395         return get_errno(lseek(arg1, arg2, arg3));
7396 #endif
7397 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7398     /* Alpha specific */
7399     case TARGET_NR_getxpid:
7400         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7401         return get_errno(getpid());
7402 #endif
7403 #ifdef TARGET_NR_getpid
7404     case TARGET_NR_getpid:
7405         return get_errno(getpid());
7406 #endif
7407     case TARGET_NR_mount:
7408         {
7409             /* need to look at the data field */
7410             void *p2, *p3;
7411 
7412             if (arg1) {
7413                 p = lock_user_string(arg1);
7414                 if (!p) {
7415                     return -TARGET_EFAULT;
7416                 }
7417             } else {
7418                 p = NULL;
7419             }
7420 
7421             p2 = lock_user_string(arg2);
7422             if (!p2) {
7423                 if (arg1) {
7424                     unlock_user(p, arg1, 0);
7425                 }
7426                 return -TARGET_EFAULT;
7427             }
7428 
7429             if (arg3) {
7430                 p3 = lock_user_string(arg3);
7431                 if (!p3) {
7432                     if (arg1) {
7433                         unlock_user(p, arg1, 0);
7434                     }
7435                     unlock_user(p2, arg2, 0);
7436                     return -TARGET_EFAULT;
7437                 }
7438             } else {
7439                 p3 = NULL;
7440             }
7441 
7442             /* FIXME - arg5 should be locked, but it isn't clear how to
7443              * do that since it's not guaranteed to be a NULL-terminated
7444              * string.
7445              */
7446             if (!arg5) {
7447                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7448             } else {
7449                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7450             }
7451             ret = get_errno(ret);
7452 
7453             if (arg1) {
7454                 unlock_user(p, arg1, 0);
7455             }
7456             unlock_user(p2, arg2, 0);
7457             if (arg3) {
7458                 unlock_user(p3, arg3, 0);
7459             }
7460         }
7461         return ret;
7462 #ifdef TARGET_NR_umount
7463     case TARGET_NR_umount:
7464         if (!(p = lock_user_string(arg1)))
7465             return -TARGET_EFAULT;
7466         ret = get_errno(umount(p));
7467         unlock_user(p, arg1, 0);
7468         return ret;
7469 #endif
7470 #ifdef TARGET_NR_stime /* not on alpha */
7471     case TARGET_NR_stime:
7472         {
7473             time_t host_time;
7474             if (get_user_sal(host_time, arg1))
7475                 return -TARGET_EFAULT;
7476             return get_errno(stime(&host_time));
7477         }
7478 #endif
7479 #ifdef TARGET_NR_alarm /* not on alpha */
7480     case TARGET_NR_alarm:
7481         return alarm(arg1);
7482 #endif
7483 #ifdef TARGET_NR_pause /* not on alpha */
7484     case TARGET_NR_pause:
7485         if (!block_signals()) {
7486             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7487         }
7488         return -TARGET_EINTR;
7489 #endif
7490 #ifdef TARGET_NR_utime
7491     case TARGET_NR_utime:
7492         {
7493             struct utimbuf tbuf, *host_tbuf;
7494             struct target_utimbuf *target_tbuf;
7495             if (arg2) {
7496                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7497                     return -TARGET_EFAULT;
7498                 tbuf.actime = tswapal(target_tbuf->actime);
7499                 tbuf.modtime = tswapal(target_tbuf->modtime);
7500                 unlock_user_struct(target_tbuf, arg2, 0);
7501                 host_tbuf = &tbuf;
7502             } else {
7503                 host_tbuf = NULL;
7504             }
7505             if (!(p = lock_user_string(arg1)))
7506                 return -TARGET_EFAULT;
7507             ret = get_errno(utime(p, host_tbuf));
7508             unlock_user(p, arg1, 0);
7509         }
7510         return ret;
7511 #endif
7512 #ifdef TARGET_NR_utimes
7513     case TARGET_NR_utimes:
7514         {
7515             struct timeval *tvp, tv[2];
7516             if (arg2) {
7517                 if (copy_from_user_timeval(&tv[0], arg2)
7518                     || copy_from_user_timeval(&tv[1],
7519                                               arg2 + sizeof(struct target_timeval)))
7520                     return -TARGET_EFAULT;
7521                 tvp = tv;
7522             } else {
7523                 tvp = NULL;
7524             }
7525             if (!(p = lock_user_string(arg1)))
7526                 return -TARGET_EFAULT;
7527             ret = get_errno(utimes(p, tvp));
7528             unlock_user(p, arg1, 0);
7529         }
7530         return ret;
7531 #endif
7532 #if defined(TARGET_NR_futimesat)
7533     case TARGET_NR_futimesat:
7534         {
7535             struct timeval *tvp, tv[2];
7536             if (arg3) {
7537                 if (copy_from_user_timeval(&tv[0], arg3)
7538                     || copy_from_user_timeval(&tv[1],
7539                                               arg3 + sizeof(struct target_timeval)))
7540                     return -TARGET_EFAULT;
7541                 tvp = tv;
7542             } else {
7543                 tvp = NULL;
7544             }
7545             if (!(p = lock_user_string(arg2))) {
7546                 return -TARGET_EFAULT;
7547             }
7548             ret = get_errno(futimesat(arg1, path(p), tvp));
7549             unlock_user(p, arg2, 0);
7550         }
7551         return ret;
7552 #endif
7553 #ifdef TARGET_NR_access
7554     case TARGET_NR_access:
7555         if (!(p = lock_user_string(arg1))) {
7556             return -TARGET_EFAULT;
7557         }
7558         ret = get_errno(access(path(p), arg2));
7559         unlock_user(p, arg1, 0);
7560         return ret;
7561 #endif
7562 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7563     case TARGET_NR_faccessat:
7564         if (!(p = lock_user_string(arg2))) {
7565             return -TARGET_EFAULT;
7566         }
7567         ret = get_errno(faccessat(arg1, p, arg3, 0));
7568         unlock_user(p, arg2, 0);
7569         return ret;
7570 #endif
7571 #ifdef TARGET_NR_nice /* not on alpha */
7572     case TARGET_NR_nice:
7573         return get_errno(nice(arg1));
7574 #endif
7575     case TARGET_NR_sync:
7576         sync();
7577         return 0;
7578 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7579     case TARGET_NR_syncfs:
7580         return get_errno(syncfs(arg1));
7581 #endif
7582     case TARGET_NR_kill:
7583         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7584 #ifdef TARGET_NR_rename
7585     case TARGET_NR_rename:
7586         {
7587             void *p2;
7588             p = lock_user_string(arg1);
7589             p2 = lock_user_string(arg2);
7590             if (!p || !p2)
7591                 ret = -TARGET_EFAULT;
7592             else
7593                 ret = get_errno(rename(p, p2));
7594             unlock_user(p2, arg2, 0);
7595             unlock_user(p, arg1, 0);
7596         }
7597         return ret;
7598 #endif
7599 #if defined(TARGET_NR_renameat)
7600     case TARGET_NR_renameat:
7601         {
7602             void *p2;
7603             p  = lock_user_string(arg2);
7604             p2 = lock_user_string(arg4);
7605             if (!p || !p2)
7606                 ret = -TARGET_EFAULT;
7607             else
7608                 ret = get_errno(renameat(arg1, p, arg3, p2));
7609             unlock_user(p2, arg4, 0);
7610             unlock_user(p, arg2, 0);
7611         }
7612         return ret;
7613 #endif
7614 #if defined(TARGET_NR_renameat2)
7615     case TARGET_NR_renameat2:
7616         {
7617             void *p2;
7618             p  = lock_user_string(arg2);
7619             p2 = lock_user_string(arg4);
7620             if (!p || !p2) {
7621                 ret = -TARGET_EFAULT;
7622             } else {
7623                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7624             }
7625             unlock_user(p2, arg4, 0);
7626             unlock_user(p, arg2, 0);
7627         }
7628         return ret;
7629 #endif
7630 #ifdef TARGET_NR_mkdir
7631     case TARGET_NR_mkdir:
7632         if (!(p = lock_user_string(arg1)))
7633             return -TARGET_EFAULT;
7634         ret = get_errno(mkdir(p, arg2));
7635         unlock_user(p, arg1, 0);
7636         return ret;
7637 #endif
7638 #if defined(TARGET_NR_mkdirat)
7639     case TARGET_NR_mkdirat:
7640         if (!(p = lock_user_string(arg2)))
7641             return -TARGET_EFAULT;
7642         ret = get_errno(mkdirat(arg1, p, arg3));
7643         unlock_user(p, arg2, 0);
7644         return ret;
7645 #endif
7646 #ifdef TARGET_NR_rmdir
7647     case TARGET_NR_rmdir:
7648         if (!(p = lock_user_string(arg1)))
7649             return -TARGET_EFAULT;
7650         ret = get_errno(rmdir(p));
7651         unlock_user(p, arg1, 0);
7652         return ret;
7653 #endif
7654     case TARGET_NR_dup:
7655         ret = get_errno(dup(arg1));
7656         if (ret >= 0) {
7657             fd_trans_dup(arg1, ret);
7658         }
7659         return ret;
7660 #ifdef TARGET_NR_pipe
7661     case TARGET_NR_pipe:
7662         return do_pipe(cpu_env, arg1, 0, 0);
7663 #endif
7664 #ifdef TARGET_NR_pipe2
7665     case TARGET_NR_pipe2:
7666         return do_pipe(cpu_env, arg1,
7667                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7668 #endif
7669     case TARGET_NR_times:
7670         {
7671             struct target_tms *tmsp;
7672             struct tms tms;
7673             ret = get_errno(times(&tms));
7674             if (arg1) {
7675                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7676                 if (!tmsp)
7677                     return -TARGET_EFAULT;
7678                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7679                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7680                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7681                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7682             }
7683             if (!is_error(ret))
7684                 ret = host_to_target_clock_t(ret);
7685         }
7686         return ret;
7687     case TARGET_NR_acct:
7688         if (arg1 == 0) {
7689             ret = get_errno(acct(NULL));
7690         } else {
7691             if (!(p = lock_user_string(arg1))) {
7692                 return -TARGET_EFAULT;
7693             }
7694             ret = get_errno(acct(path(p)));
7695             unlock_user(p, arg1, 0);
7696         }
7697         return ret;
7698 #ifdef TARGET_NR_umount2
7699     case TARGET_NR_umount2:
7700         if (!(p = lock_user_string(arg1)))
7701             return -TARGET_EFAULT;
7702         ret = get_errno(umount2(p, arg2));
7703         unlock_user(p, arg1, 0);
7704         return ret;
7705 #endif
7706     case TARGET_NR_ioctl:
7707         return do_ioctl(arg1, arg2, arg3);
7708 #ifdef TARGET_NR_fcntl
7709     case TARGET_NR_fcntl:
7710         return do_fcntl(arg1, arg2, arg3);
7711 #endif
7712     case TARGET_NR_setpgid:
7713         return get_errno(setpgid(arg1, arg2));
7714     case TARGET_NR_umask:
7715         return get_errno(umask(arg1));
7716     case TARGET_NR_chroot:
7717         if (!(p = lock_user_string(arg1)))
7718             return -TARGET_EFAULT;
7719         ret = get_errno(chroot(p));
7720         unlock_user(p, arg1, 0);
7721         return ret;
7722 #ifdef TARGET_NR_dup2
7723     case TARGET_NR_dup2:
7724         ret = get_errno(dup2(arg1, arg2));
7725         if (ret >= 0) {
7726             fd_trans_dup(arg1, arg2);
7727         }
7728         return ret;
7729 #endif
7730 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7731     case TARGET_NR_dup3:
7732     {
7733         int host_flags;
7734 
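        /* O_CLOEXEC is the only flag dup3() accepts; reject anything else. */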
7735         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7736             return -EINVAL;
7737         }
7738         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7739         ret = get_errno(dup3(arg1, arg2, host_flags));
7740         if (ret >= 0) {
7741             fd_trans_dup(arg1, arg2);
7742         }
7743         return ret;
7744     }
7745 #endif
7746 #ifdef TARGET_NR_getppid /* not on alpha */
7747     case TARGET_NR_getppid:
7748         return get_errno(getppid());
7749 #endif
7750 #ifdef TARGET_NR_getpgrp
7751     case TARGET_NR_getpgrp:
7752         return get_errno(getpgrp());
7753 #endif
7754     case TARGET_NR_setsid:
7755         return get_errno(setsid());
7756 #ifdef TARGET_NR_sigaction
7757     case TARGET_NR_sigaction:
7758         {
7759 #if defined(TARGET_ALPHA)
7760             struct target_sigaction act, oact, *pact = 0;
7761             struct target_old_sigaction *old_act;
7762             if (arg2) {
7763                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7764                     return -TARGET_EFAULT;
7765                 act._sa_handler = old_act->_sa_handler;
7766                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7767                 act.sa_flags = old_act->sa_flags;
7768                 act.sa_restorer = 0;
7769                 unlock_user_struct(old_act, arg2, 0);
7770                 pact = &act;
7771             }
7772             ret = get_errno(do_sigaction(arg1, pact, &oact));
7773             if (!is_error(ret) && arg3) {
7774                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7775                     return -TARGET_EFAULT;
7776                 old_act->_sa_handler = oact._sa_handler;
7777                 old_act->sa_mask = oact.sa_mask.sig[0];
7778                 old_act->sa_flags = oact.sa_flags;
7779                 unlock_user_struct(old_act, arg3, 1);
7780             }
7781 #elif defined(TARGET_MIPS)
7782             struct target_sigaction act, oact, *pact, *old_act;
7783 
7784             if (arg2) {
7785                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7786                     return -TARGET_EFAULT;
7787                 act._sa_handler = old_act->_sa_handler;
7788                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7789                 act.sa_flags = old_act->sa_flags;
7790                 unlock_user_struct(old_act, arg2, 0);
7791                 pact = &act;
7792             } else {
7793                 pact = NULL;
7794             }
7795 
7796             ret = get_errno(do_sigaction(arg1, pact, &oact));
7797 
7798             if (!is_error(ret) && arg3) {
7799                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7800                     return -TARGET_EFAULT;
7801                 old_act->_sa_handler = oact._sa_handler;
7802                 old_act->sa_flags = oact.sa_flags;
7803                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7804                 old_act->sa_mask.sig[1] = 0;
7805                 old_act->sa_mask.sig[2] = 0;
7806                 old_act->sa_mask.sig[3] = 0;
7807                 unlock_user_struct(old_act, arg3, 1);
7808             }
7809 #else
7810             struct target_old_sigaction *old_act;
7811             struct target_sigaction act, oact, *pact;
7812             if (arg2) {
7813                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7814                     return -TARGET_EFAULT;
7815                 act._sa_handler = old_act->_sa_handler;
7816                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7817                 act.sa_flags = old_act->sa_flags;
7818                 act.sa_restorer = old_act->sa_restorer;
7819 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7820                 act.ka_restorer = 0;
7821 #endif
7822                 unlock_user_struct(old_act, arg2, 0);
7823                 pact = &act;
7824             } else {
7825                 pact = NULL;
7826             }
7827             ret = get_errno(do_sigaction(arg1, pact, &oact));
7828             if (!is_error(ret) && arg3) {
7829                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7830                     return -TARGET_EFAULT;
7831                 old_act->_sa_handler = oact._sa_handler;
7832                 old_act->sa_mask = oact.sa_mask.sig[0];
7833                 old_act->sa_flags = oact.sa_flags;
7834                 old_act->sa_restorer = oact.sa_restorer;
7835                 unlock_user_struct(old_act, arg3, 1);
7836             }
7837 #endif
7838         }
7839         return ret;
7840 #endif
7841     case TARGET_NR_rt_sigaction:
7842         {
7843 #if defined(TARGET_ALPHA)
7844             /* For Alpha and SPARC this is a 5 argument syscall, with
7845              * a 'restorer' parameter which must be copied into the
7846              * sa_restorer field of the sigaction struct.
7847              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7848              * and arg5 is the sigsetsize.
7849              * Alpha also has a separate rt_sigaction struct that it uses
7850              * here; SPARC uses the usual sigaction struct.
7851              */
7852             struct target_rt_sigaction *rt_act;
7853             struct target_sigaction act, oact, *pact = 0;
7854 
7855             if (arg4 != sizeof(target_sigset_t)) {
7856                 return -TARGET_EINVAL;
7857             }
7858             if (arg2) {
7859                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7860                     return -TARGET_EFAULT;
7861                 act._sa_handler = rt_act->_sa_handler;
7862                 act.sa_mask = rt_act->sa_mask;
7863                 act.sa_flags = rt_act->sa_flags;
7864                 act.sa_restorer = arg5;
7865                 unlock_user_struct(rt_act, arg2, 0);
7866                 pact = &act;
7867             }
7868             ret = get_errno(do_sigaction(arg1, pact, &oact));
7869             if (!is_error(ret) && arg3) {
7870                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7871                     return -TARGET_EFAULT;
7872                 rt_act->_sa_handler = oact._sa_handler;
7873                 rt_act->sa_mask = oact.sa_mask;
7874                 rt_act->sa_flags = oact.sa_flags;
7875                 unlock_user_struct(rt_act, arg3, 1);
7876             }
7877 #else
7878 #ifdef TARGET_SPARC
7879             target_ulong restorer = arg4;
7880             target_ulong sigsetsize = arg5;
7881 #else
7882             target_ulong sigsetsize = arg4;
7883 #endif
7884             struct target_sigaction *act;
7885             struct target_sigaction *oact;
7886 
7887             if (sigsetsize != sizeof(target_sigset_t)) {
7888                 return -TARGET_EINVAL;
7889             }
7890             if (arg2) {
7891                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
7892                     return -TARGET_EFAULT;
7893                 }
7894 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7895                 act->ka_restorer = restorer;
7896 #endif
7897             } else {
7898                 act = NULL;
7899             }
7900             if (arg3) {
7901                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7902                     ret = -TARGET_EFAULT;
7903                     goto rt_sigaction_fail;
7904                 }
7905             } else
7906                 oact = NULL;
7907             ret = get_errno(do_sigaction(arg1, act, oact));
7908     rt_sigaction_fail:
7909             if (act)
7910                 unlock_user_struct(act, arg2, 0);
7911             if (oact)
7912                 unlock_user_struct(oact, arg3, 1);
7913 #endif
7914         }
7915         return ret;
7916 #ifdef TARGET_NR_sgetmask /* not on alpha */
7917     case TARGET_NR_sgetmask:
7918         {
7919             sigset_t cur_set;
7920             abi_ulong target_set;
7921             ret = do_sigprocmask(0, NULL, &cur_set);
7922             if (!ret) {
7923                 host_to_target_old_sigset(&target_set, &cur_set);
7924                 ret = target_set;
7925             }
7926         }
7927         return ret;
7928 #endif
7929 #ifdef TARGET_NR_ssetmask /* not on alpha */
7930     case TARGET_NR_ssetmask:
7931         {
7932             sigset_t set, oset;
7933             abi_ulong target_set = arg1;
7934             target_to_host_old_sigset(&set, &target_set);
7935             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7936             if (!ret) {
7937                 host_to_target_old_sigset(&target_set, &oset);
7938                 ret = target_set;
7939             }
7940         }
7941         return ret;
7942 #endif
7943 #ifdef TARGET_NR_sigprocmask
7944     case TARGET_NR_sigprocmask:
7945         {
7946 #if defined(TARGET_ALPHA)
7947             sigset_t set, oldset;
7948             abi_ulong mask;
7949             int how;
7950 
7951             switch (arg1) {
7952             case TARGET_SIG_BLOCK:
7953                 how = SIG_BLOCK;
7954                 break;
7955             case TARGET_SIG_UNBLOCK:
7956                 how = SIG_UNBLOCK;
7957                 break;
7958             case TARGET_SIG_SETMASK:
7959                 how = SIG_SETMASK;
7960                 break;
7961             default:
7962                 return -TARGET_EINVAL;
7963             }
7964             mask = arg2;
7965             target_to_host_old_sigset(&set, &mask);
7966 
7967             ret = do_sigprocmask(how, &set, &oldset);
7968             if (!is_error(ret)) {
7969                 host_to_target_old_sigset(&mask, &oldset);
7970                 ret = mask;
7971                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7972             }
7973 #else
7974             sigset_t set, oldset, *set_ptr;
7975             int how;
7976 
7977             if (arg2) {
7978                 switch (arg1) {
7979                 case TARGET_SIG_BLOCK:
7980                     how = SIG_BLOCK;
7981                     break;
7982                 case TARGET_SIG_UNBLOCK:
7983                     how = SIG_UNBLOCK;
7984                     break;
7985                 case TARGET_SIG_SETMASK:
7986                     how = SIG_SETMASK;
7987                     break;
7988                 default:
7989                     return -TARGET_EINVAL;
7990                 }
7991                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7992                     return -TARGET_EFAULT;
7993                 target_to_host_old_sigset(&set, p);
7994                 unlock_user(p, arg2, 0);
7995                 set_ptr = &set;
7996             } else {
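                /* No new set supplied: 'how' is ignored when set is NULL,
                 * so pass zero and just report the current mask below. */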
7997                 how = 0;
7998                 set_ptr = NULL;
7999             }
8000             ret = do_sigprocmask(how, set_ptr, &oldset);
8001             if (!is_error(ret) && arg3) {
8002                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8003                     return -TARGET_EFAULT;
8004                 host_to_target_old_sigset(p, &oldset);
8005                 unlock_user(p, arg3, sizeof(target_sigset_t));
8006             }
8007 #endif
8008         }
8009         return ret;
8010 #endif
8011     case TARGET_NR_rt_sigprocmask:
8012         {
8013             int how = arg1;
8014             sigset_t set, oldset, *set_ptr;
8015 
8016             if (arg4 != sizeof(target_sigset_t)) {
8017                 return -TARGET_EINVAL;
8018             }
8019 
8020             if (arg2) {
8021                 switch(how) {
8022                 case TARGET_SIG_BLOCK:
8023                     how = SIG_BLOCK;
8024                     break;
8025                 case TARGET_SIG_UNBLOCK:
8026                     how = SIG_UNBLOCK;
8027                     break;
8028                 case TARGET_SIG_SETMASK:
8029                     how = SIG_SETMASK;
8030                     break;
8031                 default:
8032                     return -TARGET_EINVAL;
8033                 }
8034                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8035                     return -TARGET_EFAULT;
8036                 target_to_host_sigset(&set, p);
8037                 unlock_user(p, arg2, 0);
8038                 set_ptr = &set;
8039             } else {
8040                 how = 0;
8041                 set_ptr = NULL;
8042             }
8043             ret = do_sigprocmask(how, set_ptr, &oldset);
8044             if (!is_error(ret) && arg3) {
8045                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8046                     return -TARGET_EFAULT;
8047                 host_to_target_sigset(p, &oldset);
8048                 unlock_user(p, arg3, sizeof(target_sigset_t));
8049             }
8050         }
8051         return ret;
8052 #ifdef TARGET_NR_sigpending
8053     case TARGET_NR_sigpending:
8054         {
8055             sigset_t set;
8056             ret = get_errno(sigpending(&set));
8057             if (!is_error(ret)) {
8058                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8059                     return -TARGET_EFAULT;
8060                 host_to_target_old_sigset(p, &set);
8061                 unlock_user(p, arg1, sizeof(target_sigset_t));
8062             }
8063         }
8064         return ret;
8065 #endif
8066     case TARGET_NR_rt_sigpending:
8067         {
8068             sigset_t set;
8069 
8070             /* Yes, this check is >, not != like most. We follow the kernel's
8071              * logic and it does it like this because it implements
8072              * NR_sigpending through the same code path, and in that case
8073              * the old_sigset_t is smaller in size.
8074              */
8075             if (arg2 > sizeof(target_sigset_t)) {
8076                 return -TARGET_EINVAL;
8077             }
8078 
8079             ret = get_errno(sigpending(&set));
8080             if (!is_error(ret)) {
8081                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8082                     return -TARGET_EFAULT;
8083                 host_to_target_sigset(p, &set);
8084                 unlock_user(p, arg1, sizeof(target_sigset_t));
8085             }
8086         }
8087         return ret;
8088 #ifdef TARGET_NR_sigsuspend
8089     case TARGET_NR_sigsuspend:
8090         {
8091             TaskState *ts = cpu->opaque;
8092 #if defined(TARGET_ALPHA)
8093             abi_ulong mask = arg1;
8094             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8095 #else
8096             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8097                 return -TARGET_EFAULT;
8098             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8099             unlock_user(p, arg1, 0);
8100 #endif
8101             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8102                                                SIGSET_T_SIZE));
8103             if (ret != -TARGET_ERESTARTSYS) {
8104                 ts->in_sigsuspend = 1;
8105             }
8106         }
8107         return ret;
8108 #endif
8109     case TARGET_NR_rt_sigsuspend:
8110         {
8111             TaskState *ts = cpu->opaque;
8112 
8113             if (arg2 != sizeof(target_sigset_t)) {
8114                 return -TARGET_EINVAL;
8115             }
8116             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8117                 return -TARGET_EFAULT;
8118             target_to_host_sigset(&ts->sigsuspend_mask, p);
8119             unlock_user(p, arg1, 0);
8120             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8121                                                SIGSET_T_SIZE));
8122             if (ret != -TARGET_ERESTARTSYS) {
8123                 ts->in_sigsuspend = 1;
8124             }
8125         }
8126         return ret;
8127     case TARGET_NR_rt_sigtimedwait:
8128         {
8129             sigset_t set;
8130             struct timespec uts, *puts;
8131             siginfo_t uinfo;
8132 
8133             if (arg4 != sizeof(target_sigset_t)) {
8134                 return -TARGET_EINVAL;
8135             }
8136 
8137             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8138                 return -TARGET_EFAULT;
8139             target_to_host_sigset(&set, p);
8140             unlock_user(p, arg1, 0);
8141             if (arg3) {
8142                 puts = &uts;
8143                 target_to_host_timespec(puts, arg3);
8144             } else {
8145                 puts = NULL;
8146             }
8147             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8148                                                  SIGSET_T_SIZE));
8149             if (!is_error(ret)) {
8150                 if (arg2) {
8151                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8152                                   0);
8153                     if (!p) {
8154                         return -TARGET_EFAULT;
8155                     }
8156                     host_to_target_siginfo(p, &uinfo);
8157                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8158                 }
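                /* The syscall returns a host signal number; map it back
                 * to the target's numbering. */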
8159                 ret = host_to_target_signal(ret);
8160             }
8161         }
8162         return ret;
8163     case TARGET_NR_rt_sigqueueinfo:
8164         {
8165             siginfo_t uinfo;
8166 
8167             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8168             if (!p) {
8169                 return -TARGET_EFAULT;
8170             }
8171             target_to_host_siginfo(&uinfo, p);
8172             unlock_user(p, arg3, 0);
8173             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8174         }
8175         return ret;
8176     case TARGET_NR_rt_tgsigqueueinfo:
8177         {
8178             siginfo_t uinfo;
8179 
8180             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8181             if (!p) {
8182                 return -TARGET_EFAULT;
8183             }
8184             target_to_host_siginfo(&uinfo, p);
8185             unlock_user(p, arg4, 0);
8186             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8187         }
8188         return ret;
8189 #ifdef TARGET_NR_sigreturn
8190     case TARGET_NR_sigreturn:
8191         if (block_signals()) {
8192             return -TARGET_ERESTARTSYS;
8193         }
8194         return do_sigreturn(cpu_env);
8195 #endif
8196     case TARGET_NR_rt_sigreturn:
8197         if (block_signals()) {
8198             return -TARGET_ERESTARTSYS;
8199         }
8200         return do_rt_sigreturn(cpu_env);
8201     case TARGET_NR_sethostname:
8202         if (!(p = lock_user_string(arg1)))
8203             return -TARGET_EFAULT;
8204         ret = get_errno(sethostname(p, arg2));
8205         unlock_user(p, arg1, 0);
8206         return ret;
8207 #ifdef TARGET_NR_setrlimit
8208     case TARGET_NR_setrlimit:
8209         {
8210             int resource = target_to_host_resource(arg1);
8211             struct target_rlimit *target_rlim;
8212             struct rlimit rlim;
8213             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8214                 return -TARGET_EFAULT;
8215             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8216             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8217             unlock_user_struct(target_rlim, arg2, 0);
8218             /*
8219              * If we just passed through resource limit settings for memory then
8220              * they would also apply to QEMU's own allocations, and QEMU will
8221              * crash or hang or die if its allocations fail. Ideally we would
8222              * track the guest allocations in QEMU and apply the limits ourselves.
8223              * For now, just tell the guest the call succeeded but don't actually
8224              * limit anything.
8225              */
8226             if (resource != RLIMIT_AS &&
8227                 resource != RLIMIT_DATA &&
8228                 resource != RLIMIT_STACK) {
8229                 return get_errno(setrlimit(resource, &rlim));
8230             } else {
8231                 return 0;
8232             }
8233         }
8234 #endif
8235 #ifdef TARGET_NR_getrlimit
8236     case TARGET_NR_getrlimit:
8237         {
8238             int resource = target_to_host_resource(arg1);
8239             struct target_rlimit *target_rlim;
8240             struct rlimit rlim;
8241 
8242             ret = get_errno(getrlimit(resource, &rlim));
8243             if (!is_error(ret)) {
8244                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8245                     return -TARGET_EFAULT;
8246                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8247                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8248                 unlock_user_struct(target_rlim, arg2, 1);
8249             }
8250         }
8251         return ret;
8252 #endif
8253     case TARGET_NR_getrusage:
8254         {
8255             struct rusage rusage;
8256             ret = get_errno(getrusage(arg1, &rusage));
8257             if (!is_error(ret)) {
8258                 ret = host_to_target_rusage(arg2, &rusage);
8259             }
8260         }
8261         return ret;
8262     case TARGET_NR_gettimeofday:
8263         {
8264             struct timeval tv;
8265             ret = get_errno(gettimeofday(&tv, NULL));
8266             if (!is_error(ret)) {
8267                 if (copy_to_user_timeval(arg1, &tv))
8268                     return -TARGET_EFAULT;
8269             }
8270         }
8271         return ret;
8272     case TARGET_NR_settimeofday:
8273         {
8274             struct timeval tv, *ptv = NULL;
8275             struct timezone tz, *ptz = NULL;
8276 
8277             if (arg1) {
8278                 if (copy_from_user_timeval(&tv, arg1)) {
8279                     return -TARGET_EFAULT;
8280                 }
8281                 ptv = &tv;
8282             }
8283 
8284             if (arg2) {
8285                 if (copy_from_user_timezone(&tz, arg2)) {
8286                     return -TARGET_EFAULT;
8287                 }
8288                 ptz = &tz;
8289             }
8290 
8291             return get_errno(settimeofday(ptv, ptz));
8292         }
8293 #if defined(TARGET_NR_select)
8294     case TARGET_NR_select:
8295 #if defined(TARGET_WANT_NI_OLD_SELECT)
8296         /* Some architectures used to have old_select here,
8297          * but they now return ENOSYS for it.
8298          */
8299         ret = -TARGET_ENOSYS;
8300 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8301         ret = do_old_select(arg1);
8302 #else
8303         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8304 #endif
8305         return ret;
8306 #endif
8307 #ifdef TARGET_NR_pselect6
8308     case TARGET_NR_pselect6:
8309         {
8310             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8311             fd_set rfds, wfds, efds;
8312             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8313             struct timespec ts, *ts_ptr;
8314 
8315             /*
8316              * The 6th arg is actually two args smashed together,
8317              * so we cannot use the C library.
8318              */
8319             sigset_t set;
8320             struct {
8321                 sigset_t *set;
8322                 size_t size;
8323             } sig, *sig_ptr;
8324 
8325             abi_ulong arg_sigset, arg_sigsize, *arg7;
8326             target_sigset_t *target_sigset;
8327 
8328             n = arg1;
8329             rfd_addr = arg2;
8330             wfd_addr = arg3;
8331             efd_addr = arg4;
8332             ts_addr = arg5;
8333 
8334             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8335             if (ret) {
8336                 return ret;
8337             }
8338             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8339             if (ret) {
8340                 return ret;
8341             }
8342             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8343             if (ret) {
8344                 return ret;
8345             }
8346 
8347             /*
8348              * This takes a timespec, and not a timeval, so we cannot
8349              * use the do_select() helper ...
8350              */
8351             if (ts_addr) {
8352                 if (target_to_host_timespec(&ts, ts_addr)) {
8353                     return -TARGET_EFAULT;
8354                 }
8355                 ts_ptr = &ts;
8356             } else {
8357                 ts_ptr = NULL;
8358             }
8359 
8360             /* Extract the two packed args for the sigset */
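            /* arg6 points at a { const sigset_t *ss; size_t ss_len; } pair
             * in guest memory, matching the kernel's pselect6() ABI. */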
8361             if (arg6) {
8362                 sig_ptr = &sig;
8363                 sig.size = SIGSET_T_SIZE;
8364 
8365                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8366                 if (!arg7) {
8367                     return -TARGET_EFAULT;
8368                 }
8369                 arg_sigset = tswapal(arg7[0]);
8370                 arg_sigsize = tswapal(arg7[1]);
8371                 unlock_user(arg7, arg6, 0);
8372 
8373                 if (arg_sigset) {
8374                     sig.set = &set;
8375                     if (arg_sigsize != sizeof(*target_sigset)) {
8376                         /* Like the kernel, we enforce correct size sigsets */
8377                         return -TARGET_EINVAL;
8378                     }
8379                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8380                                               sizeof(*target_sigset), 1);
8381                     if (!target_sigset) {
8382                         return -TARGET_EFAULT;
8383                     }
8384                     target_to_host_sigset(&set, target_sigset);
8385                     unlock_user(target_sigset, arg_sigset, 0);
8386                 } else {
8387                     sig.set = NULL;
8388                 }
8389             } else {
8390                 sig_ptr = NULL;
8391             }
8392 
8393             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8394                                           ts_ptr, sig_ptr));
8395 
8396             if (!is_error(ret)) {
8397                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8398                     return -TARGET_EFAULT;
8399                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8400                     return -TARGET_EFAULT;
8401                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8402                     return -TARGET_EFAULT;
8403 
8404                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8405                     return -TARGET_EFAULT;
8406             }
8407         }
8408         return ret;
8409 #endif
8410 #ifdef TARGET_NR_symlink
8411     case TARGET_NR_symlink:
8412         {
8413             void *p2;
8414             p = lock_user_string(arg1);
8415             p2 = lock_user_string(arg2);
8416             if (!p || !p2)
8417                 ret = -TARGET_EFAULT;
8418             else
8419                 ret = get_errno(symlink(p, p2));
8420             unlock_user(p2, arg2, 0);
8421             unlock_user(p, arg1, 0);
8422         }
8423         return ret;
8424 #endif
8425 #if defined(TARGET_NR_symlinkat)
8426     case TARGET_NR_symlinkat:
8427         {
8428             void *p2;
8429             p  = lock_user_string(arg1);
8430             p2 = lock_user_string(arg3);
8431             if (!p || !p2)
8432                 ret = -TARGET_EFAULT;
8433             else
8434                 ret = get_errno(symlinkat(p, arg2, p2));
8435             unlock_user(p2, arg3, 0);
8436             unlock_user(p, arg1, 0);
8437         }
8438         return ret;
8439 #endif
8440 #ifdef TARGET_NR_readlink
8441     case TARGET_NR_readlink:
8442         {
8443             void *p2;
8444             p = lock_user_string(arg1);
8445             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8446             if (!p || !p2) {
8447                 ret = -TARGET_EFAULT;
8448             } else if (!arg3) {
8449                 /* Zero-length buffer: fail here, before the magic exe check below. */
8450                 ret = -TARGET_EINVAL;
8451             } else if (is_proc_myself((const char *)p, "exe")) {
8452                 char real[PATH_MAX], *temp;
8453                 temp = realpath(exec_path, real);
8454                 /* Return value is # of bytes that we wrote to the buffer. */
8455                 if (temp == NULL) {
8456                     ret = get_errno(-1);
8457                 } else {
8458                     /* Don't worry about sign mismatch as earlier mapping
8459                      * logic would have thrown a bad address error. */
8460                     ret = MIN(strlen(real), arg3);
8461                     /* We cannot NUL terminate the string. */
8462                     memcpy(p2, real, ret);
8463                 }
8464             } else {
8465                 ret = get_errno(readlink(path(p), p2, arg3));
8466             }
8467             unlock_user(p2, arg2, ret);
8468             unlock_user(p, arg1, 0);
8469         }
8470         return ret;
8471 #endif
8472 #if defined(TARGET_NR_readlinkat)
8473     case TARGET_NR_readlinkat:
8474         {
8475             void *p2;
8476             p  = lock_user_string(arg2);
8477             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8478             if (!p || !p2) {
8479                 ret = -TARGET_EFAULT;
8480             } else if (is_proc_myself((const char *)p, "exe")) {
8481                 char real[PATH_MAX], *temp;
8482                 temp = realpath(exec_path, real);
8483                 ret = temp == NULL ? get_errno(-1) : strlen(real);
8484                 snprintf((char *)p2, arg4, "%s", real);
8485             } else {
8486                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8487             }
8488             unlock_user(p2, arg3, ret);
8489             unlock_user(p, arg2, 0);
8490         }
8491         return ret;
8492 #endif
8493 #ifdef TARGET_NR_swapon
8494     case TARGET_NR_swapon:
8495         if (!(p = lock_user_string(arg1)))
8496             return -TARGET_EFAULT;
8497         ret = get_errno(swapon(p, arg2));
8498         unlock_user(p, arg1, 0);
8499         return ret;
8500 #endif
8501     case TARGET_NR_reboot:
8502         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8503             /* arg4 is only used here; it must be ignored in all other cases */
8504             p = lock_user_string(arg4);
8505             if (!p) {
8506                 return -TARGET_EFAULT;
8507             }
8508             ret = get_errno(reboot(arg1, arg2, arg3, p));
8509             unlock_user(p, arg4, 0);
8510         } else {
8511             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8512         }
8513         return ret;
8514 #ifdef TARGET_NR_mmap
8515     case TARGET_NR_mmap:
8516 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8517     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8518     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8519     || defined(TARGET_S390X)
8520         {
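            /* The old-style mmap on these targets takes a single pointer
             * to an array of six arguments in guest memory instead of
             * passing the arguments in registers. */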
8521             abi_ulong *v;
8522             abi_ulong v1, v2, v3, v4, v5, v6;
8523             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8524                 return -TARGET_EFAULT;
8525             v1 = tswapal(v[0]);
8526             v2 = tswapal(v[1]);
8527             v3 = tswapal(v[2]);
8528             v4 = tswapal(v[3]);
8529             v5 = tswapal(v[4]);
8530             v6 = tswapal(v[5]);
8531             unlock_user(v, arg1, 0);
8532             ret = get_errno(target_mmap(v1, v2, v3,
8533                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8534                                         v5, v6));
8535         }
8536 #else
8537         ret = get_errno(target_mmap(arg1, arg2, arg3,
8538                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8539                                     arg5,
8540                                     arg6));
8541 #endif
8542         return ret;
8543 #endif
8544 #ifdef TARGET_NR_mmap2
8545     case TARGET_NR_mmap2:
8546 #ifndef MMAP_SHIFT
8547 #define MMAP_SHIFT 12
8548 #endif
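        /* mmap2 passes the file offset in units of (1 << MMAP_SHIFT) bytes,
         * normally 4096, hence the shift below. */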
8549         ret = target_mmap(arg1, arg2, arg3,
8550                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8551                           arg5, arg6 << MMAP_SHIFT);
8552         return get_errno(ret);
8553 #endif
8554     case TARGET_NR_munmap:
8555         return get_errno(target_munmap(arg1, arg2));
8556     case TARGET_NR_mprotect:
8557         {
8558             TaskState *ts = cpu->opaque;
8559             /* Special hack to detect libc making the stack executable.  */
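            /* If so, strip PROT_GROWSDOWN and widen the request so that it
             * covers the stack from its lower limit up to the original end
             * of the region. */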
8560             if ((arg3 & PROT_GROWSDOWN)
8561                 && arg1 >= ts->info->stack_limit
8562                 && arg1 <= ts->info->start_stack) {
8563                 arg3 &= ~PROT_GROWSDOWN;
8564                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8565                 arg1 = ts->info->stack_limit;
8566             }
8567         }
8568         return get_errno(target_mprotect(arg1, arg2, arg3));
8569 #ifdef TARGET_NR_mremap
8570     case TARGET_NR_mremap:
8571         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8572 #endif
8573         /* ??? msync/mlock/munlock are broken for softmmu.  */
8574 #ifdef TARGET_NR_msync
8575     case TARGET_NR_msync:
8576         return get_errno(msync(g2h(arg1), arg2, arg3));
8577 #endif
8578 #ifdef TARGET_NR_mlock
8579     case TARGET_NR_mlock:
8580         return get_errno(mlock(g2h(arg1), arg2));
8581 #endif
8582 #ifdef TARGET_NR_munlock
8583     case TARGET_NR_munlock:
8584         return get_errno(munlock(g2h(arg1), arg2));
8585 #endif
8586 #ifdef TARGET_NR_mlockall
8587     case TARGET_NR_mlockall:
8588         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8589 #endif
8590 #ifdef TARGET_NR_munlockall
8591     case TARGET_NR_munlockall:
8592         return get_errno(munlockall());
8593 #endif
8594 #ifdef TARGET_NR_truncate
8595     case TARGET_NR_truncate:
8596         if (!(p = lock_user_string(arg1)))
8597             return -TARGET_EFAULT;
8598         ret = get_errno(truncate(p, arg2));
8599         unlock_user(p, arg1, 0);
8600         return ret;
8601 #endif
8602 #ifdef TARGET_NR_ftruncate
8603     case TARGET_NR_ftruncate:
8604         return get_errno(ftruncate(arg1, arg2));
8605 #endif
8606     case TARGET_NR_fchmod:
8607         return get_errno(fchmod(arg1, arg2));
8608 #if defined(TARGET_NR_fchmodat)
8609     case TARGET_NR_fchmodat:
8610         if (!(p = lock_user_string(arg2)))
8611             return -TARGET_EFAULT;
8612         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8613         unlock_user(p, arg2, 0);
8614         return ret;
8615 #endif
8616     case TARGET_NR_getpriority:
8617         /* Note that negative values are valid for getpriority, so we must
8618            differentiate based on errno settings.  */
8619         errno = 0;
8620         ret = getpriority(arg1, arg2);
8621         if (ret == -1 && errno != 0) {
8622             return -host_to_target_errno(errno);
8623         }
8624 #ifdef TARGET_ALPHA
8625         /* Return value is the unbiased priority.  Signal no error.  */
8626         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8627 #else
8628         /* Return value is a biased priority to avoid negative numbers.  */
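        /* Nice values -20..19 thus map to 40..1, matching the kernel's ABI. */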
8629         ret = 20 - ret;
8630 #endif
8631         return ret;
8632     case TARGET_NR_setpriority:
8633         return get_errno(setpriority(arg1, arg2, arg3));
8634 #ifdef TARGET_NR_statfs
8635     case TARGET_NR_statfs:
8636         if (!(p = lock_user_string(arg1))) {
8637             return -TARGET_EFAULT;
8638         }
8639         ret = get_errno(statfs(path(p), &stfs));
8640         unlock_user(p, arg1, 0);
8641     convert_statfs:
8642         if (!is_error(ret)) {
8643             struct target_statfs *target_stfs;
8644 
8645             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8646                 return -TARGET_EFAULT;
8647             __put_user(stfs.f_type, &target_stfs->f_type);
8648             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8649             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8650             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8651             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8652             __put_user(stfs.f_files, &target_stfs->f_files);
8653             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8654             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8655             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8656             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8657             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8658 #ifdef _STATFS_F_FLAGS
8659             __put_user(stfs.f_flags, &target_stfs->f_flags);
8660 #else
8661             __put_user(0, &target_stfs->f_flags);
8662 #endif
8663             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8664             unlock_user_struct(target_stfs, arg2, 1);
8665         }
8666         return ret;
8667 #endif
8668 #ifdef TARGET_NR_fstatfs
8669     case TARGET_NR_fstatfs:
8670         ret = get_errno(fstatfs(arg1, &stfs));
8671         goto convert_statfs;
8672 #endif
8673 #ifdef TARGET_NR_statfs64
8674     case TARGET_NR_statfs64:
8675         if (!(p = lock_user_string(arg1))) {
8676             return -TARGET_EFAULT;
8677         }
8678         ret = get_errno(statfs(path(p), &stfs));
8679         unlock_user(p, arg1, 0);
8680     convert_statfs64:
8681         if (!is_error(ret)) {
8682             struct target_statfs64 *target_stfs;
8683 
8684             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8685                 return -TARGET_EFAULT;
8686             __put_user(stfs.f_type, &target_stfs->f_type);
8687             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8688             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8689             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8690             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8691             __put_user(stfs.f_files, &target_stfs->f_files);
8692             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8693             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8694             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8695             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8696             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8697             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8698             unlock_user_struct(target_stfs, arg3, 1);
8699         }
8700         return ret;
8701     case TARGET_NR_fstatfs64:
8702         ret = get_errno(fstatfs(arg1, &stfs));
8703         goto convert_statfs64;
8704 #endif
8705 #ifdef TARGET_NR_socketcall
8706     case TARGET_NR_socketcall:
8707         return do_socketcall(arg1, arg2);
8708 #endif
8709 #ifdef TARGET_NR_accept
8710     case TARGET_NR_accept:
8711         return do_accept4(arg1, arg2, arg3, 0);
8712 #endif
8713 #ifdef TARGET_NR_accept4
8714     case TARGET_NR_accept4:
8715         return do_accept4(arg1, arg2, arg3, arg4);
8716 #endif
8717 #ifdef TARGET_NR_bind
8718     case TARGET_NR_bind:
8719         return do_bind(arg1, arg2, arg3);
8720 #endif
8721 #ifdef TARGET_NR_connect
8722     case TARGET_NR_connect:
8723         return do_connect(arg1, arg2, arg3);
8724 #endif
8725 #ifdef TARGET_NR_getpeername
8726     case TARGET_NR_getpeername:
8727         return do_getpeername(arg1, arg2, arg3);
8728 #endif
8729 #ifdef TARGET_NR_getsockname
8730     case TARGET_NR_getsockname:
8731         return do_getsockname(arg1, arg2, arg3);
8732 #endif
8733 #ifdef TARGET_NR_getsockopt
8734     case TARGET_NR_getsockopt:
8735         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8736 #endif
8737 #ifdef TARGET_NR_listen
8738     case TARGET_NR_listen:
8739         return get_errno(listen(arg1, arg2));
8740 #endif
8741 #ifdef TARGET_NR_recv
8742     case TARGET_NR_recv:
8743         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8744 #endif
8745 #ifdef TARGET_NR_recvfrom
8746     case TARGET_NR_recvfrom:
8747         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8748 #endif
8749 #ifdef TARGET_NR_recvmsg
8750     case TARGET_NR_recvmsg:
8751         return do_sendrecvmsg(arg1, arg2, arg3, 0);
8752 #endif
8753 #ifdef TARGET_NR_send
8754     case TARGET_NR_send:
8755         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8756 #endif
8757 #ifdef TARGET_NR_sendmsg
8758     case TARGET_NR_sendmsg:
8759         return do_sendrecvmsg(arg1, arg2, arg3, 1);
8760 #endif
8761 #ifdef TARGET_NR_sendmmsg
8762     case TARGET_NR_sendmmsg:
8763         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8764     case TARGET_NR_recvmmsg:
8765         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8766 #endif
8767 #ifdef TARGET_NR_sendto
8768     case TARGET_NR_sendto:
8769         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8770 #endif
8771 #ifdef TARGET_NR_shutdown
8772     case TARGET_NR_shutdown:
8773         return get_errno(shutdown(arg1, arg2));
8774 #endif
8775 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8776     case TARGET_NR_getrandom:
8777         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8778         if (!p) {
8779             return -TARGET_EFAULT;
8780         }
8781         ret = get_errno(getrandom(p, arg2, arg3));
8782         unlock_user(p, arg1, ret);
8783         return ret;
8784 #endif
8785 #ifdef TARGET_NR_socket
8786     case TARGET_NR_socket:
8787         return do_socket(arg1, arg2, arg3);
8788 #endif
8789 #ifdef TARGET_NR_socketpair
8790     case TARGET_NR_socketpair:
8791         return do_socketpair(arg1, arg2, arg3, arg4);
8792 #endif
8793 #ifdef TARGET_NR_setsockopt
8794     case TARGET_NR_setsockopt:
8795         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8796 #endif
8797 #if defined(TARGET_NR_syslog)
8798     case TARGET_NR_syslog:
8799         {
8800             int len = arg2;
8801 
8802             switch (arg1) {
8803             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
8804             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
8805             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
8806             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
8807             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
8808             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8809             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
8810             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
8811                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8812             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
8813             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
8814             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
8815                 {
8816                     if (len < 0) {
8817                         return -TARGET_EINVAL;
8818                     }
8819                     if (len == 0) {
8820                         return 0;
8821                     }
8822                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8823                     if (!p) {
8824                         return -TARGET_EFAULT;
8825                     }
8826                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8827                     unlock_user(p, arg2, arg3);
8828                 }
8829                 return ret;
8830             default:
8831                 return -TARGET_EINVAL;
8832             }
8833         }
8834         break;
8835 #endif
8836     case TARGET_NR_setitimer:
8837         {
8838             struct itimerval value, ovalue, *pvalue;
8839 
8840             if (arg2) {
8841                 pvalue = &value;
8842                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8843                     || copy_from_user_timeval(&pvalue->it_value,
8844                                               arg2 + sizeof(struct target_timeval)))
8845                     return -TARGET_EFAULT;
8846             } else {
8847                 pvalue = NULL;
8848             }
8849             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8850             if (!is_error(ret) && arg3) {
8851                 if (copy_to_user_timeval(arg3,
8852                                          &ovalue.it_interval)
8853                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8854                                             &ovalue.it_value))
8855                     return -TARGET_EFAULT;
8856             }
8857         }
8858         return ret;
8859     case TARGET_NR_getitimer:
8860         {
8861             struct itimerval value;
8862 
8863             ret = get_errno(getitimer(arg1, &value));
8864             if (!is_error(ret) && arg2) {
8865                 if (copy_to_user_timeval(arg2,
8866                                          &value.it_interval)
8867                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8868                                             &value.it_value))
8869                     return -TARGET_EFAULT;
8870             }
8871         }
8872         return ret;
8873 #ifdef TARGET_NR_stat
8874     case TARGET_NR_stat:
8875         if (!(p = lock_user_string(arg1))) {
8876             return -TARGET_EFAULT;
8877         }
8878         ret = get_errno(stat(path(p), &st));
8879         unlock_user(p, arg1, 0);
8880         goto do_stat;
8881 #endif
8882 #ifdef TARGET_NR_lstat
8883     case TARGET_NR_lstat:
8884         if (!(p = lock_user_string(arg1))) {
8885             return -TARGET_EFAULT;
8886         }
8887         ret = get_errno(lstat(path(p), &st));
8888         unlock_user(p, arg1, 0);
8889         goto do_stat;
8890 #endif
8891 #ifdef TARGET_NR_fstat
8892     case TARGET_NR_fstat:
8893         {
8894             ret = get_errno(fstat(arg1, &st));
8895 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8896         do_stat:
8897 #endif
8898             if (!is_error(ret)) {
8899                 struct target_stat *target_st;
8900 
8901                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8902                     return -TARGET_EFAULT;
8903                 memset(target_st, 0, sizeof(*target_st));
8904                 __put_user(st.st_dev, &target_st->st_dev);
8905                 __put_user(st.st_ino, &target_st->st_ino);
8906                 __put_user(st.st_mode, &target_st->st_mode);
8907                 __put_user(st.st_uid, &target_st->st_uid);
8908                 __put_user(st.st_gid, &target_st->st_gid);
8909                 __put_user(st.st_nlink, &target_st->st_nlink);
8910                 __put_user(st.st_rdev, &target_st->st_rdev);
8911                 __put_user(st.st_size, &target_st->st_size);
8912                 __put_user(st.st_blksize, &target_st->st_blksize);
8913                 __put_user(st.st_blocks, &target_st->st_blocks);
8914                 __put_user(st.st_atime, &target_st->target_st_atime);
8915                 __put_user(st.st_mtime, &target_st->target_st_mtime);
8916                 __put_user(st.st_ctime, &target_st->target_st_ctime);
8917 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
8918     defined(TARGET_STAT_HAVE_NSEC)
8919                 __put_user(st.st_atim.tv_nsec,
8920                            &target_st->target_st_atime_nsec);
8921                 __put_user(st.st_mtim.tv_nsec,
8922                            &target_st->target_st_mtime_nsec);
8923                 __put_user(st.st_ctim.tv_nsec,
8924                            &target_st->target_st_ctime_nsec);
8925 #endif
8926                 unlock_user_struct(target_st, arg2, 1);
8927             }
8928         }
8929         return ret;
8930 #endif
8931     case TARGET_NR_vhangup:
8932         return get_errno(vhangup());
8933 #ifdef TARGET_NR_syscall
8934     case TARGET_NR_syscall:
8935         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8936                           arg6, arg7, arg8, 0);
8937 #endif
8938     case TARGET_NR_wait4:
8939         {
8940             int status;
8941             abi_long status_ptr = arg2;
8942             struct rusage rusage, *rusage_ptr;
8943             abi_ulong target_rusage = arg4;
8944             abi_long rusage_err;
8945             if (target_rusage)
8946                 rusage_ptr = &rusage;
8947             else
8948                 rusage_ptr = NULL;
8949             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8950             if (!is_error(ret)) {
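                /* Only copy out a status word when the caller supplied a
                 * pointer and a child was actually reaped (ret != 0). */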
8951                 if (status_ptr && ret) {
8952                     status = host_to_target_waitstatus(status);
8953                     if (put_user_s32(status, status_ptr))
8954                         return -TARGET_EFAULT;
8955                 }
8956                 if (target_rusage) {
8957                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
8958                     if (rusage_err) {
8959                         ret = rusage_err;
8960                     }
8961                 }
8962             }
8963         }
8964         return ret;
8965 #ifdef TARGET_NR_swapoff
8966     case TARGET_NR_swapoff:
8967         if (!(p = lock_user_string(arg1)))
8968             return -TARGET_EFAULT;
8969         ret = get_errno(swapoff(p));
8970         unlock_user(p, arg1, 0);
8971         return ret;
8972 #endif
8973     case TARGET_NR_sysinfo:
8974         {
8975             struct target_sysinfo *target_value;
8976             struct sysinfo value;
8977             ret = get_errno(sysinfo(&value));
8978             if (!is_error(ret) && arg1)
8979             {
8980                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8981                     return -TARGET_EFAULT;
8982                 __put_user(value.uptime, &target_value->uptime);
8983                 __put_user(value.loads[0], &target_value->loads[0]);
8984                 __put_user(value.loads[1], &target_value->loads[1]);
8985                 __put_user(value.loads[2], &target_value->loads[2]);
8986                 __put_user(value.totalram, &target_value->totalram);
8987                 __put_user(value.freeram, &target_value->freeram);
8988                 __put_user(value.sharedram, &target_value->sharedram);
8989                 __put_user(value.bufferram, &target_value->bufferram);
8990                 __put_user(value.totalswap, &target_value->totalswap);
8991                 __put_user(value.freeswap, &target_value->freeswap);
8992                 __put_user(value.procs, &target_value->procs);
8993                 __put_user(value.totalhigh, &target_value->totalhigh);
8994                 __put_user(value.freehigh, &target_value->freehigh);
8995                 __put_user(value.mem_unit, &target_value->mem_unit);
8996                 unlock_user_struct(target_value, arg1, 1);
8997             }
8998         }
8999         return ret;
9000 #ifdef TARGET_NR_ipc
9001     case TARGET_NR_ipc:
9002         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9003 #endif
9004 #ifdef TARGET_NR_semget
9005     case TARGET_NR_semget:
9006         return get_errno(semget(arg1, arg2, arg3));
9007 #endif
9008 #ifdef TARGET_NR_semop
9009     case TARGET_NR_semop:
9010         return do_semop(arg1, arg2, arg3);
9011 #endif
9012 #ifdef TARGET_NR_semctl
9013     case TARGET_NR_semctl:
9014         return do_semctl(arg1, arg2, arg3, arg4);
9015 #endif
9016 #ifdef TARGET_NR_msgctl
9017     case TARGET_NR_msgctl:
9018         return do_msgctl(arg1, arg2, arg3);
9019 #endif
9020 #ifdef TARGET_NR_msgget
9021     case TARGET_NR_msgget:
9022         return get_errno(msgget(arg1, arg2));
9023 #endif
9024 #ifdef TARGET_NR_msgrcv
9025     case TARGET_NR_msgrcv:
9026         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9027 #endif
9028 #ifdef TARGET_NR_msgsnd
9029     case TARGET_NR_msgsnd:
9030         return do_msgsnd(arg1, arg2, arg3, arg4);
9031 #endif
9032 #ifdef TARGET_NR_shmget
9033     case TARGET_NR_shmget:
9034         return get_errno(shmget(arg1, arg2, arg3));
9035 #endif
9036 #ifdef TARGET_NR_shmctl
9037     case TARGET_NR_shmctl:
9038         return do_shmctl(arg1, arg2, arg3);
9039 #endif
9040 #ifdef TARGET_NR_shmat
9041     case TARGET_NR_shmat:
9042         return do_shmat(cpu_env, arg1, arg2, arg3);
9043 #endif
9044 #ifdef TARGET_NR_shmdt
9045     case TARGET_NR_shmdt:
9046         return do_shmdt(arg1);
9047 #endif
9048     case TARGET_NR_fsync:
9049         return get_errno(fsync(arg1));
9050     case TARGET_NR_clone:
9051         /* Linux manages to have three different orderings for its
9052          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9053          * match the kernel's CONFIG_CLONE_* settings.
9054          * Microblaze is further special in that it uses a sixth
9055          * implicit argument to clone for the TLS pointer.
9056          */
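        /* Each branch below just reorders argN into do_fork()'s single
         * canonical parameter order. */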
9057 #if defined(TARGET_MICROBLAZE)
9058         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9059 #elif defined(TARGET_CLONE_BACKWARDS)
9060         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9061 #elif defined(TARGET_CLONE_BACKWARDS2)
9062         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9063 #else
9064         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9065 #endif
9066         return ret;
9067 #ifdef __NR_exit_group
9068         /* new thread calls */
9069     case TARGET_NR_exit_group:
9070         preexit_cleanup(cpu_env, arg1);
9071         return get_errno(exit_group(arg1));
9072 #endif
9073     case TARGET_NR_setdomainname:
9074         if (!(p = lock_user_string(arg1)))
9075             return -TARGET_EFAULT;
9076         ret = get_errno(setdomainname(p, arg2));
9077         unlock_user(p, arg1, 0);
9078         return ret;
9079     case TARGET_NR_uname:
9080         /* No need to transcode the result because we use the Linux syscall directly. */
9081         {
9082             struct new_utsname * buf;
9083 
9084             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9085                 return -TARGET_EFAULT;
9086             ret = get_errno(sys_uname(buf));
9087             if (!is_error(ret)) {
9088                 /* Overwrite the native machine name with whatever is being
9089                    emulated. */
9090                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9091                           sizeof(buf->machine));
9092                 /* Allow the user to override the reported release.  */
9093                 if (qemu_uname_release && *qemu_uname_release) {
9094                     g_strlcpy(buf->release, qemu_uname_release,
9095                               sizeof(buf->release));
9096                 }
9097             }
9098             unlock_user_struct(buf, arg1, 1);
9099         }
9100         return ret;
9101 #ifdef TARGET_I386
9102     case TARGET_NR_modify_ldt:
9103         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9104 #if !defined(TARGET_X86_64)
9105     case TARGET_NR_vm86:
9106         return do_vm86(cpu_env, arg1, arg2);
9107 #endif
9108 #endif
9109     case TARGET_NR_adjtimex:
9110         {
9111             struct timex host_buf;
9112 
9113             if (target_to_host_timex(&host_buf, arg1) != 0) {
9114                 return -TARGET_EFAULT;
9115             }
9116             ret = get_errno(adjtimex(&host_buf));
9117             if (!is_error(ret)) {
9118                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9119                     return -TARGET_EFAULT;
9120                 }
9121             }
9122         }
9123         return ret;
9124 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9125     case TARGET_NR_clock_adjtime:
9126         {
9127             struct timex htx, *phtx = &htx;
9128 
9129             if (target_to_host_timex(phtx, arg2) != 0) {
9130                 return -TARGET_EFAULT;
9131             }
9132             ret = get_errno(clock_adjtime(arg1, phtx));
9133             if (!is_error(ret) && phtx) {
9134                 if (host_to_target_timex(arg2, phtx) != 0) {
9135                     return -TARGET_EFAULT;
9136                 }
9137             }
9138         }
9139         return ret;
9140 #endif
9141     case TARGET_NR_getpgid:
9142         return get_errno(getpgid(arg1));
9143     case TARGET_NR_fchdir:
9144         return get_errno(fchdir(arg1));
9145     case TARGET_NR_personality:
9146         return get_errno(personality(arg1));
9147 #ifdef TARGET_NR__llseek /* Not on alpha */
9148     case TARGET_NR__llseek:
9149         {
9150             int64_t res;
9151 #if !defined(__NR_llseek)
9152             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9153             if (res == -1) {
9154                 ret = get_errno(res);
9155             } else {
9156                 ret = 0;
9157             }
9158 #else
9159             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9160 #endif
9161             if ((ret == 0) && put_user_s64(res, arg4)) {
9162                 return -TARGET_EFAULT;
9163             }
9164         }
9165         return ret;
9166 #endif
9167 #ifdef TARGET_NR_getdents
9168     case TARGET_NR_getdents:
9169 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9170 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
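     /*
      * With a 32-bit guest ABI on a 64-bit host, struct linux_dirent's
      * long-sized d_ino/d_off fields differ in width between host and
      * target, so each record is repacked into a separate guest buffer
      * here instead of being byte-swapped in place as in the simpler
      * path below.
      */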
9171         {
9172             struct target_dirent *target_dirp;
9173             struct linux_dirent *dirp;
9174             abi_long count = arg3;
9175 
9176             dirp = g_try_malloc(count);
9177             if (!dirp) {
9178                 return -TARGET_ENOMEM;
9179             }
9180 
9181             ret = get_errno(sys_getdents(arg1, dirp, count));
9182             if (!is_error(ret)) {
9183                 struct linux_dirent *de;
9184                 struct target_dirent *tde;
9185                 int len = ret;
9186                 int reclen, treclen;
9187                 int count1, tnamelen;
9188 
9189                 count1 = 0;
9190                 de = dirp;
9191                 target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9192                 if (!target_dirp) {
                         g_free(dirp);
                         return -TARGET_EFAULT;
                     }
9193                 tde = target_dirp;
9194                 while (len > 0) {
9195                     reclen = de->d_reclen;
9196                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9197                     assert(tnamelen >= 0);
9198                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9199                     assert(count1 + treclen <= count);
9200                     tde->d_reclen = tswap16(treclen);
9201                     tde->d_ino = tswapal(de->d_ino);
9202                     tde->d_off = tswapal(de->d_off);
9203                     memcpy(tde->d_name, de->d_name, tnamelen);
9204                     de = (struct linux_dirent *)((char *)de + reclen);
9205                     len -= reclen;
9206                     tde = (struct target_dirent *)((char *)tde + treclen);
9207                     count1 += treclen;
9208                 }
9209                 ret = count1;
9210                 unlock_user(target_dirp, arg2, ret);
9211             }
9212             g_free(dirp);
9213         }
9214 #else
9215         {
9216             struct linux_dirent *dirp;
9217             abi_long count = arg3;
9218 
9219             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9220                 return -TARGET_EFAULT;
9221             ret = get_errno(sys_getdents(arg1, dirp, count));
9222             if (!is_error(ret)) {
9223                 struct linux_dirent *de;
9224                 int len = ret;
9225                 int reclen;
9226                 de = dirp;
9227                 while (len > 0) {
9228                     reclen = de->d_reclen;
9229                     if (reclen > len)
9230                         break;
9231                     de->d_reclen = tswap16(reclen);
9232                     tswapls(&de->d_ino);
9233                     tswapls(&de->d_off);
9234                     de = (struct linux_dirent *)((char *)de + reclen);
9235                     len -= reclen;
9236                 }
9237             }
9238             unlock_user(dirp, arg2, ret);
9239         }
9240 #endif
9241 #else
9242         /* Implement getdents in terms of getdents64 */
9243         {
9244             struct linux_dirent64 *dirp;
9245             abi_long count = arg3;
9246 
9247             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9248             if (!dirp) {
9249                 return -TARGET_EFAULT;
9250             }
9251             ret = get_errno(sys_getdents64(arg1, dirp, count));
9252             if (!is_error(ret)) {
9253                 /* Convert the dirent64 structs to target dirent.  We do this
9254                  * in-place, since we can guarantee that a target_dirent is no
9255                  * larger than a dirent64; however this means we have to be
9256                  * careful to read everything before writing in the new format.
9257                  */
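                     /*
                      * Rough sketch of the two record layouts involved (field
                      * widths as used by the conversion below):
                      *   linux_dirent64:  u64 d_ino; s64 d_off; u16 d_reclen;
                      *                    u8 d_type; char d_name[];
                      *   target_dirent:   abi_long d_ino; abi_long d_off;
                      *                    u16 d_reclen; char d_name[];
                      *                    (d_type is tucked into the record's
                      *                    final byte)
                      * Hence treclen below is offsetof(d_name) + namelen
                      * + 1 (NUL) + 1 (d_type byte), aligned up to abi_long.
                      */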
9258                 struct linux_dirent64 *de;
9259                 struct target_dirent *tde;
9260                 int len = ret;
9261                 int tlen = 0;
9262 
9263                 de = dirp;
9264                 tde = (struct target_dirent *)dirp;
9265                 while (len > 0) {
9266                     int namelen, treclen;
9267                     int reclen = de->d_reclen;
9268                     uint64_t ino = de->d_ino;
9269                     int64_t off = de->d_off;
9270                     uint8_t type = de->d_type;
9271 
9272                     namelen = strlen(de->d_name);
9273                     treclen = offsetof(struct target_dirent, d_name)
9274                         + namelen + 2;
9275                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9276 
9277                     memmove(tde->d_name, de->d_name, namelen + 1);
9278                     tde->d_ino = tswapal(ino);
9279                     tde->d_off = tswapal(off);
9280                     tde->d_reclen = tswap16(treclen);
9281                     /* The target_dirent type is in what was formerly a padding
9282                      * byte at the end of the structure:
9283                      */
9284                     *(((char *)tde) + treclen - 1) = type;
9285 
9286                     de = (struct linux_dirent64 *)((char *)de + reclen);
9287                     tde = (struct target_dirent *)((char *)tde + treclen);
9288                     len -= reclen;
9289                     tlen += treclen;
9290                 }
9291                 ret = tlen;
9292             }
9293             unlock_user(dirp, arg2, ret);
9294         }
9295 #endif
9296         return ret;
9297 #endif /* TARGET_NR_getdents */
9298 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9299     case TARGET_NR_getdents64:
9300         {
9301             struct linux_dirent64 *dirp;
9302             abi_long count = arg3;
9303             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9304                 return -TARGET_EFAULT;
9305             ret = get_errno(sys_getdents64(arg1, dirp, count));
9306             if (!is_error(ret)) {
9307                 struct linux_dirent64 *de;
9308                 int len = ret;
9309                 int reclen;
9310                 de = dirp;
9311                 while (len > 0) {
9312                     reclen = de->d_reclen;
9313                     if (reclen > len)
9314                         break;
9315                     de->d_reclen = tswap16(reclen);
9316                     tswap64s((uint64_t *)&de->d_ino);
9317                     tswap64s((uint64_t *)&de->d_off);
9318                     de = (struct linux_dirent64 *)((char *)de + reclen);
9319                     len -= reclen;
9320                 }
9321             }
9322             unlock_user(dirp, arg2, ret);
9323         }
9324         return ret;
9325 #endif /* TARGET_NR_getdents64 */
9326 #if defined(TARGET_NR__newselect)
9327     case TARGET_NR__newselect:
9328         return do_select(arg1, arg2, arg3, arg4, arg5);
9329 #endif
9330 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9331 # ifdef TARGET_NR_poll
9332     case TARGET_NR_poll:
9333 # endif
9334 # ifdef TARGET_NR_ppoll
9335     case TARGET_NR_ppoll:
9336 # endif
9337         {
9338             struct target_pollfd *target_pfd;
9339             unsigned int nfds = arg2;
9340             struct pollfd *pfd;
9341             unsigned int i;
9342 
9343             pfd = NULL;
9344             target_pfd = NULL;
9345             if (nfds) {
9346                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9347                     return -TARGET_EINVAL;
9348                 }
9349 
9350                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9351                                        sizeof(struct target_pollfd) * nfds, 1);
9352                 if (!target_pfd) {
9353                     return -TARGET_EFAULT;
9354                 }
9355 
9356                 pfd = alloca(sizeof(struct pollfd) * nfds);
9357                 for (i = 0; i < nfds; i++) {
9358                     pfd[i].fd = tswap32(target_pfd[i].fd);
9359                     pfd[i].events = tswap16(target_pfd[i].events);
9360                 }
9361             }
9362 
9363             switch (num) {
9364 # ifdef TARGET_NR_ppoll
9365             case TARGET_NR_ppoll:
9366             {
9367                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9368                 target_sigset_t *target_set;
9369                 sigset_t _set, *set = &_set;
9370 
9371                 if (arg3) {
9372                     if (target_to_host_timespec(timeout_ts, arg3)) {
9373                         unlock_user(target_pfd, arg1, 0);
9374                         return -TARGET_EFAULT;
9375                     }
9376                 } else {
9377                     timeout_ts = NULL;
9378                 }
9379 
9380                 if (arg4) {
9381                     if (arg5 != sizeof(target_sigset_t)) {
9382                         unlock_user(target_pfd, arg1, 0);
9383                         return -TARGET_EINVAL;
9384                     }
9385 
9386                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9387                     if (!target_set) {
9388                         unlock_user(target_pfd, arg1, 0);
9389                         return -TARGET_EFAULT;
9390                     }
9391                     target_to_host_sigset(set, target_set);
9392                 } else {
9393                     set = NULL;
9394                 }
9395 
9396                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9397                                            set, SIGSET_T_SIZE));
9398 
9399                 if (!is_error(ret) && arg3) {
9400                     host_to_target_timespec(arg3, timeout_ts);
9401                 }
9402                 if (arg4) {
9403                     unlock_user(target_set, arg4, 0);
9404                 }
9405                 break;
9406             }
9407 # endif
9408 # ifdef TARGET_NR_poll
9409             case TARGET_NR_poll:
9410             {
9411                 struct timespec ts, *pts;
9412 
9413                 if (arg3 >= 0) {
9414                     /* Convert ms to secs, ns */
9415                     ts.tv_sec = arg3 / 1000;
9416                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9417                     pts = &ts;
9418                 } else {
9419                     /* A negative poll() timeout means "infinite" */
9420                     pts = NULL;
9421                 }
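                     /*
                      * Worked example: a guest timeout of 2500 ms becomes
                      * { .tv_sec = 2, .tv_nsec = 500000000 } above.
                      */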
9422                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9423                 break;
9424             }
9425 # endif
9426             default:
9427                 g_assert_not_reached();
9428             }
9429 
9430             if (!is_error(ret)) {
9431                 for (i = 0; i < nfds; i++) {
9432                     target_pfd[i].revents = tswap16(pfd[i].revents);
9433                 }
9434             }
9435             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9436         }
9437         return ret;
9438 #endif
9439     case TARGET_NR_flock:
9440         /* NOTE: the flock constant seems to be the same for every
9441            Linux platform */
9442         return get_errno(safe_flock(arg1, arg2));
9443     case TARGET_NR_readv:
9444         {
9445             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9446             if (vec != NULL) {
9447                 ret = get_errno(safe_readv(arg1, vec, arg3));
9448                 unlock_iovec(vec, arg2, arg3, 1);
9449             } else {
9450                 ret = -host_to_target_errno(errno);
9451             }
9452         }
9453         return ret;
9454     case TARGET_NR_writev:
9455         {
9456             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9457             if (vec != NULL) {
9458                 ret = get_errno(safe_writev(arg1, vec, arg3));
9459                 unlock_iovec(vec, arg2, arg3, 0);
9460             } else {
9461                 ret = -host_to_target_errno(errno);
9462             }
9463         }
9464         return ret;
9465 #if defined(TARGET_NR_preadv)
9466     case TARGET_NR_preadv:
9467         {
9468             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9469             if (vec != NULL) {
9470                 unsigned long low, high;
9471 
9472                 target_to_host_low_high(arg4, arg5, &low, &high);
9473                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9474                 unlock_iovec(vec, arg2, arg3, 1);
9475             } else {
9476                 ret = -host_to_target_errno(errno);
9477             }
9478         }
9479         return ret;
9480 #endif
9481 #if defined(TARGET_NR_pwritev)
9482     case TARGET_NR_pwritev:
9483         {
9484             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9485             if (vec != NULL) {
9486                 unsigned long low, high;
9487 
9488                 target_to_host_low_high(arg4, arg5, &low, &high);
9489                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9490                 unlock_iovec(vec, arg2, arg3, 0);
9491             } else {
9492                 ret = -host_to_target_errno(errno);
9493             }
9494         }
9495         return ret;
9496 #endif
9497     case TARGET_NR_getsid:
9498         return get_errno(getsid(arg1));
9499 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9500     case TARGET_NR_fdatasync:
9501         return get_errno(fdatasync(arg1));
9502 #endif
9503 #ifdef TARGET_NR__sysctl
9504     case TARGET_NR__sysctl:
9505         /* We don't implement this, but ENOTDIR is always a safe
9506            return value. */
9507         return -TARGET_ENOTDIR;
9508 #endif
9509     case TARGET_NR_sched_getaffinity:
9510         {
9511             unsigned int mask_size;
9512             unsigned long *mask;
9513 
9514             /*
9515              * sched_getaffinity needs multiples of ulong, so need to take
9516              * care of mismatches between target ulong and host ulong sizes.
9517              */
9518             if (arg2 & (sizeof(abi_ulong) - 1)) {
9519                 return -TARGET_EINVAL;
9520             }
9521             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
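             /*
              * For example, a 32-bit guest passing arg2 == 4 on a 64-bit
              * host is rounded up to mask_size == 8 here, so the host
              * kernel always sees whole unsigned longs.
              */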
9522 
9523             mask = alloca(mask_size);
9524             memset(mask, 0, mask_size);
9525             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9526 
9527             if (!is_error(ret)) {
9528                 if (ret > arg2) {
9529                     /* More data was returned than fits in the caller's buffer.
9530                      * This only happens if sizeof(abi_long) < sizeof(long)
9531                      * and the caller passed us a buffer holding an odd number
9532                      * of abi_longs. If the host kernel is actually using the
9533                      * extra 4 bytes then fail EINVAL; otherwise we can just
9534                      * ignore them and only copy the interesting part.
9535                      */
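                         /*
                          * Continuing the example above: arg2 == 4 rounds to
                          * mask_size == 8, so the kernel may report ret == 8;
                          * that is only safe if every configured CPU fits in
                          * the guest's arg2 * 8 mask bits.
                          */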
9536                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9537                     if (numcpus > arg2 * 8) {
9538                         return -TARGET_EINVAL;
9539                     }
9540                     ret = arg2;
9541                 }
9542 
9543                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9544                     return -TARGET_EFAULT;
9545                 }
9546             }
9547         }
9548         return ret;
9549     case TARGET_NR_sched_setaffinity:
9550         {
9551             unsigned int mask_size;
9552             unsigned long *mask;
9553 
9554             /*
9555              * sched_setaffinity needs multiples of ulong, so need to take
9556              * care of mismatches between target ulong and host ulong sizes.
9557              */
9558             if (arg2 & (sizeof(abi_ulong) - 1)) {
9559                 return -TARGET_EINVAL;
9560             }
9561             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9562             mask = alloca(mask_size);
9563 
9564             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9565             if (ret) {
9566                 return ret;
9567             }
9568 
9569             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9570         }
9571     case TARGET_NR_getcpu:
9572         {
9573             unsigned cpu, node;
9574             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9575                                        arg2 ? &node : NULL,
9576                                        NULL));
9577             if (is_error(ret)) {
9578                 return ret;
9579             }
9580             if (arg1 && put_user_u32(cpu, arg1)) {
9581                 return -TARGET_EFAULT;
9582             }
9583             if (arg2 && put_user_u32(node, arg2)) {
9584                 return -TARGET_EFAULT;
9585             }
9586         }
9587         return ret;
9588     case TARGET_NR_sched_setparam:
9589         {
9590             struct sched_param *target_schp;
9591             struct sched_param schp;
9592 
9593             if (arg2 == 0) {
9594                 return -TARGET_EINVAL;
9595             }
9596             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9597                 return -TARGET_EFAULT;
9598             schp.sched_priority = tswap32(target_schp->sched_priority);
9599             unlock_user_struct(target_schp, arg2, 0);
9600             return get_errno(sched_setparam(arg1, &schp));
9601         }
9602     case TARGET_NR_sched_getparam:
9603         {
9604             struct sched_param *target_schp;
9605             struct sched_param schp;
9606 
9607             if (arg2 == 0) {
9608                 return -TARGET_EINVAL;
9609             }
9610             ret = get_errno(sched_getparam(arg1, &schp));
9611             if (!is_error(ret)) {
9612                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9613                     return -TARGET_EFAULT;
9614                 target_schp->sched_priority = tswap32(schp.sched_priority);
9615                 unlock_user_struct(target_schp, arg2, 1);
9616             }
9617         }
9618         return ret;
9619     case TARGET_NR_sched_setscheduler:
9620         {
9621             struct sched_param *target_schp;
9622             struct sched_param schp;
9623             if (arg3 == 0) {
9624                 return -TARGET_EINVAL;
9625             }
9626             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9627                 return -TARGET_EFAULT;
9628             schp.sched_priority = tswap32(target_schp->sched_priority);
9629             unlock_user_struct(target_schp, arg3, 0);
9630             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9631         }
9632     case TARGET_NR_sched_getscheduler:
9633         return get_errno(sched_getscheduler(arg1));
9634     case TARGET_NR_sched_yield:
9635         return get_errno(sched_yield());
9636     case TARGET_NR_sched_get_priority_max:
9637         return get_errno(sched_get_priority_max(arg1));
9638     case TARGET_NR_sched_get_priority_min:
9639         return get_errno(sched_get_priority_min(arg1));
9640     case TARGET_NR_sched_rr_get_interval:
9641         {
9642             struct timespec ts;
9643             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9644             if (!is_error(ret)) {
9645                 ret = host_to_target_timespec(arg2, &ts);
9646             }
9647         }
9648         return ret;
9649     case TARGET_NR_nanosleep:
9650         {
9651             struct timespec req, rem;
9652             if (target_to_host_timespec(&req, arg1)) {
                     return -TARGET_EFAULT;
                 }
9653             ret = get_errno(safe_nanosleep(&req, &rem));
9654             if (is_error(ret) && arg2) {
9655                 if (host_to_target_timespec(arg2, &rem)) {
                         return -TARGET_EFAULT;
                     }
9656             }
9657         }
9658         return ret;
9659     case TARGET_NR_prctl:
9660         switch (arg1) {
9661         case PR_GET_PDEATHSIG:
9662         {
9663             int deathsig;
9664             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9665             if (!is_error(ret) && arg2
9666                 && put_user_ual(deathsig, arg2)) {
9667                 return -TARGET_EFAULT;
9668             }
9669             return ret;
9670         }
9671 #ifdef PR_GET_NAME
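         /*
          * The fixed 16-byte buffer used for PR_GET_NAME/PR_SET_NAME below
          * matches the kernel's TASK_COMM_LEN for task comm strings.
          */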
9672         case PR_GET_NAME:
9673         {
9674             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9675             if (!name) {
9676                 return -TARGET_EFAULT;
9677             }
9678             ret = get_errno(prctl(arg1, (unsigned long)name,
9679                                   arg3, arg4, arg5));
9680             unlock_user(name, arg2, 16);
9681             return ret;
9682         }
9683         case PR_SET_NAME:
9684         {
9685             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9686             if (!name) {
9687                 return -TARGET_EFAULT;
9688             }
9689             ret = get_errno(prctl(arg1, (unsigned long)name,
9690                                   arg3, arg4, arg5));
9691             unlock_user(name, arg2, 0);
9692             return ret;
9693         }
9694 #endif
9695 #ifdef TARGET_MIPS
9696         case TARGET_PR_GET_FP_MODE:
9697         {
9698             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9699             ret = 0;
9700             if (env->CP0_Status & (1 << CP0St_FR)) {
9701                 ret |= TARGET_PR_FP_MODE_FR;
9702             }
9703             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9704                 ret |= TARGET_PR_FP_MODE_FRE;
9705             }
9706             return ret;
9707         }
9708         case TARGET_PR_SET_FP_MODE:
9709         {
9710             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9711             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9712             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
9713             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9714             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9715 
9716             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
9717                                             TARGET_PR_FP_MODE_FRE;
9718 
9719             /* If nothing to change, return right away, successfully.  */
9720             if (old_fr == new_fr && old_fre == new_fre) {
9721                 return 0;
9722             }
9723             /* Check the value is valid */
9724             if (arg2 & ~known_bits) {
9725                 return -TARGET_EOPNOTSUPP;
9726             }
9727             /* Setting FRE without FR is not supported.  */
9728             if (new_fre && !new_fr) {
9729                 return -TARGET_EOPNOTSUPP;
9730             }
9731             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9732                 /* FR1 is not supported */
9733                 return -TARGET_EOPNOTSUPP;
9734             }
9735             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9736                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9737                 /* cannot set FR=0 */
9738                 return -TARGET_EOPNOTSUPP;
9739             }
9740             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9741                 /* Cannot set FRE=1 */
9742                 return -TARGET_EOPNOTSUPP;
9743             }
9744 
9745             int i;
9746             fpr_t *fpr = env->active_fpu.fpr;
9747             for (i = 0; i < 32; i += 2) {
9748                 if (!old_fr && new_fr) {
9749                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9750                 } else if (old_fr && !new_fr) {
9751                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
9752                 }
9753             }
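             /*
              * Rough intuition for the loop above: with FR=0 a 64-bit FP
              * value is split across an even/odd register pair, while with
              * FR=1 it lives entirely in the widened even register, so the
              * upper 32 bits are moved between those two homes whenever the
              * mode flips.
              */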
9754 
9755             if (new_fr) {
9756                 env->CP0_Status |= (1 << CP0St_FR);
9757                 env->hflags |= MIPS_HFLAG_F64;
9758             } else {
9759                 env->CP0_Status &= ~(1 << CP0St_FR);
9760                 env->hflags &= ~MIPS_HFLAG_F64;
9761             }
9762             if (new_fre) {
9763                 env->CP0_Config5 |= (1 << CP0C5_FRE);
9764                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
9765                     env->hflags |= MIPS_HFLAG_FRE;
9766                 }
9767             } else {
9768                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
9769                 env->hflags &= ~MIPS_HFLAG_FRE;
9770             }
9771 
9772             return 0;
9773         }
9774 #endif /* MIPS */
9775 #ifdef TARGET_AARCH64
9776         case TARGET_PR_SVE_SET_VL:
9777             /*
9778              * We cannot support either PR_SVE_SET_VL_ONEXEC or
9779              * PR_SVE_VL_INHERIT.  Note the kernel definition
9780              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9781              * even though the current architectural maximum is VQ=16.
9782              */
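             /*
              * Illustration: VL = VQ * 16 bytes and zcr_el[1] holds VQ - 1,
              * so e.g. arg2 == 64 selects VQ = 4 (clamped to the CPU's
              * sve_max_vq) and the call returns 64.
              */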
9783             ret = -TARGET_EINVAL;
9784             if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env))
9785                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9786                 CPUARMState *env = cpu_env;
9787                 ARMCPU *cpu = arm_env_get_cpu(env);
9788                 uint32_t vq, old_vq;
9789 
9790                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9791                 vq = MAX(arg2 / 16, 1);
9792                 vq = MIN(vq, cpu->sve_max_vq);
9793 
9794                 if (vq < old_vq) {
9795                     aarch64_sve_narrow_vq(env, vq);
9796                 }
9797                 env->vfp.zcr_el[1] = vq - 1;
9798                 ret = vq * 16;
9799             }
9800             return ret;
9801         case TARGET_PR_SVE_GET_VL:
9802             ret = -TARGET_EINVAL;
9803             {
9804                 ARMCPU *cpu = arm_env_get_cpu(cpu_env);
9805                 if (cpu_isar_feature(aa64_sve, cpu)) {
9806                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9807                 }
9808             }
9809             return ret;
9810         case TARGET_PR_PAC_RESET_KEYS:
9811             {
9812                 CPUARMState *env = cpu_env;
9813                 ARMCPU *cpu = arm_env_get_cpu(env);
9814 
9815                 if (arg3 || arg4 || arg5) {
9816                     return -TARGET_EINVAL;
9817                 }
9818                 if (cpu_isar_feature(aa64_pauth, cpu)) {
9819                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
9820                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
9821                                TARGET_PR_PAC_APGAKEY);
9822                     int ret = 0;
9823                     Error *err = NULL;
9824 
9825                     if (arg2 == 0) {
9826                         arg2 = all;
9827                     } else if (arg2 & ~all) {
9828                         return -TARGET_EINVAL;
9829                     }
9830                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
9831                         ret |= qemu_guest_getrandom(&env->keys.apia,
9832                                                     sizeof(ARMPACKey), &err);
9833                     }
9834                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
9835                         ret |= qemu_guest_getrandom(&env->keys.apib,
9836                                                     sizeof(ARMPACKey), &err);
9837                     }
9838                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
9839                         ret |= qemu_guest_getrandom(&env->keys.apda,
9840                                                     sizeof(ARMPACKey), &err);
9841                     }
9842                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
9843                         ret |= qemu_guest_getrandom(&env->keys.apdb,
9844                                                     sizeof(ARMPACKey), &err);
9845                     }
9846                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
9847                         ret |= qemu_guest_getrandom(&env->keys.apga,
9848                                                     sizeof(ARMPACKey), &err);
9849                     }
9850                     if (ret != 0) {
9851                         /*
9852                          * Some unknown failure in the crypto.  The best
9853                          * we can do is log it and fail the syscall.
9854                          * The real syscall cannot fail this way.
9855                          */
9856                         qemu_log_mask(LOG_UNIMP,
9857                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
9858                                       error_get_pretty(err));
9859                         error_free(err);
9860                         return -TARGET_EIO;
9861                     }
9862                     return 0;
9863                 }
9864             }
9865             return -TARGET_EINVAL;
9866 #endif /* AARCH64 */
9867         case PR_GET_SECCOMP:
9868         case PR_SET_SECCOMP:
9869             /* Disable seccomp to prevent the target from disabling
9870              * syscalls that we need. */
9871             return -TARGET_EINVAL;
9872         default:
9873             /* Most prctl options have no pointer arguments */
9874             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9875         }
9876         break;
9877 #ifdef TARGET_NR_arch_prctl
9878     case TARGET_NR_arch_prctl:
9879 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9880         return do_arch_prctl(cpu_env, arg1, arg2);
9881 #else
9882 #error unreachable
9883 #endif
9884 #endif
9885 #ifdef TARGET_NR_pread64
9886     case TARGET_NR_pread64:
9887         if (regpairs_aligned(cpu_env, num)) {
9888             arg4 = arg5;
9889             arg5 = arg6;
9890         }
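         /*
          * On ABIs that pass 64-bit syscall arguments in aligned register
          * pairs, the offset halves arrive one slot later, hence the shift
          * above; target_offset64() below then recombines the two 32-bit
          * halves in target order.
          */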
9891         if (arg2 == 0 && arg3 == 0) {
9892             /* Special-case NULL buffer and zero length, which should succeed */
9893             p = 0;
9894         } else {
9895             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9896             if (!p) {
9897                 return -TARGET_EFAULT;
9898             }
9899         }
9900         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9901         unlock_user(p, arg2, ret);
9902         return ret;
9903     case TARGET_NR_pwrite64:
9904         if (regpairs_aligned(cpu_env, num)) {
9905             arg4 = arg5;
9906             arg5 = arg6;
9907         }
9908         if (arg2 == 0 && arg3 == 0) {
9909             /* Special-case NULL buffer and zero length, which should succeed */
9910             p = 0;
9911         } else {
9912             p = lock_user(VERIFY_READ, arg2, arg3, 1);
9913             if (!p) {
9914                 return -TARGET_EFAULT;
9915             }
9916         }
9917         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9918         unlock_user(p, arg2, 0);
9919         return ret;
9920 #endif
9921     case TARGET_NR_getcwd:
9922         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9923             return -TARGET_EFAULT;
9924         ret = get_errno(sys_getcwd1(p, arg2));
9925         unlock_user(p, arg1, ret);
9926         return ret;
9927     case TARGET_NR_capget:
9928     case TARGET_NR_capset:
9929     {
9930         struct target_user_cap_header *target_header;
9931         struct target_user_cap_data *target_data = NULL;
9932         struct __user_cap_header_struct header;
9933         struct __user_cap_data_struct data[2];
9934         struct __user_cap_data_struct *dataptr = NULL;
9935         int i, target_datalen;
9936         int data_items = 1;
9937 
9938         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9939             return -TARGET_EFAULT;
9940         }
9941         header.version = tswap32(target_header->version);
9942         header.pid = tswap32(target_header->pid);
9943 
9944         if (header.version != _LINUX_CAPABILITY_VERSION) {
9945             /* Versions 2 and up take a pointer to two user_data structs */
9946             data_items = 2;
9947         }
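         /*
          * For reference (values from linux/capability.h): version 1
          * (0x19980330) describes a single 32-bit capability set, while
          * versions 2/3 (0x20071026 / 0x20080522) split 64-bit sets across
          * two __user_cap_data_struct entries, hence data_items == 2.
          */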
9948 
9949         target_datalen = sizeof(*target_data) * data_items;
9950 
9951         if (arg2) {
9952             if (num == TARGET_NR_capget) {
9953                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9954             } else {
9955                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9956             }
9957             if (!target_data) {
9958                 unlock_user_struct(target_header, arg1, 0);
9959                 return -TARGET_EFAULT;
9960             }
9961 
9962             if (num == TARGET_NR_capset) {
9963                 for (i = 0; i < data_items; i++) {
9964                     data[i].effective = tswap32(target_data[i].effective);
9965                     data[i].permitted = tswap32(target_data[i].permitted);
9966                     data[i].inheritable = tswap32(target_data[i].inheritable);
9967                 }
9968             }
9969 
9970             dataptr = data;
9971         }
9972 
9973         if (num == TARGET_NR_capget) {
9974             ret = get_errno(capget(&header, dataptr));
9975         } else {
9976             ret = get_errno(capset(&header, dataptr));
9977         }
9978 
9979         /* The kernel always updates version for both capget and capset */
9980         target_header->version = tswap32(header.version);
9981         unlock_user_struct(target_header, arg1, 1);
9982 
9983         if (arg2) {
9984             if (num == TARGET_NR_capget) {
9985                 for (i = 0; i < data_items; i++) {
9986                     target_data[i].effective = tswap32(data[i].effective);
9987                     target_data[i].permitted = tswap32(data[i].permitted);
9988                     target_data[i].inheritable = tswap32(data[i].inheritable);
9989                 }
9990                 unlock_user(target_data, arg2, target_datalen);
9991             } else {
9992                 unlock_user(target_data, arg2, 0);
9993             }
9994         }
9995         return ret;
9996     }
9997     case TARGET_NR_sigaltstack:
9998         return do_sigaltstack(arg1, arg2,
9999                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10000 
10001 #ifdef CONFIG_SENDFILE
10002 #ifdef TARGET_NR_sendfile
10003     case TARGET_NR_sendfile:
10004     {
10005         off_t *offp = NULL;
10006         off_t off;
10007         if (arg3) {
10008             ret = get_user_sal(off, arg3);
10009             if (is_error(ret)) {
10010                 return ret;
10011             }
10012             offp = &off;
10013         }
10014         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10015         if (!is_error(ret) && arg3) {
10016             abi_long ret2 = put_user_sal(off, arg3);
10017             if (is_error(ret2)) {
10018                 ret = ret2;
10019             }
10020         }
10021         return ret;
10022     }
10023 #endif
10024 #ifdef TARGET_NR_sendfile64
10025     case TARGET_NR_sendfile64:
10026     {
10027         off_t *offp = NULL;
10028         off_t off;
10029         if (arg3) {
10030             ret = get_user_s64(off, arg3);
10031             if (is_error(ret)) {
10032                 return ret;
10033             }
10034             offp = &off;
10035         }
10036         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10037         if (!is_error(ret) && arg3) {
10038             abi_long ret2 = put_user_s64(off, arg3);
10039             if (is_error(ret2)) {
10040                 ret = ret2;
10041             }
10042         }
10043         return ret;
10044     }
10045 #endif
10046 #endif
10047 #ifdef TARGET_NR_vfork
10048     case TARGET_NR_vfork:
10049         return get_errno(do_fork(cpu_env,
10050                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10051                          0, 0, 0, 0));
10052 #endif
10053 #ifdef TARGET_NR_ugetrlimit
10054     case TARGET_NR_ugetrlimit:
10055     {
10056         struct rlimit rlim;
10057         int resource = target_to_host_resource(arg1);
10058         ret = get_errno(getrlimit(resource, &rlim));
10059         if (!is_error(ret)) {
10060             struct target_rlimit *target_rlim;
10061             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10062                 return -TARGET_EFAULT;
10063             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10064             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10065             unlock_user_struct(target_rlim, arg2, 1);
10066         }
10067         return ret;
10068     }
10069 #endif
10070 #ifdef TARGET_NR_truncate64
10071     case TARGET_NR_truncate64:
10072         if (!(p = lock_user_string(arg1)))
10073             return -TARGET_EFAULT;
10074         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10075         unlock_user(p, arg1, 0);
10076         return ret;
10077 #endif
10078 #ifdef TARGET_NR_ftruncate64
10079     case TARGET_NR_ftruncate64:
10080         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10081 #endif
10082 #ifdef TARGET_NR_stat64
10083     case TARGET_NR_stat64:
10084         if (!(p = lock_user_string(arg1))) {
10085             return -TARGET_EFAULT;
10086         }
10087         ret = get_errno(stat(path(p), &st));
10088         unlock_user(p, arg1, 0);
10089         if (!is_error(ret))
10090             ret = host_to_target_stat64(cpu_env, arg2, &st);
10091         return ret;
10092 #endif
10093 #ifdef TARGET_NR_lstat64
10094     case TARGET_NR_lstat64:
10095         if (!(p = lock_user_string(arg1))) {
10096             return -TARGET_EFAULT;
10097         }
10098         ret = get_errno(lstat(path(p), &st));
10099         unlock_user(p, arg1, 0);
10100         if (!is_error(ret))
10101             ret = host_to_target_stat64(cpu_env, arg2, &st);
10102         return ret;
10103 #endif
10104 #ifdef TARGET_NR_fstat64
10105     case TARGET_NR_fstat64:
10106         ret = get_errno(fstat(arg1, &st));
10107         if (!is_error(ret))
10108             ret = host_to_target_stat64(cpu_env, arg2, &st);
10109         return ret;
10110 #endif
10111 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10112 #ifdef TARGET_NR_fstatat64
10113     case TARGET_NR_fstatat64:
10114 #endif
10115 #ifdef TARGET_NR_newfstatat
10116     case TARGET_NR_newfstatat:
10117 #endif
10118         if (!(p = lock_user_string(arg2))) {
10119             return -TARGET_EFAULT;
10120         }
10121         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10122         unlock_user(p, arg2, 0);
10123         if (!is_error(ret))
10124             ret = host_to_target_stat64(cpu_env, arg3, &st);
10125         return ret;
10126 #endif
10127 #ifdef TARGET_NR_lchown
10128     case TARGET_NR_lchown:
10129         if (!(p = lock_user_string(arg1)))
10130             return -TARGET_EFAULT;
10131         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10132         unlock_user(p, arg1, 0);
10133         return ret;
10134 #endif
10135 #ifdef TARGET_NR_getuid
10136     case TARGET_NR_getuid:
10137         return get_errno(high2lowuid(getuid()));
10138 #endif
10139 #ifdef TARGET_NR_getgid
10140     case TARGET_NR_getgid:
10141         return get_errno(high2lowgid(getgid()));
10142 #endif
10143 #ifdef TARGET_NR_geteuid
10144     case TARGET_NR_geteuid:
10145         return get_errno(high2lowuid(geteuid()));
10146 #endif
10147 #ifdef TARGET_NR_getegid
10148     case TARGET_NR_getegid:
10149         return get_errno(high2lowgid(getegid()));
10150 #endif
10151     case TARGET_NR_setreuid:
10152         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10153     case TARGET_NR_setregid:
10154         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10155     case TARGET_NR_getgroups:
10156         {
10157             int gidsetsize = arg1;
10158             target_id *target_grouplist;
10159             gid_t *grouplist;
10160             int i;
10161 
10162             grouplist = alloca(gidsetsize * sizeof(gid_t));
10163             ret = get_errno(getgroups(gidsetsize, grouplist));
10164             if (gidsetsize == 0)
10165                 return ret;
10166             if (!is_error(ret)) {
10167                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10168                 if (!target_grouplist)
10169                     return -TARGET_EFAULT;
10170                 for (i = 0; i < ret; i++)
10171                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10172                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10173             }
10174         }
10175         return ret;
10176     case TARGET_NR_setgroups:
10177         {
10178             int gidsetsize = arg1;
10179             target_id *target_grouplist;
10180             gid_t *grouplist = NULL;
10181             int i;
10182             if (gidsetsize) {
10183                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10184                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10185                 if (!target_grouplist) {
10186                     return -TARGET_EFAULT;
10187                 }
10188                 for (i = 0; i < gidsetsize; i++) {
10189                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10190                 }
10191                 unlock_user(target_grouplist, arg2, 0);
10192             }
10193             return get_errno(setgroups(gidsetsize, grouplist));
10194         }
10195     case TARGET_NR_fchown:
10196         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10197 #if defined(TARGET_NR_fchownat)
10198     case TARGET_NR_fchownat:
10199         if (!(p = lock_user_string(arg2)))
10200             return -TARGET_EFAULT;
10201         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10202                                  low2highgid(arg4), arg5));
10203         unlock_user(p, arg2, 0);
10204         return ret;
10205 #endif
10206 #ifdef TARGET_NR_setresuid
10207     case TARGET_NR_setresuid:
10208         return get_errno(sys_setresuid(low2highuid(arg1),
10209                                        low2highuid(arg2),
10210                                        low2highuid(arg3)));
10211 #endif
10212 #ifdef TARGET_NR_getresuid
10213     case TARGET_NR_getresuid:
10214         {
10215             uid_t ruid, euid, suid;
10216             ret = get_errno(getresuid(&ruid, &euid, &suid));
10217             if (!is_error(ret)) {
10218                 if (put_user_id(high2lowuid(ruid), arg1)
10219                     || put_user_id(high2lowuid(euid), arg2)
10220                     || put_user_id(high2lowuid(suid), arg3))
10221                     return -TARGET_EFAULT;
10222             }
10223         }
10224         return ret;
10225 #endif
10226 #ifdef TARGET_NR_getresgid
10227     case TARGET_NR_setresgid:
10228         return get_errno(sys_setresgid(low2highgid(arg1),
10229                                        low2highgid(arg2),
10230                                        low2highgid(arg3)));
10231 #endif
10232 #ifdef TARGET_NR_getresgid
10233     case TARGET_NR_getresgid:
10234         {
10235             gid_t rgid, egid, sgid;
10236             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10237             if (!is_error(ret)) {
10238                 if (put_user_id(high2lowgid(rgid), arg1)
10239                     || put_user_id(high2lowgid(egid), arg2)
10240                     || put_user_id(high2lowgid(sgid), arg3))
10241                     return -TARGET_EFAULT;
10242             }
10243         }
10244         return ret;
10245 #endif
10246 #ifdef TARGET_NR_chown
10247     case TARGET_NR_chown:
10248         if (!(p = lock_user_string(arg1)))
10249             return -TARGET_EFAULT;
10250         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10251         unlock_user(p, arg1, 0);
10252         return ret;
10253 #endif
10254     case TARGET_NR_setuid:
10255         return get_errno(sys_setuid(low2highuid(arg1)));
10256     case TARGET_NR_setgid:
10257         return get_errno(sys_setgid(low2highgid(arg1)));
10258     case TARGET_NR_setfsuid:
10259         return get_errno(setfsuid(arg1));
10260     case TARGET_NR_setfsgid:
10261         return get_errno(setfsgid(arg1));
10262 
10263 #ifdef TARGET_NR_lchown32
10264     case TARGET_NR_lchown32:
10265         if (!(p = lock_user_string(arg1)))
10266             return -TARGET_EFAULT;
10267         ret = get_errno(lchown(p, arg2, arg3));
10268         unlock_user(p, arg1, 0);
10269         return ret;
10270 #endif
10271 #ifdef TARGET_NR_getuid32
10272     case TARGET_NR_getuid32:
10273         return get_errno(getuid());
10274 #endif
10275 
10276 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10277     /* Alpha specific */
10278     case TARGET_NR_getxuid:
10279         {
10280             uid_t euid;
10281             euid = geteuid();
10282             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10283         }
10284         return get_errno(getuid());
10285 #endif
10286 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10287     /* Alpha specific */
10288     case TARGET_NR_getxgid:
10289         {
10290             gid_t egid;
10291             egid = getegid();
10292             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10293         }
10294         return get_errno(getgid());
10295 #endif
10296 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10297     /* Alpha specific */
10298     case TARGET_NR_osf_getsysinfo:
10299         ret = -TARGET_EOPNOTSUPP;
10300         switch (arg1) {
10301           case TARGET_GSI_IEEE_FP_CONTROL:
10302             {
10303                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10304                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10305 
10306                 swcr &= ~SWCR_STATUS_MASK;
10307                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10308 
10309                 if (put_user_u64(swcr, arg2))
10310                     return -TARGET_EFAULT;
10311                 ret = 0;
10312             }
10313             break;
10314 
10315           /* case GSI_IEEE_STATE_AT_SIGNAL:
10316              -- Not implemented in linux kernel.
10317              case GSI_UACPROC:
10318              -- Retrieves current unaligned access state; not much used.
10319              case GSI_PROC_TYPE:
10320              -- Retrieves implver information; surely not used.
10321              case GSI_GET_HWRPB:
10322              -- Grabs a copy of the HWRPB; surely not used.
10323           */
10324         }
10325         return ret;
10326 #endif
10327 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10328     /* Alpha specific */
10329     case TARGET_NR_osf_setsysinfo:
10330         ret = -TARGET_EOPNOTSUPP;
10331         switch (arg1) {
10332           case TARGET_SSI_IEEE_FP_CONTROL:
10333             {
10334                 uint64_t swcr, fpcr;
10335 
10336                 if (get_user_u64(swcr, arg2)) {
10337                     return -TARGET_EFAULT;
10338                 }
10339 
10340                 /*
10341                  * The kernel calls swcr_update_status to update the
10342                  * status bits from the fpcr at every point that it
10343                  * could be queried.  Therefore, we store the status
10344                  * bits only in FPCR.
10345                  */
10346                 ((CPUAlphaState *)cpu_env)->swcr
10347                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10348 
10349                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10350                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10351                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10352                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10353                 ret = 0;
10354             }
10355             break;
10356 
10357           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10358             {
10359                 uint64_t exc, fpcr, fex;
10360 
10361                 if (get_user_u64(exc, arg2)) {
10362                     return -TARGET_EFAULT;
10363                 }
10364                 exc &= SWCR_STATUS_MASK;
10365                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10366 
10367                 /* Old exceptions are not signaled.  */
10368                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10369                 fex = exc & ~fex;
10370                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10371                 fex &= ((CPUArchState *)cpu_env)->swcr;
10372 
10373                 /* Update the hardware fpcr.  */
10374                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10375                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10376 
10377                 if (fex) {
10378                     int si_code = TARGET_FPE_FLTUNK;
10379                     target_siginfo_t info;
10380 
10381                     if (fex & SWCR_TRAP_ENABLE_DNO) {
10382                         si_code = TARGET_FPE_FLTUND;
10383                     }
10384                     if (fex & SWCR_TRAP_ENABLE_INE) {
10385                         si_code = TARGET_FPE_FLTRES;
10386                     }
10387                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10388                         si_code = TARGET_FPE_FLTUND;
10389                     }
10390                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10391                         si_code = TARGET_FPE_FLTOVF;
10392                     }
10393                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10394                         si_code = TARGET_FPE_FLTDIV;
10395                     }
10396                     if (fex & SWCR_TRAP_ENABLE_INV) {
10397                         si_code = TARGET_FPE_FLTINV;
10398                     }
10399 
10400                     info.si_signo = SIGFPE;
10401                     info.si_errno = 0;
10402                     info.si_code = si_code;
10403                     info._sifields._sigfault._addr
10404                         = ((CPUArchState *)cpu_env)->pc;
10405                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10406                                  QEMU_SI_FAULT, &info);
10407                 }
10408                 ret = 0;
10409             }
10410             break;
10411 
10412           /* case SSI_NVPAIRS:
10413              -- Used with SSIN_UACPROC to enable unaligned accesses.
10414              case SSI_IEEE_STATE_AT_SIGNAL:
10415              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10416              -- Not implemented in linux kernel
10417           */
10418         }
10419         return ret;
10420 #endif
10421 #ifdef TARGET_NR_osf_sigprocmask
10422     /* Alpha specific.  */
10423     case TARGET_NR_osf_sigprocmask:
10424         {
10425             abi_ulong mask;
10426             int how;
10427             sigset_t set, oldset;
10428 
10429             switch(arg1) {
10430             case TARGET_SIG_BLOCK:
10431                 how = SIG_BLOCK;
10432                 break;
10433             case TARGET_SIG_UNBLOCK:
10434                 how = SIG_UNBLOCK;
10435                 break;
10436             case TARGET_SIG_SETMASK:
10437                 how = SIG_SETMASK;
10438                 break;
10439             default:
10440                 return -TARGET_EINVAL;
10441             }
10442             mask = arg2;
10443             target_to_host_old_sigset(&set, &mask);
10444             ret = do_sigprocmask(how, &set, &oldset);
10445             if (!ret) {
10446                 host_to_target_old_sigset(&mask, &oldset);
10447                 ret = mask;
10448             }
10449         }
10450         return ret;
10451 #endif
10452 
10453 #ifdef TARGET_NR_getgid32
10454     case TARGET_NR_getgid32:
10455         return get_errno(getgid());
10456 #endif
10457 #ifdef TARGET_NR_geteuid32
10458     case TARGET_NR_geteuid32:
10459         return get_errno(geteuid());
10460 #endif
10461 #ifdef TARGET_NR_getegid32
10462     case TARGET_NR_getegid32:
10463         return get_errno(getegid());
10464 #endif
10465 #ifdef TARGET_NR_setreuid32
10466     case TARGET_NR_setreuid32:
10467         return get_errno(setreuid(arg1, arg2));
10468 #endif
10469 #ifdef TARGET_NR_setregid32
10470     case TARGET_NR_setregid32:
10471         return get_errno(setregid(arg1, arg2));
10472 #endif
10473 #ifdef TARGET_NR_getgroups32
10474     case TARGET_NR_getgroups32:
10475         {
10476             int gidsetsize = arg1;
10477             uint32_t *target_grouplist;
10478             gid_t *grouplist;
10479             int i;
10480 
10481             grouplist = alloca(gidsetsize * sizeof(gid_t));
10482             ret = get_errno(getgroups(gidsetsize, grouplist));
10483             if (gidsetsize == 0)
10484                 return ret;
10485             if (!is_error(ret)) {
10486                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10487                 if (!target_grouplist) {
10488                     return -TARGET_EFAULT;
10489                 }
10490                 for (i = 0; i < ret; i++)
10491                     target_grouplist[i] = tswap32(grouplist[i]);
10492                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10493             }
10494         }
10495         return ret;
10496 #endif
10497 #ifdef TARGET_NR_setgroups32
10498     case TARGET_NR_setgroups32:
10499         {
10500             int gidsetsize = arg1;
10501             uint32_t *target_grouplist;
10502             gid_t *grouplist;
10503             int i;
10504 
10505             grouplist = alloca(gidsetsize * sizeof(gid_t));
10506             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10507             if (!target_grouplist) {
10508                 return -TARGET_EFAULT;
10509             }
10510             for (i = 0; i < gidsetsize; i++)
10511                 grouplist[i] = tswap32(target_grouplist[i]);
10512             unlock_user(target_grouplist, arg2, 0);
10513             return get_errno(setgroups(gidsetsize, grouplist));
10514         }
10515 #endif
10516 #ifdef TARGET_NR_fchown32
10517     case TARGET_NR_fchown32:
10518         return get_errno(fchown(arg1, arg2, arg3));
10519 #endif
10520 #ifdef TARGET_NR_setresuid32
10521     case TARGET_NR_setresuid32:
10522         return get_errno(sys_setresuid(arg1, arg2, arg3));
10523 #endif
10524 #ifdef TARGET_NR_getresuid32
10525     case TARGET_NR_getresuid32:
10526         {
10527             uid_t ruid, euid, suid;
10528             ret = get_errno(getresuid(&ruid, &euid, &suid));
10529             if (!is_error(ret)) {
10530                 if (put_user_u32(ruid, arg1)
10531                     || put_user_u32(euid, arg2)
10532                     || put_user_u32(suid, arg3))
10533                     return -TARGET_EFAULT;
10534             }
10535         }
10536         return ret;
10537 #endif
10538 #ifdef TARGET_NR_setresgid32
10539     case TARGET_NR_setresgid32:
10540         return get_errno(sys_setresgid(arg1, arg2, arg3));
10541 #endif
10542 #ifdef TARGET_NR_getresgid32
10543     case TARGET_NR_getresgid32:
10544         {
10545             gid_t rgid, egid, sgid;
10546             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10547             if (!is_error(ret)) {
10548                 if (put_user_u32(rgid, arg1)
10549                     || put_user_u32(egid, arg2)
10550                     || put_user_u32(sgid, arg3))
10551                     return -TARGET_EFAULT;
10552             }
10553         }
10554         return ret;
10555 #endif
10556 #ifdef TARGET_NR_chown32
10557     case TARGET_NR_chown32:
10558         if (!(p = lock_user_string(arg1)))
10559             return -TARGET_EFAULT;
10560         ret = get_errno(chown(p, arg2, arg3));
10561         unlock_user(p, arg1, 0);
10562         return ret;
10563 #endif
10564 #ifdef TARGET_NR_setuid32
10565     case TARGET_NR_setuid32:
10566         return get_errno(sys_setuid(arg1));
10567 #endif
10568 #ifdef TARGET_NR_setgid32
10569     case TARGET_NR_setgid32:
10570         return get_errno(sys_setgid(arg1));
10571 #endif
10572 #ifdef TARGET_NR_setfsuid32
10573     case TARGET_NR_setfsuid32:
10574         return get_errno(setfsuid(arg1));
10575 #endif
10576 #ifdef TARGET_NR_setfsgid32
10577     case TARGET_NR_setfsgid32:
10578         return get_errno(setfsgid(arg1));
10579 #endif
10580 #ifdef TARGET_NR_mincore
10581     case TARGET_NR_mincore:
10582         {
10583             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10584             if (!a) {
10585                 return -TARGET_ENOMEM;
10586             }
10587             p = lock_user_string(arg3);
10588             if (!p) {
10589                 ret = -TARGET_EFAULT;
10590             } else {
10591                 ret = get_errno(mincore(a, arg2, p));
10592                 unlock_user(p, arg3, ret);
10593             }
10594             unlock_user(a, arg1, 0);
10595         }
10596         return ret;
10597 #endif
10598 #ifdef TARGET_NR_arm_fadvise64_64
10599     case TARGET_NR_arm_fadvise64_64:
10600         /* arm_fadvise64_64 looks like fadvise64_64 but
10601          * with different argument order: fd, advice, offset, len
10602          * rather than the usual fd, offset, len, advice.
10603          * Note that offset and len are both 64-bit so appear as
10604          * pairs of 32-bit registers.
10605          */
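        /*
         * target_offset64() reassembles one 64-bit value from the two
         * 32-bit register halves; which half is "high" depends on the
         * guest endianness.  Roughly (illustrative sketch only):
         *     off = ((uint64_t)high << 32) | (uint32_t)low;
         */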
10606         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10607                             target_offset64(arg5, arg6), arg2);
10608         return -host_to_target_errno(ret);
10609 #endif
10610 
10611 #if TARGET_ABI_BITS == 32
10612 
10613 #ifdef TARGET_NR_fadvise64_64
10614     case TARGET_NR_fadvise64_64:
10615 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10616         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10617         ret = arg2;
10618         arg2 = arg3;
10619         arg3 = arg4;
10620         arg4 = arg5;
10621         arg5 = arg6;
10622         arg6 = ret;
10623 #else
10624         /* 6 args: fd, offset (high, low), len (high, low), advice */
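        /*
         * regpairs_aligned() is true for ABIs (e.g. 32-bit ARM EABI) that
         * require 64-bit syscall arguments to start in an even-numbered
         * register, which pushes every argument after the fd up by one slot.
         */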
10625         if (regpairs_aligned(cpu_env, num)) {
10626             /* offset is in (3,4), len in (5,6) and advice in 7 */
10627             arg2 = arg3;
10628             arg3 = arg4;
10629             arg4 = arg5;
10630             arg5 = arg6;
10631             arg6 = arg7;
10632         }
10633 #endif
10634         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10635                             target_offset64(arg4, arg5), arg6);
10636         return -host_to_target_errno(ret);
10637 #endif
10638 
10639 #ifdef TARGET_NR_fadvise64
10640     case TARGET_NR_fadvise64:
10641         /* 5 args: fd, offset (high, low), len, advice */
10642         if (regpairs_aligned(cpu_env, num)) {
10643             /* offset is in (3,4), len in 5 and advice in 6 */
10644             arg2 = arg3;
10645             arg3 = arg4;
10646             arg4 = arg5;
10647             arg5 = arg6;
10648         }
10649         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10650         return -host_to_target_errno(ret);
10651 #endif
10652 
10653 #else /* not a 32-bit ABI */
10654 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10655 #ifdef TARGET_NR_fadvise64_64
10656     case TARGET_NR_fadvise64_64:
10657 #endif
10658 #ifdef TARGET_NR_fadvise64
10659     case TARGET_NR_fadvise64:
10660 #endif
10661 #ifdef TARGET_S390X
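        /*
         * s390x defines POSIX_FADV_DONTNEED/NOREUSE as 6/7 rather than the
         * generic 4/5, so translate the guest's values and turn the generic
         * ones into deliberately invalid advice.
         */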
10662         switch (arg4) {
10663         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10664         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10665         case 6: arg4 = POSIX_FADV_DONTNEED; break;
10666         case 7: arg4 = POSIX_FADV_NOREUSE; break;
10667         default: break;
10668         }
10669 #endif
10670         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10671 #endif
10672 #endif /* end of 64-bit ABI fadvise handling */
10673 
10674 #ifdef TARGET_NR_madvise
10675     case TARGET_NR_madvise:
10676         /* A straight passthrough may not be safe because qemu sometimes
10677            turns private file-backed mappings into anonymous mappings.
10678            This will break MADV_DONTNEED.
10679            This is a hint, so ignoring and returning success is ok.  */
10680         return 0;
10681 #endif
10682 #if TARGET_ABI_BITS == 32
10683     case TARGET_NR_fcntl64:
10684     {
10685         int cmd;
10686         struct flock64 fl;
10687         from_flock64_fn *copyfrom = copy_from_user_flock64;
10688         to_flock64_fn *copyto = copy_to_user_flock64;
10689 
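        /*
         * ARM OABI lays out struct flock64 without the padding that EABI's
         * 8-byte alignment of 64-bit members introduces, so it needs its
         * own copy-in/copy-out helpers (copy_*_oabi_flock64).
         */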
10690 #ifdef TARGET_ARM
10691         if (!((CPUARMState *)cpu_env)->eabi) {
10692             copyfrom = copy_from_user_oabi_flock64;
10693             copyto = copy_to_user_oabi_flock64;
10694         }
10695 #endif
10696 
10697         cmd = target_to_host_fcntl_cmd(arg2);
10698         if (cmd == -TARGET_EINVAL) {
10699             return cmd;
10700         }
10701 
10702         switch (arg2) {
10703         case TARGET_F_GETLK64:
10704             ret = copyfrom(&fl, arg3);
10705             if (ret) {
10706                 break;
10707             }
10708             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10709             if (ret == 0) {
10710                 ret = copyto(arg3, &fl);
10711             }
10712             break;
10713 
10714         case TARGET_F_SETLK64:
10715         case TARGET_F_SETLKW64:
10716             ret = copyfrom(&fl, arg3);
10717             if (ret) {
10718                 break;
10719             }
10720             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10721             break;
10722         default:
10723             ret = do_fcntl(arg1, arg2, arg3);
10724             break;
10725         }
10726         return ret;
10727     }
10728 #endif
10729 #ifdef TARGET_NR_cacheflush
10730     case TARGET_NR_cacheflush:
10731         /* self-modifying code is handled automatically, so nothing needed */
10732         return 0;
10733 #endif
10734 #ifdef TARGET_NR_getpagesize
10735     case TARGET_NR_getpagesize:
10736         return TARGET_PAGE_SIZE;
10737 #endif
10738     case TARGET_NR_gettid:
10739         return get_errno(sys_gettid());
10740 #ifdef TARGET_NR_readahead
10741     case TARGET_NR_readahead:
10742 #if TARGET_ABI_BITS == 32
10743         if (regpairs_aligned(cpu_env, num)) {
10744             arg2 = arg3;
10745             arg3 = arg4;
10746             arg4 = arg5;
10747         }
10748         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
10749 #else
10750         ret = get_errno(readahead(arg1, arg2, arg3));
10751 #endif
10752         return ret;
10753 #endif
10754 #ifdef CONFIG_ATTR
10755 #ifdef TARGET_NR_setxattr
10756     case TARGET_NR_listxattr:
10757     case TARGET_NR_llistxattr:
10758     {
10759         void *p, *b = 0;
10760         if (arg2) {
10761             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10762             if (!b) {
10763                 return -TARGET_EFAULT;
10764             }
10765         }
10766         p = lock_user_string(arg1);
10767         if (p) {
10768             if (num == TARGET_NR_listxattr) {
10769                 ret = get_errno(listxattr(p, b, arg3));
10770             } else {
10771                 ret = get_errno(llistxattr(p, b, arg3));
10772             }
10773         } else {
10774             ret = -TARGET_EFAULT;
10775         }
10776         unlock_user(p, arg1, 0);
10777         unlock_user(b, arg2, arg3);
10778         return ret;
10779     }
10780     case TARGET_NR_flistxattr:
10781     {
10782         void *b = 0;
10783         if (arg2) {
10784             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10785             if (!b) {
10786                 return -TARGET_EFAULT;
10787             }
10788         }
10789         ret = get_errno(flistxattr(arg1, b, arg3));
10790         unlock_user(b, arg2, arg3);
10791         return ret;
10792     }
10793     case TARGET_NR_setxattr:
10794     case TARGET_NR_lsetxattr:
10795         {
10796             void *p, *n, *v = 0;
10797             if (arg3) {
10798                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10799                 if (!v) {
10800                     return -TARGET_EFAULT;
10801                 }
10802             }
10803             p = lock_user_string(arg1);
10804             n = lock_user_string(arg2);
10805             if (p && n) {
10806                 if (num == TARGET_NR_setxattr) {
10807                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
10808                 } else {
10809                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10810                 }
10811             } else {
10812                 ret = -TARGET_EFAULT;
10813             }
10814             unlock_user(p, arg1, 0);
10815             unlock_user(n, arg2, 0);
10816             unlock_user(v, arg3, 0);
10817         }
10818         return ret;
10819     case TARGET_NR_fsetxattr:
10820         {
10821             void *n, *v = 0;
10822             if (arg3) {
10823                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10824                 if (!v) {
10825                     return -TARGET_EFAULT;
10826                 }
10827             }
10828             n = lock_user_string(arg2);
10829             if (n) {
10830                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10831             } else {
10832                 ret = -TARGET_EFAULT;
10833             }
10834             unlock_user(n, arg2, 0);
10835             unlock_user(v, arg3, 0);
10836         }
10837         return ret;
10838     case TARGET_NR_getxattr:
10839     case TARGET_NR_lgetxattr:
10840         {
10841             void *p, *n, *v = 0;
10842             if (arg3) {
10843                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10844                 if (!v) {
10845                     return -TARGET_EFAULT;
10846                 }
10847             }
10848             p = lock_user_string(arg1);
10849             n = lock_user_string(arg2);
10850             if (p && n) {
10851                 if (num == TARGET_NR_getxattr) {
10852                     ret = get_errno(getxattr(p, n, v, arg4));
10853                 } else {
10854                     ret = get_errno(lgetxattr(p, n, v, arg4));
10855                 }
10856             } else {
10857                 ret = -TARGET_EFAULT;
10858             }
10859             unlock_user(p, arg1, 0);
10860             unlock_user(n, arg2, 0);
10861             unlock_user(v, arg3, arg4);
10862         }
10863         return ret;
10864     case TARGET_NR_fgetxattr:
10865         {
10866             void *n, *v = 0;
10867             if (arg3) {
10868                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10869                 if (!v) {
10870                     return -TARGET_EFAULT;
10871                 }
10872             }
10873             n = lock_user_string(arg2);
10874             if (n) {
10875                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10876             } else {
10877                 ret = -TARGET_EFAULT;
10878             }
10879             unlock_user(n, arg2, 0);
10880             unlock_user(v, arg3, arg4);
10881         }
10882         return ret;
10883     case TARGET_NR_removexattr:
10884     case TARGET_NR_lremovexattr:
10885         {
10886             void *p, *n;
10887             p = lock_user_string(arg1);
10888             n = lock_user_string(arg2);
10889             if (p && n) {
10890                 if (num == TARGET_NR_removexattr) {
10891                     ret = get_errno(removexattr(p, n));
10892                 } else {
10893                     ret = get_errno(lremovexattr(p, n));
10894                 }
10895             } else {
10896                 ret = -TARGET_EFAULT;
10897             }
10898             unlock_user(p, arg1, 0);
10899             unlock_user(n, arg2, 0);
10900         }
10901         return ret;
10902     case TARGET_NR_fremovexattr:
10903         {
10904             void *n;
10905             n = lock_user_string(arg2);
10906             if (n) {
10907                 ret = get_errno(fremovexattr(arg1, n));
10908             } else {
10909                 ret = -TARGET_EFAULT;
10910             }
10911             unlock_user(n, arg2, 0);
10912         }
10913         return ret;
10914 #endif
10915 #endif /* CONFIG_ATTR */
10916 #ifdef TARGET_NR_set_thread_area
10917     case TARGET_NR_set_thread_area:
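      /*
       * Installs the guest's TLS pointer.  Targets that keep it in a CPU
       * register (MIPS UserLocal, CRIS PID) set it directly on cpu_env;
       * m68k stores it in the TaskState; 32-bit i386 goes through the GDT
       * emulation in do_set_thread_area().
       */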
10918 #if defined(TARGET_MIPS)
10919       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10920       return 0;
10921 #elif defined(TARGET_CRIS)
10922       if (arg1 & 0xff)
10923           ret = -TARGET_EINVAL;
10924       else {
10925           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10926           ret = 0;
10927       }
10928       return ret;
10929 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10930       return do_set_thread_area(cpu_env, arg1);
10931 #elif defined(TARGET_M68K)
10932       {
10933           TaskState *ts = cpu->opaque;
10934           ts->tp_value = arg1;
10935           return 0;
10936       }
10937 #else
10938       return -TARGET_ENOSYS;
10939 #endif
10940 #endif
10941 #ifdef TARGET_NR_get_thread_area
10942     case TARGET_NR_get_thread_area:
10943 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10944         return do_get_thread_area(cpu_env, arg1);
10945 #elif defined(TARGET_M68K)
10946         {
10947             TaskState *ts = cpu->opaque;
10948             return ts->tp_value;
10949         }
10950 #else
10951         return -TARGET_ENOSYS;
10952 #endif
10953 #endif
10954 #ifdef TARGET_NR_getdomainname
10955     case TARGET_NR_getdomainname:
10956         return -TARGET_ENOSYS;
10957 #endif
10958 
10959 #ifdef TARGET_NR_clock_settime
10960     case TARGET_NR_clock_settime:
10961     {
10962         struct timespec ts;
10963 
10964         ret = target_to_host_timespec(&ts, arg2);
10965         if (!is_error(ret)) {
10966             ret = get_errno(clock_settime(arg1, &ts));
10967         }
10968         return ret;
10969     }
10970 #endif
10971 #ifdef TARGET_NR_clock_gettime
10972     case TARGET_NR_clock_gettime:
10973     {
10974         struct timespec ts;
10975         ret = get_errno(clock_gettime(arg1, &ts));
10976         if (!is_error(ret)) {
10977             ret = host_to_target_timespec(arg2, &ts);
10978         }
10979         return ret;
10980     }
10981 #endif
10982 #ifdef TARGET_NR_clock_getres
10983     case TARGET_NR_clock_getres:
10984     {
10985         struct timespec ts;
10986         ret = get_errno(clock_getres(arg1, &ts));
10987         if (!is_error(ret)) {
10988             host_to_target_timespec(arg2, &ts);
10989         }
10990         return ret;
10991     }
10992 #endif
10993 #ifdef TARGET_NR_clock_nanosleep
10994     case TARGET_NR_clock_nanosleep:
10995     {
10996         struct timespec ts;
10997         if (target_to_host_timespec(&ts, arg3)) {
                  return -TARGET_EFAULT;
              }
10998         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
10999                                              &ts, arg4 ? &ts : NULL));
11000         if (arg4)
11001             host_to_target_timespec(arg4, &ts);
11002 
11003 #if defined(TARGET_PPC)
11004         /* clock_nanosleep is odd in that it returns positive errno values.
11005          * On PPC, CR0 bit 3 should be set in such a situation. */
11006         if (ret && ret != -TARGET_ERESTARTSYS) {
11007             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11008         }
11009 #endif
11010         return ret;
11011     }
11012 #endif
11013 
11014 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11015     case TARGET_NR_set_tid_address:
11016         return get_errno(set_tid_address((int *)g2h(arg1)));
11017 #endif
11018 
11019     case TARGET_NR_tkill:
11020         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11021 
11022     case TARGET_NR_tgkill:
11023         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11024                          target_to_host_signal(arg3)));
11025 
11026 #ifdef TARGET_NR_set_robust_list
11027     case TARGET_NR_set_robust_list:
11028     case TARGET_NR_get_robust_list:
11029         /* The ABI for supporting robust futexes has userspace pass
11030          * the kernel a pointer to a linked list which is updated by
11031          * userspace after the syscall; the list is walked by the kernel
11032          * when the thread exits. Since the linked list in QEMU guest
11033          * memory isn't a valid linked list for the host and we have
11034          * no way to reliably intercept the thread-death event, we can't
11035          * support these. Silently return ENOSYS so that guest userspace
11036          * falls back to a non-robust futex implementation (which should
11037          * be OK except in the corner case of the guest crashing while
11038          * holding a mutex that is shared with another process via
11039          * shared memory).
11040          */
11041         return -TARGET_ENOSYS;
11042 #endif
11043 
11044 #if defined(TARGET_NR_utimensat)
11045     case TARGET_NR_utimensat:
11046         {
11047             struct timespec *tsp, ts[2];
11048             if (!arg3) {
11049                 tsp = NULL;
11050             } else {
11051                 target_to_host_timespec(ts, arg3);
11052                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11053                 tsp = ts;
11054             }
11055             if (!arg2)
11056                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11057             else {
11058                 if (!(p = lock_user_string(arg2))) {
11059                     return -TARGET_EFAULT;
11060                 }
11061                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11062                 unlock_user(p, arg2, 0);
11063             }
11064         }
11065         return ret;
11066 #endif
11067     case TARGET_NR_futex:
11068         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11069 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11070     case TARGET_NR_inotify_init:
11071         ret = get_errno(sys_inotify_init());
11072         if (ret >= 0) {
11073             fd_trans_register(ret, &target_inotify_trans);
11074         }
11075         return ret;
11076 #endif
11077 #ifdef CONFIG_INOTIFY1
11078 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11079     case TARGET_NR_inotify_init1:
11080         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11081                                           fcntl_flags_tbl)));
11082         if (ret >= 0) {
11083             fd_trans_register(ret, &target_inotify_trans);
11084         }
11085         return ret;
11086 #endif
11087 #endif
11088 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11089     case TARGET_NR_inotify_add_watch:
11090         p = lock_user_string(arg2);
              if (!p) {
                  return -TARGET_EFAULT;
              }
11091         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11092         unlock_user(p, arg2, 0);
11093         return ret;
11094 #endif
11095 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11096     case TARGET_NR_inotify_rm_watch:
11097         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11098 #endif
11099 
11100 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11101     case TARGET_NR_mq_open:
11102         {
11103             struct mq_attr posix_mq_attr;
11104             struct mq_attr *pposix_mq_attr;
11105             int host_flags;
11106 
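            /*
             * fcntl_flags_tbl translates the guest's O_* flag encoding to
             * the host's; the bit values differ on e.g. alpha, mips and
             * sparc guests.
             */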
11107             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11108             pposix_mq_attr = NULL;
11109             if (arg4) {
11110                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11111                     return -TARGET_EFAULT;
11112                 }
11113                 pposix_mq_attr = &posix_mq_attr;
11114             }
11115             p = lock_user_string(arg1 - 1);
11116             if (!p) {
11117                 return -TARGET_EFAULT;
11118             }
11119             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11120             unlock_user(p, arg1, 0);
11121         }
11122         return ret;
11123 
11124     case TARGET_NR_mq_unlink:
11125         p = lock_user_string(arg1 - 1);
11126         if (!p) {
11127             return -TARGET_EFAULT;
11128         }
11129         ret = get_errno(mq_unlink(p));
11130         unlock_user(p, arg1, 0);
11131         return ret;
11132 
11133     case TARGET_NR_mq_timedsend:
11134         {
11135             struct timespec ts;
11136 
11137             p = lock_user(VERIFY_READ, arg2, arg3, 1);
                  if (!p) {
                      return -TARGET_EFAULT;
                  }
11138             if (arg5 != 0) {
11139                 target_to_host_timespec(&ts, arg5);
11140                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11141                 host_to_target_timespec(arg5, &ts);
11142             } else {
11143                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11144             }
11145             unlock_user(p, arg2, arg3);
11146         }
11147         return ret;
11148 
11149     case TARGET_NR_mq_timedreceive:
11150         {
11151             struct timespec ts;
11152             unsigned int prio;
11153 
11154             p = lock_user(VERIFY_READ, arg2, arg3, 1);
                  if (!p) {
                      return -TARGET_EFAULT;
                  }
11155             if (arg5 != 0) {
11156                 target_to_host_timespec(&ts, arg5);
11157                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11158                                                      &prio, &ts));
11159                 host_to_target_timespec(arg5, &ts);
11160             } else {
11161                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11162                                                      &prio, NULL));
11163             }
11164             unlock_user(p, arg2, arg3);
11165             if (arg4 != 0)
11166                 put_user_u32(prio, arg4);
11167         }
11168         return ret;
11169 
11170     /* Not implemented for now... */
11171 /*     case TARGET_NR_mq_notify: */
11172 /*         break; */
11173 
11174     case TARGET_NR_mq_getsetattr:
11175         {
11176             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11177             ret = 0;
11178             if (arg2 != 0) {
11179                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11180                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11181                                            &posix_mq_attr_out));
11182             } else if (arg3 != 0) {
11183                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11184             }
11185             if (ret == 0 && arg3 != 0) {
11186                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11187             }
11188         }
11189         return ret;
11190 #endif
11191 
11192 #ifdef CONFIG_SPLICE
11193 #ifdef TARGET_NR_tee
11194     case TARGET_NR_tee:
11195         {
11196             ret = get_errno(tee(arg1,arg2,arg3,arg4));
11197         }
11198         return ret;
11199 #endif
11200 #ifdef TARGET_NR_splice
11201     case TARGET_NR_splice:
11202         {
11203             loff_t loff_in, loff_out;
11204             loff_t *ploff_in = NULL, *ploff_out = NULL;
11205             if (arg2) {
11206                 if (get_user_u64(loff_in, arg2)) {
11207                     return -TARGET_EFAULT;
11208                 }
11209                 ploff_in = &loff_in;
11210             }
11211             if (arg4) {
11212                 if (get_user_u64(loff_out, arg4)) {
11213                     return -TARGET_EFAULT;
11214                 }
11215                 ploff_out = &loff_out;
11216             }
11217             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11218             if (arg2) {
11219                 if (put_user_u64(loff_in, arg2)) {
11220                     return -TARGET_EFAULT;
11221                 }
11222             }
11223             if (arg4) {
11224                 if (put_user_u64(loff_out, arg4)) {
11225                     return -TARGET_EFAULT;
11226                 }
11227             }
11228         }
11229         return ret;
11230 #endif
11231 #ifdef TARGET_NR_vmsplice
11232     case TARGET_NR_vmsplice:
11233         {
11234             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11235             if (vec != NULL) {
11236                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11237                 unlock_iovec(vec, arg2, arg3, 0);
11238             } else {
11239                 ret = -host_to_target_errno(errno);
11240             }
11241         }
11242         return ret;
11243 #endif
11244 #endif /* CONFIG_SPLICE */
11245 #ifdef CONFIG_EVENTFD
11246 #if defined(TARGET_NR_eventfd)
11247     case TARGET_NR_eventfd:
11248         ret = get_errno(eventfd(arg1, 0));
11249         if (ret >= 0) {
11250             fd_trans_register(ret, &target_eventfd_trans);
11251         }
11252         return ret;
11253 #endif
11254 #if defined(TARGET_NR_eventfd2)
11255     case TARGET_NR_eventfd2:
11256     {
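        /*
         * Only O_NONBLOCK and O_CLOEXEC need remapping: on the host,
         * EFD_NONBLOCK and EFD_CLOEXEC are defined to the same values as
         * O_NONBLOCK and O_CLOEXEC, which is what the code below relies on.
         */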
11257         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11258         if (arg2 & TARGET_O_NONBLOCK) {
11259             host_flags |= O_NONBLOCK;
11260         }
11261         if (arg2 & TARGET_O_CLOEXEC) {
11262             host_flags |= O_CLOEXEC;
11263         }
11264         ret = get_errno(eventfd(arg1, host_flags));
11265         if (ret >= 0) {
11266             fd_trans_register(ret, &target_eventfd_trans);
11267         }
11268         return ret;
11269     }
11270 #endif
11271 #endif /* CONFIG_EVENTFD  */
11272 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11273     case TARGET_NR_fallocate:
11274 #if TARGET_ABI_BITS == 32
11275         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11276                                   target_offset64(arg5, arg6)));
11277 #else
11278         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11279 #endif
11280         return ret;
11281 #endif
11282 #if defined(CONFIG_SYNC_FILE_RANGE)
11283 #if defined(TARGET_NR_sync_file_range)
11284     case TARGET_NR_sync_file_range:
11285 #if TARGET_ABI_BITS == 32
11286 #if defined(TARGET_MIPS)
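        /*
         * MIPS o32 inserts a padding argument after the fd so that the
         * 64-bit offset starts in an even register pair: offset is in
         * (arg3, arg4), nbytes in (arg5, arg6) and the flags land in arg7.
         */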
11287         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11288                                         target_offset64(arg5, arg6), arg7));
11289 #else
11290         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11291                                         target_offset64(arg4, arg5), arg6));
11292 #endif /* !TARGET_MIPS */
11293 #else
11294         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11295 #endif
11296         return ret;
11297 #endif
11298 #if defined(TARGET_NR_sync_file_range2)
11299     case TARGET_NR_sync_file_range2:
11300         /* This is like sync_file_range but the arguments are reordered */
11301 #if TARGET_ABI_BITS == 32
11302         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11303                                         target_offset64(arg5, arg6), arg2));
11304 #else
11305         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11306 #endif
11307         return ret;
11308 #endif
11309 #endif
11310 #if defined(TARGET_NR_signalfd4)
11311     case TARGET_NR_signalfd4:
11312         return do_signalfd4(arg1, arg2, arg4);
11313 #endif
11314 #if defined(TARGET_NR_signalfd)
11315     case TARGET_NR_signalfd:
11316         return do_signalfd4(arg1, arg2, 0);
11317 #endif
11318 #if defined(CONFIG_EPOLL)
11319 #if defined(TARGET_NR_epoll_create)
11320     case TARGET_NR_epoll_create:
11321         return get_errno(epoll_create(arg1));
11322 #endif
11323 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11324     case TARGET_NR_epoll_create1:
11325         return get_errno(epoll_create1(arg1));
11326 #endif
11327 #if defined(TARGET_NR_epoll_ctl)
11328     case TARGET_NR_epoll_ctl:
11329     {
11330         struct epoll_event ep;
11331         struct epoll_event *epp = 0;
11332         if (arg4) {
11333             struct target_epoll_event *target_ep;
11334             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11335                 return -TARGET_EFAULT;
11336             }
11337             ep.events = tswap32(target_ep->events);
11338             /* The epoll_data_t union is just opaque data to the kernel,
11339              * so we transfer all 64 bits across and need not worry what
11340              * actual data type it is.
11341              */
11342             ep.data.u64 = tswap64(target_ep->data.u64);
11343             unlock_user_struct(target_ep, arg4, 0);
11344             epp = &ep;
11345         }
11346         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11347     }
11348 #endif
11349 
11350 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11351 #if defined(TARGET_NR_epoll_wait)
11352     case TARGET_NR_epoll_wait:
11353 #endif
11354 #if defined(TARGET_NR_epoll_pwait)
11355     case TARGET_NR_epoll_pwait:
11356 #endif
11357     {
11358         struct target_epoll_event *target_ep;
11359         struct epoll_event *ep;
11360         int epfd = arg1;
11361         int maxevents = arg3;
11362         int timeout = arg4;
11363 
11364         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11365             return -TARGET_EINVAL;
11366         }
11367 
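        /*
         * maxevents was bounded above both to limit the g_try_new()
         * allocation below and to mirror the kernel's own EP_MAX_EVENTS
         * sanity check.
         */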
11368         target_ep = lock_user(VERIFY_WRITE, arg2,
11369                               maxevents * sizeof(struct target_epoll_event), 1);
11370         if (!target_ep) {
11371             return -TARGET_EFAULT;
11372         }
11373 
11374         ep = g_try_new(struct epoll_event, maxevents);
11375         if (!ep) {
11376             unlock_user(target_ep, arg2, 0);
11377             return -TARGET_ENOMEM;
11378         }
11379 
11380         switch (num) {
11381 #if defined(TARGET_NR_epoll_pwait)
11382         case TARGET_NR_epoll_pwait:
11383         {
11384             target_sigset_t *target_set;
11385             sigset_t _set, *set = &_set;
11386 
11387             if (arg5) {
11388                 if (arg6 != sizeof(target_sigset_t)) {
11389                     ret = -TARGET_EINVAL;
11390                     break;
11391                 }
11392 
11393                 target_set = lock_user(VERIFY_READ, arg5,
11394                                        sizeof(target_sigset_t), 1);
11395                 if (!target_set) {
11396                     ret = -TARGET_EFAULT;
11397                     break;
11398                 }
11399                 target_to_host_sigset(set, target_set);
11400                 unlock_user(target_set, arg5, 0);
11401             } else {
11402                 set = NULL;
11403             }
11404 
11405             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11406                                              set, SIGSET_T_SIZE));
11407             break;
11408         }
11409 #endif
11410 #if defined(TARGET_NR_epoll_wait)
11411         case TARGET_NR_epoll_wait:
11412             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11413                                              NULL, 0));
11414             break;
11415 #endif
11416         default:
11417             ret = -TARGET_ENOSYS;
11418         }
11419         if (!is_error(ret)) {
11420             int i;
11421             for (i = 0; i < ret; i++) {
11422                 target_ep[i].events = tswap32(ep[i].events);
11423                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11424             }
11425             unlock_user(target_ep, arg2,
11426                         ret * sizeof(struct target_epoll_event));
11427         } else {
11428             unlock_user(target_ep, arg2, 0);
11429         }
11430         g_free(ep);
11431         return ret;
11432     }
11433 #endif
11434 #endif
11435 #ifdef TARGET_NR_prlimit64
11436     case TARGET_NR_prlimit64:
11437     {
11438         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
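        /*
         * struct target_rlimit64 and struct host_rlimit64 both use fixed
         * 64-bit fields, so conversion is just a byte swap; sys_prlimit64()
         * is a direct syscall wrapper rather than the glibc prlimit().
         */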
11439         struct target_rlimit64 *target_rnew, *target_rold;
11440         struct host_rlimit64 rnew, rold, *rnewp = 0;
11441         int resource = target_to_host_resource(arg2);
11442         if (arg3) {
11443             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11444                 return -TARGET_EFAULT;
11445             }
11446             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11447             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11448             unlock_user_struct(target_rnew, arg3, 0);
11449             rnewp = &rnew;
11450         }
11451 
11452         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11453         if (!is_error(ret) && arg4) {
11454             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11455                 return -TARGET_EFAULT;
11456             }
11457             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11458             target_rold->rlim_max = tswap64(rold.rlim_max);
11459             unlock_user_struct(target_rold, arg4, 1);
11460         }
11461         return ret;
11462     }
11463 #endif
11464 #ifdef TARGET_NR_gethostname
11465     case TARGET_NR_gethostname:
11466     {
11467         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11468         if (name) {
11469             ret = get_errno(gethostname(name, arg2));
11470             unlock_user(name, arg1, arg2);
11471         } else {
11472             ret = -TARGET_EFAULT;
11473         }
11474         return ret;
11475     }
11476 #endif
11477 #ifdef TARGET_NR_atomic_cmpxchg_32
11478     case TARGET_NR_atomic_cmpxchg_32:
11479     {
11480         /* should use start_exclusive from main.c */
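        /*
         * m68k-specific kernel helper: emulates a 32-bit compare-and-swap
         * on guest memory.  As the note above says, without start_exclusive()
         * it is not atomic with respect to other guest threads.
         */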
11481         abi_ulong mem_value;
11482         if (get_user_u32(mem_value, arg6)) {
11483             target_siginfo_t info;
11484             info.si_signo = SIGSEGV;
11485             info.si_errno = 0;
11486             info.si_code = TARGET_SEGV_MAPERR;
11487             info._sifields._sigfault._addr = arg6;
11488             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11489                          QEMU_SI_FAULT, &info);
11490             ret = 0xdeadbeef;
11491 
11492         }
11493         if (mem_value == arg2)
11494             put_user_u32(arg1, arg6);
11495         return mem_value;
11496     }
11497 #endif
11498 #ifdef TARGET_NR_atomic_barrier
11499     case TARGET_NR_atomic_barrier:
11500         /* Like the kernel implementation and the
11501            qemu arm barrier, treat this as a no-op. */
11502         return 0;
11503 #endif
11504 
11505 #ifdef TARGET_NR_timer_create
11506     case TARGET_NR_timer_create:
11507     {
11508         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
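        /*
         * Guest timer ids are encoded as TIMER_MAGIC | index, where the
         * index selects a slot in the g_posix_timers[] table holding the
         * host timer_t; get_timer_id() in the cases below validates and
         * strips the magic.
         */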
11509 
11510         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11511 
11512         int clkid = arg1;
11513         int timer_index = next_free_host_timer();
11514 
11515         if (timer_index < 0) {
11516             ret = -TARGET_EAGAIN;
11517         } else {
11518             timer_t *phtimer = g_posix_timers + timer_index;
11519 
11520             if (arg2) {
11521                 phost_sevp = &host_sevp;
11522                 ret = target_to_host_sigevent(phost_sevp, arg2);
11523                 if (ret != 0) {
11524                     return ret;
11525                 }
11526             }
11527 
11528             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11529             if (ret) {
11530                 phtimer = NULL;
11531             } else {
11532                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11533                     return -TARGET_EFAULT;
11534                 }
11535             }
11536         }
11537         return ret;
11538     }
11539 #endif
11540 
11541 #ifdef TARGET_NR_timer_settime
11542     case TARGET_NR_timer_settime:
11543     {
11544         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11545          * struct itimerspec * old_value */
11546         target_timer_t timerid = get_timer_id(arg1);
11547 
11548         if (timerid < 0) {
11549             ret = timerid;
11550         } else if (arg3 == 0) {
11551             ret = -TARGET_EINVAL;
11552         } else {
11553             timer_t htimer = g_posix_timers[timerid];
11554             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11555 
11556             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11557                 return -TARGET_EFAULT;
11558             }
11559             ret = get_errno(
11560                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11561             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11562                 return -TARGET_EFAULT;
11563             }
11564         }
11565         return ret;
11566     }
11567 #endif
11568 
11569 #ifdef TARGET_NR_timer_gettime
11570     case TARGET_NR_timer_gettime:
11571     {
11572         /* args: timer_t timerid, struct itimerspec *curr_value */
11573         target_timer_t timerid = get_timer_id(arg1);
11574 
11575         if (timerid < 0) {
11576             ret = timerid;
11577         } else if (!arg2) {
11578             ret = -TARGET_EFAULT;
11579         } else {
11580             timer_t htimer = g_posix_timers[timerid];
11581             struct itimerspec hspec;
11582             ret = get_errno(timer_gettime(htimer, &hspec));
11583 
11584             if (host_to_target_itimerspec(arg2, &hspec)) {
11585                 ret = -TARGET_EFAULT;
11586             }
11587         }
11588         return ret;
11589     }
11590 #endif
11591 
11592 #ifdef TARGET_NR_timer_getoverrun
11593     case TARGET_NR_timer_getoverrun:
11594     {
11595         /* args: timer_t timerid */
11596         target_timer_t timerid = get_timer_id(arg1);
11597 
11598         if (timerid < 0) {
11599             ret = timerid;
11600         } else {
11601             timer_t htimer = g_posix_timers[timerid];
11602             ret = get_errno(timer_getoverrun(htimer));
11603         }
11604         fd_trans_unregister(ret);
11605         return ret;
11606     }
11607 #endif
11608 
11609 #ifdef TARGET_NR_timer_delete
11610     case TARGET_NR_timer_delete:
11611     {
11612         /* args: timer_t timerid */
11613         target_timer_t timerid = get_timer_id(arg1);
11614 
11615         if (timerid < 0) {
11616             ret = timerid;
11617         } else {
11618             timer_t htimer = g_posix_timers[timerid];
11619             ret = get_errno(timer_delete(htimer));
11620             g_posix_timers[timerid] = 0;
11621         }
11622         return ret;
11623     }
11624 #endif
11625 
11626 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11627     case TARGET_NR_timerfd_create:
11628         return get_errno(timerfd_create(arg1,
11629                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11630 #endif
11631 
11632 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11633     case TARGET_NR_timerfd_gettime:
11634         {
11635             struct itimerspec its_curr;
11636 
11637             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11638 
11639             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11640                 return -TARGET_EFAULT;
11641             }
11642         }
11643         return ret;
11644 #endif
11645 
11646 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11647     case TARGET_NR_timerfd_settime:
11648         {
11649             struct itimerspec its_new, its_old, *p_new;
11650 
11651             if (arg3) {
11652                 if (target_to_host_itimerspec(&its_new, arg3)) {
11653                     return -TARGET_EFAULT;
11654                 }
11655                 p_new = &its_new;
11656             } else {
11657                 p_new = NULL;
11658             }
11659 
11660             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11661 
11662             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11663                 return -TARGET_EFAULT;
11664             }
11665         }
11666         return ret;
11667 #endif
11668 
11669 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11670     case TARGET_NR_ioprio_get:
11671         return get_errno(ioprio_get(arg1, arg2));
11672 #endif
11673 
11674 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11675     case TARGET_NR_ioprio_set:
11676         return get_errno(ioprio_set(arg1, arg2, arg3));
11677 #endif
11678 
11679 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11680     case TARGET_NR_setns:
11681         return get_errno(setns(arg1, arg2));
11682 #endif
11683 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11684     case TARGET_NR_unshare:
11685         return get_errno(unshare(arg1));
11686 #endif
11687 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11688     case TARGET_NR_kcmp:
11689         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11690 #endif
11691 #ifdef TARGET_NR_swapcontext
11692     case TARGET_NR_swapcontext:
11693         /* PowerPC specific.  */
11694         return do_swapcontext(cpu_env, arg1, arg2, arg3);
11695 #endif
11696 
11697     default:
11698         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11699         return -TARGET_ENOSYS;
11700     }
11701     return ret;
11702 }
11703 
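/*
 * do_syscall() is the entry point used by the per-target cpu_loop()s.
 * It wraps do_syscall1() (the big switch above) with trace points and
 * the optional -strace logging.
 */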
11704 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11705                     abi_long arg2, abi_long arg3, abi_long arg4,
11706                     abi_long arg5, abi_long arg6, abi_long arg7,
11707                     abi_long arg8)
11708 {
11709     CPUState *cpu = ENV_GET_CPU(cpu_env);
11710     abi_long ret;
11711 
11712 #ifdef DEBUG_ERESTARTSYS
11713     /* Debug-only code for exercising the syscall-restart code paths
11714      * in the per-architecture cpu main loops: restart every syscall
11715      * the guest makes once before letting it through.
11716      */
11717     {
11718         static bool flag;
11719         flag = !flag;
11720         if (flag) {
11721             return -TARGET_ERESTARTSYS;
11722         }
11723     }
11724 #endif
11725 
11726     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11727                              arg5, arg6, arg7, arg8);
11728 
11729     if (unlikely(do_strace)) {
11730         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11731         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11732                           arg5, arg6, arg7, arg8);
11733         print_syscall_ret(num, ret);
11734     } else {
11735         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11736                           arg5, arg6, arg7, arg8);
11737     }
11738 
11739     trace_guest_user_syscall_ret(cpu, num, ret);
11740     return ret;
11741 }
11742