1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #ifdef CONFIG_TIMERFD
59 #include <sys/timerfd.h>
60 #endif
61 #ifdef CONFIG_EVENTFD
62 #include <sys/eventfd.h>
63 #endif
64 #ifdef CONFIG_EPOLL
65 #include <sys/epoll.h>
66 #endif
67 #ifdef CONFIG_ATTR
68 #include "qemu/xattr.h"
69 #endif
70 #ifdef CONFIG_SENDFILE
71 #include <sys/sendfile.h>
72 #endif
73 
74 #define termios host_termios
75 #define winsize host_winsize
76 #define termio host_termio
77 #define sgttyb host_sgttyb /* same as target */
78 #define tchars host_tchars /* same as target */
79 #define ltchars host_ltchars /* same as target */
80 
81 #include <linux/termios.h>
82 #include <linux/unistd.h>
83 #include <linux/cdrom.h>
84 #include <linux/hdreg.h>
85 #include <linux/soundcard.h>
86 #include <linux/kd.h>
87 #include <linux/mtio.h>
88 #include <linux/fs.h>
89 #if defined(CONFIG_FIEMAP)
90 #include <linux/fiemap.h>
91 #endif
92 #include <linux/fb.h>
93 #if defined(CONFIG_USBFS)
94 #include <linux/usbdevice_fs.h>
95 #include <linux/usb/ch9.h>
96 #endif
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #include <linux/if_alg.h>
106 #include "linux_loop.h"
107 #include "uname.h"
108 
109 #include "qemu.h"
110 #include "qemu/guest-random.h"
111 #include "qapi/error.h"
112 #include "fd-trans.h"
113 
114 #ifndef CLONE_IO
115 #define CLONE_IO                0x80000000      /* Clone io context */
116 #endif
117 
118 /* We can't directly call the host clone syscall, because this will
119  * badly confuse libc (breaking mutexes, for example). So we must
120  * divide clone flags into:
121  *  * flag combinations that look like pthread_create()
122  *  * flag combinations that look like fork()
123  *  * flags we can implement within QEMU itself
124  *  * flags we can't support and will return an error for
125  */
126 /* For thread creation, all these flags must be present; for
127  * fork, none must be present.
128  */
129 #define CLONE_THREAD_FLAGS                              \
130     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
131      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
132 
133 /* These flags are ignored:
134  * CLONE_DETACHED is now ignored by the kernel;
135  * CLONE_IO is just an optimisation hint to the I/O scheduler
136  */
137 #define CLONE_IGNORED_FLAGS                     \
138     (CLONE_DETACHED | CLONE_IO)
139 
140 /* Flags for fork which we can implement within QEMU itself */
141 #define CLONE_OPTIONAL_FORK_FLAGS               \
142     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
143      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
144 
145 /* Flags for thread creation which we can implement within QEMU itself */
146 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
147     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
148      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
149 
150 #define CLONE_INVALID_FORK_FLAGS                                        \
151     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
152 
153 #define CLONE_INVALID_THREAD_FLAGS                                      \
154     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
155        CLONE_IGNORED_FLAGS))
156 
157 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
158  * have almost all been allocated. We cannot support any of
159  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
160  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
161  * The checks against the invalid thread masks above will catch these.
162  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
163  */
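/*
 * Illustrative example of how these masks classify real flag sets (a sketch,
 * not an exhaustive list): glibc's pthread_create() typically passes
 * CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 * CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID,
 * which contains all of CLONE_THREAD_FLAGS plus only optional/ignored bits,
 * so it is treated as thread creation. A fork()-style clone passes little
 * more than SIGCHLD in the CSIGNAL bits (possibly plus CLONE_CHILD_SETTID /
 * CLONE_CHILD_CLEARTID, which are in CLONE_OPTIONAL_FORK_FLAGS), so it hits
 * no invalid bits and is treated as a fork.
 */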
164 
165 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
166  * once. This exercises the codepaths for restart.
167  */
168 //#define DEBUG_ERESTARTSYS
169 
170 //#include <linux/msdos_fs.h>
171 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
172 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
173 
174 #undef _syscall0
175 #undef _syscall1
176 #undef _syscall2
177 #undef _syscall3
178 #undef _syscall4
179 #undef _syscall5
180 #undef _syscall6
181 
182 #define _syscall0(type,name)		\
183 static type name (void)			\
184 {					\
185 	return syscall(__NR_##name);	\
186 }
187 
188 #define _syscall1(type,name,type1,arg1)		\
189 static type name (type1 arg1)			\
190 {						\
191 	return syscall(__NR_##name, arg1);	\
192 }
193 
194 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
195 static type name (type1 arg1,type2 arg2)		\
196 {							\
197 	return syscall(__NR_##name, arg1, arg2);	\
198 }
199 
200 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
201 static type name (type1 arg1,type2 arg2,type3 arg3)		\
202 {								\
203 	return syscall(__NR_##name, arg1, arg2, arg3);		\
204 }
205 
206 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
207 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
208 {										\
209 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
210 }
211 
212 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
213 		  type5,arg5)							\
214 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
215 {										\
216 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
217 }
218 
219 
220 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
221 		  type5,arg5,type6,arg6)					\
222 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
223                   type6 arg6)							\
224 {										\
225 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
226 }
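/*
 * For illustration only: an invocation further down such as
 *     _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp,
 *               uint, count);
 * expands roughly to
 *     static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 * where __NR_sys_getdents is #defined below to the host's __NR_getdents, so
 * each wrapper issues the raw host syscall and reports errors via errno.
 */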
227 
228 
229 #define __NR_sys_uname __NR_uname
230 #define __NR_sys_getcwd1 __NR_getcwd
231 #define __NR_sys_getdents __NR_getdents
232 #define __NR_sys_getdents64 __NR_getdents64
233 #define __NR_sys_getpriority __NR_getpriority
234 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
235 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
236 #define __NR_sys_syslog __NR_syslog
237 #define __NR_sys_futex __NR_futex
238 #define __NR_sys_inotify_init __NR_inotify_init
239 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
240 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
241 
242 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
243 #define __NR__llseek __NR_lseek
244 #endif
245 
246 /* Newer kernel ports have llseek() instead of _llseek() */
247 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
248 #define TARGET_NR__llseek TARGET_NR_llseek
249 #endif
250 
251 #define __NR_sys_gettid __NR_gettid
252 _syscall0(int, sys_gettid)
253 
254 /* For the 64-bit guest on 32-bit host case we must emulate
255  * getdents using getdents64, because otherwise the host
256  * might hand us back more dirent records than we can fit
257  * into the guest buffer after structure format conversion.
258  * Otherwise we implement the guest's getdents using the host getdents, when the host provides it.
259  */
260 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
261 #define EMULATE_GETDENTS_WITH_GETDENTS
262 #endif
263 
264 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
265 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
266 #endif
267 #if (defined(TARGET_NR_getdents) && \
268       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
269     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
270 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
271 #endif
272 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
273 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
274           loff_t *, res, uint, wh);
275 #endif
276 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
277 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
278           siginfo_t *, uinfo)
279 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
280 #ifdef __NR_exit_group
281 _syscall1(int,exit_group,int,error_code)
282 #endif
283 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
284 _syscall1(int,set_tid_address,int *,tidptr)
285 #endif
286 #if defined(TARGET_NR_futex) && defined(__NR_futex)
287 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
288           const struct timespec *,timeout,int *,uaddr2,int,val3)
289 #endif
290 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
291 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
292           unsigned long *, user_mask_ptr);
293 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
294 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
295           unsigned long *, user_mask_ptr);
296 #define __NR_sys_getcpu __NR_getcpu
297 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
298 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
299           void *, arg);
300 _syscall2(int, capget, struct __user_cap_header_struct *, header,
301           struct __user_cap_data_struct *, data);
302 _syscall2(int, capset, struct __user_cap_header_struct *, header,
303           struct __user_cap_data_struct *, data);
304 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
305 _syscall2(int, ioprio_get, int, which, int, who)
306 #endif
307 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
308 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
309 #endif
310 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
311 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
312 #endif
313 
314 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
315 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
316           unsigned long, idx1, unsigned long, idx2)
317 #endif
318 
319 static bitmask_transtbl fcntl_flags_tbl[] = {
320   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
321   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
322   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
323   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
324   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
325   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
326   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
327   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
328   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
329   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
330   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
331   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
332   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
333 #if defined(O_DIRECT)
334   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
335 #endif
336 #if defined(O_NOATIME)
337   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
338 #endif
339 #if defined(O_CLOEXEC)
340   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
341 #endif
342 #if defined(O_PATH)
343   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
344 #endif
345 #if defined(O_TMPFILE)
346   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
347 #endif
348   /* Don't terminate the list prematurely on 64-bit host+guest.  */
349 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
350   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
351 #endif
352   { 0, 0, 0, 0 }
353 };
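/*
 * Sketch of how a bitmask_transtbl is consumed (the actual conversion is done
 * by the generic target_to_host_bitmask()/host_to_target_bitmask() helpers
 * elsewhere in linux-user): for each row { t_mask, t_bits, h_mask, h_bits },
 * if (target_flags & t_mask) == t_bits then h_bits is OR'ed into the result.
 * A guest TARGET_O_WRONLY | TARGET_O_CREAT therefore becomes the host's
 * O_WRONLY | O_CREAT even when the numeric flag values differ between ABIs.
 */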
354 
355 static int sys_getcwd1(char *buf, size_t size)
356 {
357   if (getcwd(buf, size) == NULL) {
358       /* getcwd() sets errno */
359       return (-1);
360   }
361   return strlen(buf)+1;
362 }
363 
364 #ifdef TARGET_NR_utimensat
365 #if defined(__NR_utimensat)
366 #define __NR_sys_utimensat __NR_utimensat
367 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
368           const struct timespec *,tsp,int,flags)
369 #else
370 static int sys_utimensat(int dirfd, const char *pathname,
371                          const struct timespec times[2], int flags)
372 {
373     errno = ENOSYS;
374     return -1;
375 }
376 #endif
377 #endif /* TARGET_NR_utimensat */
378 
379 #ifdef TARGET_NR_renameat2
380 #if defined(__NR_renameat2)
381 #define __NR_sys_renameat2 __NR_renameat2
382 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
383           const char *, new, unsigned int, flags)
384 #else
385 static int sys_renameat2(int oldfd, const char *old,
386                          int newfd, const char *new, int flags)
387 {
388     if (flags == 0) {
389         return renameat(oldfd, old, newfd, new);
390     }
391     errno = ENOSYS;
392     return -1;
393 }
394 #endif
395 #endif /* TARGET_NR_renameat2 */
396 
397 #ifdef CONFIG_INOTIFY
398 #include <sys/inotify.h>
399 
400 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
401 static int sys_inotify_init(void)
402 {
403   return (inotify_init());
404 }
405 #endif
406 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
407 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
408 {
409   return (inotify_add_watch(fd, pathname, mask));
410 }
411 #endif
412 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
413 static int sys_inotify_rm_watch(int fd, int32_t wd)
414 {
415   return (inotify_rm_watch(fd, wd));
416 }
417 #endif
418 #ifdef CONFIG_INOTIFY1
419 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
420 static int sys_inotify_init1(int flags)
421 {
422   return (inotify_init1(flags));
423 }
424 #endif
425 #endif
426 #else
427 /* Userspace can usually survive runtime without inotify */
428 #undef TARGET_NR_inotify_init
429 #undef TARGET_NR_inotify_init1
430 #undef TARGET_NR_inotify_add_watch
431 #undef TARGET_NR_inotify_rm_watch
432 #endif /* CONFIG_INOTIFY  */
433 
434 #if defined(TARGET_NR_prlimit64)
435 #ifndef __NR_prlimit64
436 # define __NR_prlimit64 -1
437 #endif
438 #define __NR_sys_prlimit64 __NR_prlimit64
439 /* The glibc rlimit structure may not be the one used by the underlying syscall */
440 struct host_rlimit64 {
441     uint64_t rlim_cur;
442     uint64_t rlim_max;
443 };
444 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
445           const struct host_rlimit64 *, new_limit,
446           struct host_rlimit64 *, old_limit)
447 #endif
448 
449 
450 #if defined(TARGET_NR_timer_create)
451 /* Maximum of 32 active POSIX timers allowed at any one time. */
452 static timer_t g_posix_timers[32] = { 0, } ;
453 
454 static inline int next_free_host_timer(void)
455 {
456     int k ;
457     /* FIXME: Does finding the next free slot require a lock? */
458     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
459         if (g_posix_timers[k] == 0) {
460             g_posix_timers[k] = (timer_t) 1;
461             return k;
462         }
463     }
464     return -1;
465 }
466 #endif
467 
468 /* ARM EABI and MIPS expect 64-bit types to be aligned to even/odd register pairs */
469 #ifdef TARGET_ARM
470 static inline int regpairs_aligned(void *cpu_env, int num)
471 {
472     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
473 }
474 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
475 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
476 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
477 /* SysV ABI for PPC32 expects 64-bit parameters to be passed on odd/even pairs
478  * of registers which translates to the same as ARM/MIPS, because we start with
479  * r3 as arg1 */
480 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
481 #elif defined(TARGET_SH4)
482 /* SH4 doesn't align register pairs, except for p{read,write}64 */
483 static inline int regpairs_aligned(void *cpu_env, int num)
484 {
485     switch (num) {
486     case TARGET_NR_pread64:
487     case TARGET_NR_pwrite64:
488         return 1;
489 
490     default:
491         return 0;
492     }
493 }
494 #elif defined(TARGET_XTENSA)
495 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
496 #else
497 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
498 #endif
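/*
 * Illustrative example of what the alignment means: for
 * pread64(fd, buf, count, offset) on 32-bit ARM EABI, the 64-bit offset must
 * occupy an even/odd register pair, so the guest passes
 *     r0 = fd, r1 = buf, r2 = count, r3 = (unused padding), r4/r5 = offset
 * and the argument-unpacking code has to skip the padding slot before
 * reassembling the 64-bit value from the two registers.
 */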
499 
500 #define ERRNO_TABLE_SIZE 1200
501 
502 /* target_to_host_errno_table[] is initialized from
503  * host_to_target_errno_table[] in syscall_init(). */
504 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
505 };
506 
507 /*
508  * This list is the union of errno values overridden in asm-<arch>/errno.h
509  * minus the errnos that are not actually generic to all archs.
510  */
511 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
512     [EAGAIN]		= TARGET_EAGAIN,
513     [EIDRM]		= TARGET_EIDRM,
514     [ECHRNG]		= TARGET_ECHRNG,
515     [EL2NSYNC]		= TARGET_EL2NSYNC,
516     [EL3HLT]		= TARGET_EL3HLT,
517     [EL3RST]		= TARGET_EL3RST,
518     [ELNRNG]		= TARGET_ELNRNG,
519     [EUNATCH]		= TARGET_EUNATCH,
520     [ENOCSI]		= TARGET_ENOCSI,
521     [EL2HLT]		= TARGET_EL2HLT,
522     [EDEADLK]		= TARGET_EDEADLK,
523     [ENOLCK]		= TARGET_ENOLCK,
524     [EBADE]		= TARGET_EBADE,
525     [EBADR]		= TARGET_EBADR,
526     [EXFULL]		= TARGET_EXFULL,
527     [ENOANO]		= TARGET_ENOANO,
528     [EBADRQC]		= TARGET_EBADRQC,
529     [EBADSLT]		= TARGET_EBADSLT,
530     [EBFONT]		= TARGET_EBFONT,
531     [ENOSTR]		= TARGET_ENOSTR,
532     [ENODATA]		= TARGET_ENODATA,
533     [ETIME]		= TARGET_ETIME,
534     [ENOSR]		= TARGET_ENOSR,
535     [ENONET]		= TARGET_ENONET,
536     [ENOPKG]		= TARGET_ENOPKG,
537     [EREMOTE]		= TARGET_EREMOTE,
538     [ENOLINK]		= TARGET_ENOLINK,
539     [EADV]		= TARGET_EADV,
540     [ESRMNT]		= TARGET_ESRMNT,
541     [ECOMM]		= TARGET_ECOMM,
542     [EPROTO]		= TARGET_EPROTO,
543     [EDOTDOT]		= TARGET_EDOTDOT,
544     [EMULTIHOP]		= TARGET_EMULTIHOP,
545     [EBADMSG]		= TARGET_EBADMSG,
546     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
547     [EOVERFLOW]		= TARGET_EOVERFLOW,
548     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
549     [EBADFD]		= TARGET_EBADFD,
550     [EREMCHG]		= TARGET_EREMCHG,
551     [ELIBACC]		= TARGET_ELIBACC,
552     [ELIBBAD]		= TARGET_ELIBBAD,
553     [ELIBSCN]		= TARGET_ELIBSCN,
554     [ELIBMAX]		= TARGET_ELIBMAX,
555     [ELIBEXEC]		= TARGET_ELIBEXEC,
556     [EILSEQ]		= TARGET_EILSEQ,
557     [ENOSYS]		= TARGET_ENOSYS,
558     [ELOOP]		= TARGET_ELOOP,
559     [ERESTART]		= TARGET_ERESTART,
560     [ESTRPIPE]		= TARGET_ESTRPIPE,
561     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
562     [EUSERS]		= TARGET_EUSERS,
563     [ENOTSOCK]		= TARGET_ENOTSOCK,
564     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
565     [EMSGSIZE]		= TARGET_EMSGSIZE,
566     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
567     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
568     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
569     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
570     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
571     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
572     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
573     [EADDRINUSE]	= TARGET_EADDRINUSE,
574     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
575     [ENETDOWN]		= TARGET_ENETDOWN,
576     [ENETUNREACH]	= TARGET_ENETUNREACH,
577     [ENETRESET]		= TARGET_ENETRESET,
578     [ECONNABORTED]	= TARGET_ECONNABORTED,
579     [ECONNRESET]	= TARGET_ECONNRESET,
580     [ENOBUFS]		= TARGET_ENOBUFS,
581     [EISCONN]		= TARGET_EISCONN,
582     [ENOTCONN]		= TARGET_ENOTCONN,
583     [EUCLEAN]		= TARGET_EUCLEAN,
584     [ENOTNAM]		= TARGET_ENOTNAM,
585     [ENAVAIL]		= TARGET_ENAVAIL,
586     [EISNAM]		= TARGET_EISNAM,
587     [EREMOTEIO]		= TARGET_EREMOTEIO,
588     [EDQUOT]            = TARGET_EDQUOT,
589     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
590     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
591     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
592     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
593     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
594     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
595     [EALREADY]		= TARGET_EALREADY,
596     [EINPROGRESS]	= TARGET_EINPROGRESS,
597     [ESTALE]		= TARGET_ESTALE,
598     [ECANCELED]		= TARGET_ECANCELED,
599     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
600     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
601 #ifdef ENOKEY
602     [ENOKEY]		= TARGET_ENOKEY,
603 #endif
604 #ifdef EKEYEXPIRED
605     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
606 #endif
607 #ifdef EKEYREVOKED
608     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
609 #endif
610 #ifdef EKEYREJECTED
611     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
612 #endif
613 #ifdef EOWNERDEAD
614     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
615 #endif
616 #ifdef ENOTRECOVERABLE
617     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
618 #endif
619 #ifdef ENOMSG
620     [ENOMSG]            = TARGET_ENOMSG,
621 #endif
622 #ifdef ERFKILL
623     [ERFKILL]           = TARGET_ERFKILL,
624 #endif
625 #ifdef EHWPOISON
626     [EHWPOISON]         = TARGET_EHWPOISON,
627 #endif
628 };
629 
630 static inline int host_to_target_errno(int err)
631 {
632     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
633         host_to_target_errno_table[err]) {
634         return host_to_target_errno_table[err];
635     }
636     return err;
637 }
638 
639 static inline int target_to_host_errno(int err)
640 {
641     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
642         target_to_host_errno_table[err]) {
643         return target_to_host_errno_table[err];
644     }
645     return err;
646 }
647 
648 static inline abi_long get_errno(abi_long ret)
649 {
650     if (ret == -1)
651         return -host_to_target_errno(errno);
652     else
653         return ret;
654 }
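/*
 * Typical usage (illustrative): host calls made on behalf of the guest are
 * wrapped as
 *     ret = get_errno(open(path, flags));
 * so that a host failure such as ENOENT is returned as -TARGET_ENOENT, a
 * value the caller can hand straight back to the guest as the syscall result.
 */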
655 
656 const char *target_strerror(int err)
657 {
658     if (err == TARGET_ERESTARTSYS) {
659         return "To be restarted";
660     }
661     if (err == TARGET_QEMU_ESIGRETURN) {
662         return "Successful exit from sigreturn";
663     }
664 
665     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
666         return NULL;
667     }
668     return strerror(target_to_host_errno(err));
669 }
670 
671 #define safe_syscall0(type, name) \
672 static type safe_##name(void) \
673 { \
674     return safe_syscall(__NR_##name); \
675 }
676 
677 #define safe_syscall1(type, name, type1, arg1) \
678 static type safe_##name(type1 arg1) \
679 { \
680     return safe_syscall(__NR_##name, arg1); \
681 }
682 
683 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
684 static type safe_##name(type1 arg1, type2 arg2) \
685 { \
686     return safe_syscall(__NR_##name, arg1, arg2); \
687 }
688 
689 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
690 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
691 { \
692     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
693 }
694 
695 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
696     type4, arg4) \
697 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
698 { \
699     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
700 }
701 
702 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
703     type4, arg4, type5, arg5) \
704 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
705     type5 arg5) \
706 { \
707     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
708 }
709 
710 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
711     type4, arg4, type5, arg5, type6, arg6) \
712 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
713     type5 arg5, type6 arg6) \
714 { \
715     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
716 }
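/*
 * Usage sketch: the syscall dispatcher wraps blocking host calls as e.g.
 *     ret = get_errno(safe_read(fd, p, count));
 * The safe_syscall() machinery (defined elsewhere in linux-user) is intended
 * to guarantee that a guest signal arriving before the host syscall has
 * really started makes the call fail with errno set to TARGET_ERESTARTSYS,
 * so the guest syscall can be restarted rather than having the signal lost.
 */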
717 
718 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
719 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
720 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
721               int, flags, mode_t, mode)
722 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
723               struct rusage *, rusage)
724 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
725               int, options, struct rusage *, rusage)
726 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
727 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
728               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
729 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
730               struct timespec *, tsp, const sigset_t *, sigmask,
731               size_t, sigsetsize)
732 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
733               int, maxevents, int, timeout, const sigset_t *, sigmask,
734               size_t, sigsetsize)
735 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
736               const struct timespec *,timeout,int *,uaddr2,int,val3)
737 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
738 safe_syscall2(int, kill, pid_t, pid, int, sig)
739 safe_syscall2(int, tkill, int, tid, int, sig)
740 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
741 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
742 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
743 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
744               unsigned long, pos_l, unsigned long, pos_h)
745 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
746               unsigned long, pos_l, unsigned long, pos_h)
747 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
748               socklen_t, addrlen)
749 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
750               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
751 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
752               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
753 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
754 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
755 safe_syscall2(int, flock, int, fd, int, operation)
756 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
757               const struct timespec *, uts, size_t, sigsetsize)
758 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
759               int, flags)
760 safe_syscall2(int, nanosleep, const struct timespec *, req,
761               struct timespec *, rem)
762 #ifdef TARGET_NR_clock_nanosleep
763 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
764               const struct timespec *, req, struct timespec *, rem)
765 #endif
766 #ifdef __NR_ipc
767 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
768               void *, ptr, long, fifth)
769 #endif
770 #ifdef __NR_msgsnd
771 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
772               int, flags)
773 #endif
774 #ifdef __NR_msgrcv
775 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
776               long, msgtype, int, flags)
777 #endif
778 #ifdef __NR_semtimedop
779 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
780               unsigned, nsops, const struct timespec *, timeout)
781 #endif
782 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
783 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
784               size_t, len, unsigned, prio, const struct timespec *, timeout)
785 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
786               size_t, len, unsigned *, prio, const struct timespec *, timeout)
787 #endif
788 /* We do ioctl like this rather than via safe_syscall3 to preserve the
789  * "third argument might be integer or pointer or not present" behaviour of
790  * the libc function.
791  */
792 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
793 /* Similarly for fcntl. Note that callers must always:
794  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
795  *  use the flock64 struct rather than unsuffixed flock
796  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
797  */
798 #ifdef __NR_fcntl64
799 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
800 #else
801 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
802 #endif
803 
804 static inline int host_to_target_sock_type(int host_type)
805 {
806     int target_type;
807 
808     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
809     case SOCK_DGRAM:
810         target_type = TARGET_SOCK_DGRAM;
811         break;
812     case SOCK_STREAM:
813         target_type = TARGET_SOCK_STREAM;
814         break;
815     default:
816         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
817         break;
818     }
819 
820 #if defined(SOCK_CLOEXEC)
821     if (host_type & SOCK_CLOEXEC) {
822         target_type |= TARGET_SOCK_CLOEXEC;
823     }
824 #endif
825 
826 #if defined(SOCK_NONBLOCK)
827     if (host_type & SOCK_NONBLOCK) {
828         target_type |= TARGET_SOCK_NONBLOCK;
829     }
830 #endif
831 
832     return target_type;
833 }
834 
835 static abi_ulong target_brk;
836 static abi_ulong target_original_brk;
837 static abi_ulong brk_page;
838 
839 void target_set_brk(abi_ulong new_brk)
840 {
841     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
842     brk_page = HOST_PAGE_ALIGN(target_brk);
843 }
844 
845 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
846 #define DEBUGF_BRK(message, args...)
847 
848 /* do_brk() must return target values and target errnos. */
849 abi_long do_brk(abi_ulong new_brk)
850 {
851     abi_long mapped_addr;
852     abi_ulong new_alloc_size;
853 
854     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
855 
856     if (!new_brk) {
857         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
858         return target_brk;
859     }
860     if (new_brk < target_original_brk) {
861         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
862                    target_brk);
863         return target_brk;
864     }
865 
866     /* If the new brk is less than the highest page reserved for the
867      * target heap allocation, set it and we're almost done...  */
868     if (new_brk <= brk_page) {
869         /* Heap contents are initialized to zero, as for anonymous
870          * mapped pages.  */
871         if (new_brk > target_brk) {
872             memset(g2h(target_brk), 0, new_brk - target_brk);
873         }
874 	target_brk = new_brk;
875         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
876 	return target_brk;
877     }
878 
879     /* We need to allocate more memory after the brk... Note that
880      * we don't use MAP_FIXED because that will map over the top of
881      * any existing mapping (like the one with the host libc or qemu
882      * itself); instead we treat "mapped but at wrong address" as
883      * a failure and unmap again.
884      */
885     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
886     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
887                                         PROT_READ|PROT_WRITE,
888                                         MAP_ANON|MAP_PRIVATE, 0, 0));
889 
890     if (mapped_addr == brk_page) {
891         /* Heap contents are initialized to zero, as for anonymous
892          * mapped pages.  Technically the new pages are already
893          * initialized to zero since they *are* anonymous mapped
894          * pages, however we have to take care with the contents that
895          * come from the remaining part of the previous page: it may
896      * contain garbage data due to a previous heap usage (grown
897          * then shrunken).  */
898         memset(g2h(target_brk), 0, brk_page - target_brk);
899 
900         target_brk = new_brk;
901         brk_page = HOST_PAGE_ALIGN(target_brk);
902         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
903             target_brk);
904         return target_brk;
905     } else if (mapped_addr != -1) {
906         /* Mapped but at wrong address, meaning there wasn't actually
907          * enough space for this brk.
908          */
909         target_munmap(mapped_addr, new_alloc_size);
910         mapped_addr = -1;
911         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
912     }
913     else {
914         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
915     }
916 
917 #if defined(TARGET_ALPHA)
918     /* We (partially) emulate OSF/1 on Alpha, which requires we
919        return a proper errno, not an unchanged brk value.  */
920     return -TARGET_ENOMEM;
921 #endif
922     /* For everything else, return the previous break. */
923     return target_brk;
924 }
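/*
 * Worked example (assuming 4 KiB host pages): with target_brk = 0x10003800
 * and brk_page = 0x10004000, a brk(0x10003c00) request stays below brk_page
 * and only needs the memset()/assignment above, whereas brk(0x10009000)
 * tries to mmap() five more pages at 0x10004000 and falls back to returning
 * the old break if the kernel places that mapping at a different address.
 */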
925 
926 static inline abi_long copy_from_user_fdset(fd_set *fds,
927                                             abi_ulong target_fds_addr,
928                                             int n)
929 {
930     int i, nw, j, k;
931     abi_ulong b, *target_fds;
932 
933     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
934     if (!(target_fds = lock_user(VERIFY_READ,
935                                  target_fds_addr,
936                                  sizeof(abi_ulong) * nw,
937                                  1)))
938         return -TARGET_EFAULT;
939 
940     FD_ZERO(fds);
941     k = 0;
942     for (i = 0; i < nw; i++) {
943         /* grab the abi_ulong */
944         __get_user(b, &target_fds[i]);
945         for (j = 0; j < TARGET_ABI_BITS; j++) {
946             /* check the bit inside the abi_ulong */
947             if ((b >> j) & 1)
948                 FD_SET(k, fds);
949             k++;
950         }
951     }
952 
953     unlock_user(target_fds, target_fds_addr, 0);
954 
955     return 0;
956 }
957 
958 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
959                                                  abi_ulong target_fds_addr,
960                                                  int n)
961 {
962     if (target_fds_addr) {
963         if (copy_from_user_fdset(fds, target_fds_addr, n))
964             return -TARGET_EFAULT;
965         *fds_ptr = fds;
966     } else {
967         *fds_ptr = NULL;
968     }
969     return 0;
970 }
971 
972 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
973                                           const fd_set *fds,
974                                           int n)
975 {
976     int i, nw, j, k;
977     abi_long v;
978     abi_ulong *target_fds;
979 
980     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
981     if (!(target_fds = lock_user(VERIFY_WRITE,
982                                  target_fds_addr,
983                                  sizeof(abi_ulong) * nw,
984                                  0)))
985         return -TARGET_EFAULT;
986 
987     k = 0;
988     for (i = 0; i < nw; i++) {
989         v = 0;
990         for (j = 0; j < TARGET_ABI_BITS; j++) {
991             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
992             k++;
993         }
994         __put_user(v, &target_fds[i]);
995     }
996 
997     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
998 
999     return 0;
1000 }
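/*
 * Layout sketch: for a 32-bit guest (TARGET_ABI_BITS == 32) and n == 70,
 * nw == 3 abi_ulong words are transferred; guest fd 35 lives in bit 3 of
 * word 1. The __get_user()/__put_user() accessors handle guest endianness,
 * so the host fd_set built here matches the guest bitmap regardless of the
 * host's own word size.
 */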
1001 
1002 #if defined(__alpha__)
1003 #define HOST_HZ 1024
1004 #else
1005 #define HOST_HZ 100
1006 #endif
1007 
1008 static inline abi_long host_to_target_clock_t(long ticks)
1009 {
1010 #if HOST_HZ == TARGET_HZ
1011     return ticks;
1012 #else
1013     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1014 #endif
1015 }
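/*
 * Worked example: an Alpha host (HOST_HZ == 1024) reporting 2048 ticks for a
 * guest with TARGET_HZ == 100 yields (2048 * 100) / 1024 == 200 guest ticks.
 */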
1016 
1017 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1018                                              const struct rusage *rusage)
1019 {
1020     struct target_rusage *target_rusage;
1021 
1022     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1023         return -TARGET_EFAULT;
1024     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1025     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1026     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1027     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1028     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1029     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1030     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1031     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1032     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1033     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1034     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1035     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1036     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1037     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1038     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1039     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1040     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1041     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1042     unlock_user_struct(target_rusage, target_addr, 1);
1043 
1044     return 0;
1045 }
1046 
1047 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1048 {
1049     abi_ulong target_rlim_swap;
1050     rlim_t result;
1051 
1052     target_rlim_swap = tswapal(target_rlim);
1053     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1054         return RLIM_INFINITY;
1055 
1056     result = target_rlim_swap;
1057     if (target_rlim_swap != (rlim_t)result)
1058         return RLIM_INFINITY;
1059 
1060     return result;
1061 }
1062 
1063 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1064 {
1065     abi_ulong target_rlim_swap;
1066     abi_ulong result;
1067 
1068     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1069         target_rlim_swap = TARGET_RLIM_INFINITY;
1070     else
1071         target_rlim_swap = rlim;
1072     result = tswapal(target_rlim_swap);
1073 
1074     return result;
1075 }
1076 
1077 static inline int target_to_host_resource(int code)
1078 {
1079     switch (code) {
1080     case TARGET_RLIMIT_AS:
1081         return RLIMIT_AS;
1082     case TARGET_RLIMIT_CORE:
1083         return RLIMIT_CORE;
1084     case TARGET_RLIMIT_CPU:
1085         return RLIMIT_CPU;
1086     case TARGET_RLIMIT_DATA:
1087         return RLIMIT_DATA;
1088     case TARGET_RLIMIT_FSIZE:
1089         return RLIMIT_FSIZE;
1090     case TARGET_RLIMIT_LOCKS:
1091         return RLIMIT_LOCKS;
1092     case TARGET_RLIMIT_MEMLOCK:
1093         return RLIMIT_MEMLOCK;
1094     case TARGET_RLIMIT_MSGQUEUE:
1095         return RLIMIT_MSGQUEUE;
1096     case TARGET_RLIMIT_NICE:
1097         return RLIMIT_NICE;
1098     case TARGET_RLIMIT_NOFILE:
1099         return RLIMIT_NOFILE;
1100     case TARGET_RLIMIT_NPROC:
1101         return RLIMIT_NPROC;
1102     case TARGET_RLIMIT_RSS:
1103         return RLIMIT_RSS;
1104     case TARGET_RLIMIT_RTPRIO:
1105         return RLIMIT_RTPRIO;
1106     case TARGET_RLIMIT_SIGPENDING:
1107         return RLIMIT_SIGPENDING;
1108     case TARGET_RLIMIT_STACK:
1109         return RLIMIT_STACK;
1110     default:
1111         return code;
1112     }
1113 }
1114 
1115 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1116                                               abi_ulong target_tv_addr)
1117 {
1118     struct target_timeval *target_tv;
1119 
1120     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1121         return -TARGET_EFAULT;
1122 
1123     __get_user(tv->tv_sec, &target_tv->tv_sec);
1124     __get_user(tv->tv_usec, &target_tv->tv_usec);
1125 
1126     unlock_user_struct(target_tv, target_tv_addr, 0);
1127 
1128     return 0;
1129 }
1130 
1131 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1132                                             const struct timeval *tv)
1133 {
1134     struct target_timeval *target_tv;
1135 
1136     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1137         return -TARGET_EFAULT;
1138 
1139     __put_user(tv->tv_sec, &target_tv->tv_sec);
1140     __put_user(tv->tv_usec, &target_tv->tv_usec);
1141 
1142     unlock_user_struct(target_tv, target_tv_addr, 1);
1143 
1144     return 0;
1145 }
1146 
1147 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1148                                                abi_ulong target_tz_addr)
1149 {
1150     struct target_timezone *target_tz;
1151 
1152     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1153         return -TARGET_EFAULT;
1154     }
1155 
1156     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1157     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1158 
1159     unlock_user_struct(target_tz, target_tz_addr, 0);
1160 
1161     return 0;
1162 }
1163 
1164 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1165 #include <mqueue.h>
1166 
1167 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1168                                               abi_ulong target_mq_attr_addr)
1169 {
1170     struct target_mq_attr *target_mq_attr;
1171 
1172     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1173                           target_mq_attr_addr, 1))
1174         return -TARGET_EFAULT;
1175 
1176     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1177     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1178     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1179     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1180 
1181     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1182 
1183     return 0;
1184 }
1185 
1186 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1187                                             const struct mq_attr *attr)
1188 {
1189     struct target_mq_attr *target_mq_attr;
1190 
1191     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1192                           target_mq_attr_addr, 0))
1193         return -TARGET_EFAULT;
1194 
1195     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1196     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1197     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1198     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1199 
1200     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1201 
1202     return 0;
1203 }
1204 #endif
1205 
1206 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1207 /* do_select() must return target values and target errnos. */
1208 static abi_long do_select(int n,
1209                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1210                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1211 {
1212     fd_set rfds, wfds, efds;
1213     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1214     struct timeval tv;
1215     struct timespec ts, *ts_ptr;
1216     abi_long ret;
1217 
1218     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1219     if (ret) {
1220         return ret;
1221     }
1222     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1223     if (ret) {
1224         return ret;
1225     }
1226     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1227     if (ret) {
1228         return ret;
1229     }
1230 
1231     if (target_tv_addr) {
1232         if (copy_from_user_timeval(&tv, target_tv_addr))
1233             return -TARGET_EFAULT;
1234         ts.tv_sec = tv.tv_sec;
1235         ts.tv_nsec = tv.tv_usec * 1000;
1236         ts_ptr = &ts;
1237     } else {
1238         ts_ptr = NULL;
1239     }
1240 
1241     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1242                                   ts_ptr, NULL));
1243 
1244     if (!is_error(ret)) {
1245         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1246             return -TARGET_EFAULT;
1247         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1248             return -TARGET_EFAULT;
1249         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1250             return -TARGET_EFAULT;
1251 
1252         if (target_tv_addr) {
1253             tv.tv_sec = ts.tv_sec;
1254             tv.tv_usec = ts.tv_nsec / 1000;
1255             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1256                 return -TARGET_EFAULT;
1257             }
1258         }
1259     }
1260 
1261     return ret;
1262 }
1263 
1264 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1265 static abi_long do_old_select(abi_ulong arg1)
1266 {
1267     struct target_sel_arg_struct *sel;
1268     abi_ulong inp, outp, exp, tvp;
1269     long nsel;
1270 
1271     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1272         return -TARGET_EFAULT;
1273     }
1274 
1275     nsel = tswapal(sel->n);
1276     inp = tswapal(sel->inp);
1277     outp = tswapal(sel->outp);
1278     exp = tswapal(sel->exp);
1279     tvp = tswapal(sel->tvp);
1280 
1281     unlock_user_struct(sel, arg1, 0);
1282 
1283     return do_select(nsel, inp, outp, exp, tvp);
1284 }
1285 #endif
1286 #endif
1287 
1288 static abi_long do_pipe2(int host_pipe[], int flags)
1289 {
1290 #ifdef CONFIG_PIPE2
1291     return pipe2(host_pipe, flags);
1292 #else
1293     return -ENOSYS;
1294 #endif
1295 }
1296 
1297 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1298                         int flags, int is_pipe2)
1299 {
1300     int host_pipe[2];
1301     abi_long ret;
1302     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1303 
1304     if (is_error(ret))
1305         return get_errno(ret);
1306 
1307     /* Several targets have special calling conventions for the original
1308        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1309     if (!is_pipe2) {
1310 #if defined(TARGET_ALPHA)
1311         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1312         return host_pipe[0];
1313 #elif defined(TARGET_MIPS)
1314         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1315         return host_pipe[0];
1316 #elif defined(TARGET_SH4)
1317         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1318         return host_pipe[0];
1319 #elif defined(TARGET_SPARC)
1320         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1321         return host_pipe[0];
1322 #endif
1323     }
1324 
1325     if (put_user_s32(host_pipe[0], pipedes)
1326         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1327         return -TARGET_EFAULT;
1328     return get_errno(ret);
1329 }
1330 
1331 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1332                                               abi_ulong target_addr,
1333                                               socklen_t len)
1334 {
1335     struct target_ip_mreqn *target_smreqn;
1336 
1337     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1338     if (!target_smreqn)
1339         return -TARGET_EFAULT;
1340     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1341     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1342     if (len == sizeof(struct target_ip_mreqn))
1343         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1344     unlock_user(target_smreqn, target_addr, 0);
1345 
1346     return 0;
1347 }
1348 
1349 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1350                                                abi_ulong target_addr,
1351                                                socklen_t len)
1352 {
1353     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1354     sa_family_t sa_family;
1355     struct target_sockaddr *target_saddr;
1356 
1357     if (fd_trans_target_to_host_addr(fd)) {
1358         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1359     }
1360 
1361     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1362     if (!target_saddr)
1363         return -TARGET_EFAULT;
1364 
1365     sa_family = tswap16(target_saddr->sa_family);
1366 
1367     /* Oops. The caller might send an incomplete sun_path; sun_path
1368      * must be terminated by \0 (see the manual page), but
1369      * unfortunately it is quite common to specify sockaddr_un
1370      * length as "strlen(x->sun_path)" while it should be
1371      * "strlen(...) + 1". We'll fix that here if needed.
1372      * Linux kernel has a similar feature.
1373      */
1374 
1375     if (sa_family == AF_UNIX) {
1376         if (len < unix_maxlen && len > 0) {
1377             char *cp = (char*)target_saddr;
1378 
1379             if ( cp[len-1] && !cp[len] )
1380                 len++;
1381         }
1382         if (len > unix_maxlen)
1383             len = unix_maxlen;
1384     }
1385 
1386     memcpy(addr, target_saddr, len);
1387     addr->sa_family = sa_family;
1388     if (sa_family == AF_NETLINK) {
1389         struct sockaddr_nl *nladdr;
1390 
1391         nladdr = (struct sockaddr_nl *)addr;
1392         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1393         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1394     } else if (sa_family == AF_PACKET) {
1395 	struct target_sockaddr_ll *lladdr;
1396 
1397 	lladdr = (struct target_sockaddr_ll *)addr;
1398 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1399 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1400     }
1401     unlock_user(target_saddr, target_addr, 0);
1402 
1403     return 0;
1404 }
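/*
 * Illustrative example of the AF_UNIX length fix-up above: a guest binding
 * to "/tmp/sock" may pass len == offsetof(sockaddr_un, sun_path) + 9, i.e.
 * without counting the trailing NUL. If the byte just past the supplied
 * length is already zero, len is extended by one so the host kernel sees a
 * properly terminated path, mirroring the Linux kernel's own leniency.
 */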
1405 
1406 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1407                                                struct sockaddr *addr,
1408                                                socklen_t len)
1409 {
1410     struct target_sockaddr *target_saddr;
1411 
1412     if (len == 0) {
1413         return 0;
1414     }
1415     assert(addr);
1416 
1417     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1418     if (!target_saddr)
1419         return -TARGET_EFAULT;
1420     memcpy(target_saddr, addr, len);
1421     if (len >= offsetof(struct target_sockaddr, sa_family) +
1422         sizeof(target_saddr->sa_family)) {
1423         target_saddr->sa_family = tswap16(addr->sa_family);
1424     }
1425     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1426         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1427         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1428         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1429     } else if (addr->sa_family == AF_PACKET) {
1430         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1431         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1432         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1433     } else if (addr->sa_family == AF_INET6 &&
1434                len >= sizeof(struct target_sockaddr_in6)) {
1435         struct target_sockaddr_in6 *target_in6 =
1436                (struct target_sockaddr_in6 *)target_saddr;
1437         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1438     }
1439     unlock_user(target_saddr, target_addr, len);
1440 
1441     return 0;
1442 }
1443 
1444 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1445                                            struct target_msghdr *target_msgh)
1446 {
1447     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1448     abi_long msg_controllen;
1449     abi_ulong target_cmsg_addr;
1450     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1451     socklen_t space = 0;
1452 
1453     msg_controllen = tswapal(target_msgh->msg_controllen);
1454     if (msg_controllen < sizeof (struct target_cmsghdr))
1455         goto the_end;
1456     target_cmsg_addr = tswapal(target_msgh->msg_control);
1457     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1458     target_cmsg_start = target_cmsg;
1459     if (!target_cmsg)
1460         return -TARGET_EFAULT;
1461 
1462     while (cmsg && target_cmsg) {
1463         void *data = CMSG_DATA(cmsg);
1464         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1465 
1466         int len = tswapal(target_cmsg->cmsg_len)
1467             - sizeof(struct target_cmsghdr);
1468 
1469         space += CMSG_SPACE(len);
1470         if (space > msgh->msg_controllen) {
1471             space -= CMSG_SPACE(len);
1472             /* This is a QEMU bug, since we allocated the payload
1473              * area ourselves (unlike overflow in host-to-target
1474              * conversion, which is just the guest giving us a buffer
1475              * that's too small). It can't happen for the payload types
1476              * we currently support; if it becomes an issue in future
1477              * we would need to improve our allocation strategy to
1478              * something more intelligent than "twice the size of the
1479              * target buffer we're reading from".
1480              */
1481             gemu_log("Host cmsg overflow\n");
1482             break;
1483         }
1484 
1485         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1486             cmsg->cmsg_level = SOL_SOCKET;
1487         } else {
1488             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1489         }
1490         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1491         cmsg->cmsg_len = CMSG_LEN(len);
1492 
1493         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1494             int *fd = (int *)data;
1495             int *target_fd = (int *)target_data;
1496             int i, numfds = len / sizeof(int);
1497 
1498             for (i = 0; i < numfds; i++) {
1499                 __get_user(fd[i], target_fd + i);
1500             }
1501         } else if (cmsg->cmsg_level == SOL_SOCKET
1502                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1503             struct ucred *cred = (struct ucred *)data;
1504             struct target_ucred *target_cred =
1505                 (struct target_ucred *)target_data;
1506 
1507             __get_user(cred->pid, &target_cred->pid);
1508             __get_user(cred->uid, &target_cred->uid);
1509             __get_user(cred->gid, &target_cred->gid);
1510         } else {
1511             gemu_log("Unsupported ancillary data: %d/%d\n",
1512                                         cmsg->cmsg_level, cmsg->cmsg_type);
1513             memcpy(data, target_data, len);
1514         }
1515 
1516         cmsg = CMSG_NXTHDR(msgh, cmsg);
1517         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1518                                          target_cmsg_start);
1519     }
1520     unlock_user(target_cmsg, target_cmsg_addr, 0);
1521  the_end:
1522     msgh->msg_controllen = space;
1523     return 0;
1524 }
1525 
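/*
 * Illustrative sketch (not part of the build): once target_to_host_cmsg()
 * has repacked the guest's control data, the host buffer can be walked with
 * the ordinary CMSG_* macros.  This only shows how an SCM_RIGHTS payload
 * would be consumed; the helper name and the assumption that msgh was set
 * up as in do_sendrecvmsg_locked() are ours, for the example only.
 */
#if 0
static void example_walk_host_scm_rights(struct msghdr *msgh)
{
    struct cmsghdr *cmsg;

    for (cmsg = CMSG_FIRSTHDR(msgh); cmsg; cmsg = CMSG_NXTHDR(msgh, cmsg)) {
        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fds = (int *)CMSG_DATA(cmsg);
            int nfds = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
            int i;

            for (i = 0; i < nfds; i++) {
                /* each fds[i] is already a host-order file descriptor */
                (void)fds[i];
            }
        }
    }
}
#endif
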
1526 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1527                                            struct msghdr *msgh)
1528 {
1529     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1530     abi_long msg_controllen;
1531     abi_ulong target_cmsg_addr;
1532     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1533     socklen_t space = 0;
1534 
1535     msg_controllen = tswapal(target_msgh->msg_controllen);
1536     if (msg_controllen < sizeof (struct target_cmsghdr))
1537         goto the_end;
1538     target_cmsg_addr = tswapal(target_msgh->msg_control);
1539     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1540     target_cmsg_start = target_cmsg;
1541     if (!target_cmsg)
1542         return -TARGET_EFAULT;
1543 
1544     while (cmsg && target_cmsg) {
1545         void *data = CMSG_DATA(cmsg);
1546         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1547 
1548         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1549         int tgt_len, tgt_space;
1550 
1551         /* We never copy a half-header but may copy half-data;
1552          * this is Linux's behaviour in put_cmsg(). Note that
1553          * truncation here is a guest problem (which we report
1554          * to the guest via the CTRUNC bit), unlike truncation
1555          * in target_to_host_cmsg, which is a QEMU bug.
1556          */
1557         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1558             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1559             break;
1560         }
1561 
1562         if (cmsg->cmsg_level == SOL_SOCKET) {
1563             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1564         } else {
1565             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1566         }
1567         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1568 
1569         /* Payload types which need a different size of payload on
1570          * the target must adjust tgt_len here.
1571          */
1572         tgt_len = len;
1573         switch (cmsg->cmsg_level) {
1574         case SOL_SOCKET:
1575             switch (cmsg->cmsg_type) {
1576             case SO_TIMESTAMP:
1577                 tgt_len = sizeof(struct target_timeval);
1578                 break;
1579             default:
1580                 break;
1581             }
1582             break;
1583         default:
1584             break;
1585         }
1586 
1587         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1588             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1589             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1590         }
1591 
1592         /* We must now copy-and-convert len bytes of payload
1593          * into tgt_len bytes of destination space. Bear in mind
1594          * that in both source and destination we may be dealing
1595          * with a truncated value!
1596          */
1597         switch (cmsg->cmsg_level) {
1598         case SOL_SOCKET:
1599             switch (cmsg->cmsg_type) {
1600             case SCM_RIGHTS:
1601             {
1602                 int *fd = (int *)data;
1603                 int *target_fd = (int *)target_data;
1604                 int i, numfds = tgt_len / sizeof(int);
1605 
1606                 for (i = 0; i < numfds; i++) {
1607                     __put_user(fd[i], target_fd + i);
1608                 }
1609                 break;
1610             }
1611             case SO_TIMESTAMP:
1612             {
1613                 struct timeval *tv = (struct timeval *)data;
1614                 struct target_timeval *target_tv =
1615                     (struct target_timeval *)target_data;
1616 
1617                 if (len != sizeof(struct timeval) ||
1618                     tgt_len != sizeof(struct target_timeval)) {
1619                     goto unimplemented;
1620                 }
1621 
1622                 /* copy struct timeval to target */
1623                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1624                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1625                 break;
1626             }
1627             case SCM_CREDENTIALS:
1628             {
1629                 struct ucred *cred = (struct ucred *)data;
1630                 struct target_ucred *target_cred =
1631                     (struct target_ucred *)target_data;
1632 
1633                 __put_user(cred->pid, &target_cred->pid);
1634                 __put_user(cred->uid, &target_cred->uid);
1635                 __put_user(cred->gid, &target_cred->gid);
1636                 break;
1637             }
1638             default:
1639                 goto unimplemented;
1640             }
1641             break;
1642 
1643         case SOL_IP:
1644             switch (cmsg->cmsg_type) {
1645             case IP_TTL:
1646             {
1647                 uint32_t *v = (uint32_t *)data;
1648                 uint32_t *t_int = (uint32_t *)target_data;
1649 
1650                 if (len != sizeof(uint32_t) ||
1651                     tgt_len != sizeof(uint32_t)) {
1652                     goto unimplemented;
1653                 }
1654                 __put_user(*v, t_int);
1655                 break;
1656             }
1657             case IP_RECVERR:
1658             {
1659                 struct errhdr_t {
1660                    struct sock_extended_err ee;
1661                    struct sockaddr_in offender;
1662                 };
1663                 struct errhdr_t *errh = (struct errhdr_t *)data;
1664                 struct errhdr_t *target_errh =
1665                     (struct errhdr_t *)target_data;
1666 
1667                 if (len != sizeof(struct errhdr_t) ||
1668                     tgt_len != sizeof(struct errhdr_t)) {
1669                     goto unimplemented;
1670                 }
1671                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1672                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1673                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1674                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1675                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1676                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1677                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1678                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1679                     (void *) &errh->offender, sizeof(errh->offender));
1680                 break;
1681             }
1682             default:
1683                 goto unimplemented;
1684             }
1685             break;
1686 
1687         case SOL_IPV6:
1688             switch (cmsg->cmsg_type) {
1689             case IPV6_HOPLIMIT:
1690             {
1691                 uint32_t *v = (uint32_t *)data;
1692                 uint32_t *t_int = (uint32_t *)target_data;
1693 
1694                 if (len != sizeof(uint32_t) ||
1695                     tgt_len != sizeof(uint32_t)) {
1696                     goto unimplemented;
1697                 }
1698                 __put_user(*v, t_int);
1699                 break;
1700             }
1701             case IPV6_RECVERR:
1702             {
1703                 struct errhdr6_t {
1704                    struct sock_extended_err ee;
1705                    struct sockaddr_in6 offender;
1706                 };
1707                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1708                 struct errhdr6_t *target_errh =
1709                     (struct errhdr6_t *)target_data;
1710 
1711                 if (len != sizeof(struct errhdr6_t) ||
1712                     tgt_len != sizeof(struct errhdr6_t)) {
1713                     goto unimplemented;
1714                 }
1715                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1716                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1717                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1718                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1719                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1720                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1721                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1722                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1723                     (void *) &errh->offender, sizeof(errh->offender));
1724                 break;
1725             }
1726             default:
1727                 goto unimplemented;
1728             }
1729             break;
1730 
1731         default:
1732         unimplemented:
1733             gemu_log("Unsupported ancillary data: %d/%d\n",
1734                                         cmsg->cmsg_level, cmsg->cmsg_type);
1735             memcpy(target_data, data, MIN(len, tgt_len));
1736             if (tgt_len > len) {
1737                 memset(target_data + len, 0, tgt_len - len);
1738             }
1739         }
1740 
1741         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1742         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1743         if (msg_controllen < tgt_space) {
1744             tgt_space = msg_controllen;
1745         }
1746         msg_controllen -= tgt_space;
1747         space += tgt_space;
1748         cmsg = CMSG_NXTHDR(msgh, cmsg);
1749         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1750                                          target_cmsg_start);
1751     }
1752     unlock_user(target_cmsg, target_cmsg_addr, space);
1753  the_end:
1754     target_msgh->msg_controllen = tswapal(space);
1755     return 0;
1756 }
1757 
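/*
 * Worked example for the resizing and truncation logic above (the concrete
 * sizes are illustrative assumptions, not fixed by this file): suppose a
 * 64-bit host delivers an SO_TIMESTAMP payload of sizeof(struct timeval)
 * == 16 bytes to a 32-bit guest whose struct target_timeval is 8 bytes.
 * tgt_len is then adjusted from 16 down to 8 before the field-by-field
 * __put_user() copy.  If the guest supplied less control space than
 * TARGET_CMSG_LEN(tgt_len), the payload is cut down to whatever fits and
 * MSG_CTRUNC is set in msg_flags, mirroring the kernel's put_cmsg().
 */
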
1758 /* do_setsockopt() Must return target values and target errnos. */
1759 static abi_long do_setsockopt(int sockfd, int level, int optname,
1760                               abi_ulong optval_addr, socklen_t optlen)
1761 {
1762     abi_long ret;
1763     int val;
1764     struct ip_mreqn *ip_mreq;
1765     struct ip_mreq_source *ip_mreq_source;
1766 
1767     switch(level) {
1768     case SOL_TCP:
1769         /* TCP options all take an 'int' value.  */
1770         if (optlen < sizeof(uint32_t))
1771             return -TARGET_EINVAL;
1772 
1773         if (get_user_u32(val, optval_addr))
1774             return -TARGET_EFAULT;
1775         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1776         break;
1777     case SOL_IP:
1778         switch(optname) {
1779         case IP_TOS:
1780         case IP_TTL:
1781         case IP_HDRINCL:
1782         case IP_ROUTER_ALERT:
1783         case IP_RECVOPTS:
1784         case IP_RETOPTS:
1785         case IP_PKTINFO:
1786         case IP_MTU_DISCOVER:
1787         case IP_RECVERR:
1788         case IP_RECVTTL:
1789         case IP_RECVTOS:
1790 #ifdef IP_FREEBIND
1791         case IP_FREEBIND:
1792 #endif
1793         case IP_MULTICAST_TTL:
1794         case IP_MULTICAST_LOOP:
1795             val = 0;
1796             if (optlen >= sizeof(uint32_t)) {
1797                 if (get_user_u32(val, optval_addr))
1798                     return -TARGET_EFAULT;
1799             } else if (optlen >= 1) {
1800                 if (get_user_u8(val, optval_addr))
1801                     return -TARGET_EFAULT;
1802             }
1803             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1804             break;
1805         case IP_ADD_MEMBERSHIP:
1806         case IP_DROP_MEMBERSHIP:
1807             if (optlen < sizeof (struct target_ip_mreq) ||
1808                 optlen > sizeof (struct target_ip_mreqn))
1809                 return -TARGET_EINVAL;
1810 
1811             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1812             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1813             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1814             break;
1815 
1816         case IP_BLOCK_SOURCE:
1817         case IP_UNBLOCK_SOURCE:
1818         case IP_ADD_SOURCE_MEMBERSHIP:
1819         case IP_DROP_SOURCE_MEMBERSHIP:
1820             if (optlen != sizeof (struct target_ip_mreq_source))
1821                 return -TARGET_EINVAL;
1822 
1823             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1824             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1825             unlock_user (ip_mreq_source, optval_addr, 0);
1826             break;
1827 
1828         default:
1829             goto unimplemented;
1830         }
1831         break;
1832     case SOL_IPV6:
1833         switch (optname) {
1834         case IPV6_MTU_DISCOVER:
1835         case IPV6_MTU:
1836         case IPV6_V6ONLY:
1837         case IPV6_RECVPKTINFO:
1838         case IPV6_UNICAST_HOPS:
1839         case IPV6_MULTICAST_HOPS:
1840         case IPV6_MULTICAST_LOOP:
1841         case IPV6_RECVERR:
1842         case IPV6_RECVHOPLIMIT:
1843         case IPV6_2292HOPLIMIT:
1844         case IPV6_CHECKSUM:
1845         case IPV6_ADDRFORM:
1846         case IPV6_2292PKTINFO:
1847         case IPV6_RECVTCLASS:
1848         case IPV6_RECVRTHDR:
1849         case IPV6_2292RTHDR:
1850         case IPV6_RECVHOPOPTS:
1851         case IPV6_2292HOPOPTS:
1852         case IPV6_RECVDSTOPTS:
1853         case IPV6_2292DSTOPTS:
1854         case IPV6_TCLASS:
1855 #ifdef IPV6_RECVPATHMTU
1856         case IPV6_RECVPATHMTU:
1857 #endif
1858 #ifdef IPV6_TRANSPARENT
1859         case IPV6_TRANSPARENT:
1860 #endif
1861 #ifdef IPV6_FREEBIND
1862         case IPV6_FREEBIND:
1863 #endif
1864 #ifdef IPV6_RECVORIGDSTADDR
1865         case IPV6_RECVORIGDSTADDR:
1866 #endif
1867             val = 0;
1868             if (optlen < sizeof(uint32_t)) {
1869                 return -TARGET_EINVAL;
1870             }
1871             if (get_user_u32(val, optval_addr)) {
1872                 return -TARGET_EFAULT;
1873             }
1874             ret = get_errno(setsockopt(sockfd, level, optname,
1875                                        &val, sizeof(val)));
1876             break;
1877         case IPV6_PKTINFO:
1878         {
1879             struct in6_pktinfo pki;
1880 
1881             if (optlen < sizeof(pki)) {
1882                 return -TARGET_EINVAL;
1883             }
1884 
1885             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1886                 return -TARGET_EFAULT;
1887             }
1888 
1889             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1890 
1891             ret = get_errno(setsockopt(sockfd, level, optname,
1892                                        &pki, sizeof(pki)));
1893             break;
1894         }
1895         case IPV6_ADD_MEMBERSHIP:
1896         case IPV6_DROP_MEMBERSHIP:
1897         {
1898             struct ipv6_mreq ipv6mreq;
1899 
1900             if (optlen < sizeof(ipv6mreq)) {
1901                 return -TARGET_EINVAL;
1902             }
1903 
1904             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
1905                 return -TARGET_EFAULT;
1906             }
1907 
1908             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
1909 
1910             ret = get_errno(setsockopt(sockfd, level, optname,
1911                                        &ipv6mreq, sizeof(ipv6mreq)));
1912             break;
1913         }
1914         default:
1915             goto unimplemented;
1916         }
1917         break;
1918     case SOL_ICMPV6:
1919         switch (optname) {
1920         case ICMPV6_FILTER:
1921         {
1922             struct icmp6_filter icmp6f;
1923 
1924             if (optlen > sizeof(icmp6f)) {
1925                 optlen = sizeof(icmp6f);
1926             }
1927 
1928             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
1929                 return -TARGET_EFAULT;
1930             }
1931 
1932             for (val = 0; val < 8; val++) {
1933                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
1934             }
1935 
1936             ret = get_errno(setsockopt(sockfd, level, optname,
1937                                        &icmp6f, optlen));
1938             break;
1939         }
1940         default:
1941             goto unimplemented;
1942         }
1943         break;
1944     case SOL_RAW:
1945         switch (optname) {
1946         case ICMP_FILTER:
1947         case IPV6_CHECKSUM:
1948             /* these options take a u32 value */
1949             if (optlen < sizeof(uint32_t)) {
1950                 return -TARGET_EINVAL;
1951             }
1952 
1953             if (get_user_u32(val, optval_addr)) {
1954                 return -TARGET_EFAULT;
1955             }
1956             ret = get_errno(setsockopt(sockfd, level, optname,
1957                                        &val, sizeof(val)));
1958             break;
1959 
1960         default:
1961             goto unimplemented;
1962         }
1963         break;
1964 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
1965     case SOL_ALG:
1966         switch (optname) {
1967         case ALG_SET_KEY:
1968         {
1969             char *alg_key = g_malloc(optlen);
1970 
1971             if (!alg_key) {
1972                 return -TARGET_ENOMEM;
1973             }
1974             if (copy_from_user(alg_key, optval_addr, optlen)) {
1975                 g_free(alg_key);
1976                 return -TARGET_EFAULT;
1977             }
1978             ret = get_errno(setsockopt(sockfd, level, optname,
1979                                        alg_key, optlen));
1980             g_free(alg_key);
1981             break;
1982         }
1983         case ALG_SET_AEAD_AUTHSIZE:
1984         {
1985             ret = get_errno(setsockopt(sockfd, level, optname,
1986                                        NULL, optlen));
1987             break;
1988         }
1989         default:
1990             goto unimplemented;
1991         }
1992         break;
1993 #endif
1994     case TARGET_SOL_SOCKET:
1995         switch (optname) {
1996         case TARGET_SO_RCVTIMEO:
1997         {
1998                 struct timeval tv;
1999 
2000                 optname = SO_RCVTIMEO;
2001 
2002 set_timeout:
2003                 if (optlen != sizeof(struct target_timeval)) {
2004                     return -TARGET_EINVAL;
2005                 }
2006 
2007                 if (copy_from_user_timeval(&tv, optval_addr)) {
2008                     return -TARGET_EFAULT;
2009                 }
2010 
2011                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2012                                 &tv, sizeof(tv)));
2013                 return ret;
2014         }
2015         case TARGET_SO_SNDTIMEO:
2016                 optname = SO_SNDTIMEO;
2017                 goto set_timeout;
2018         case TARGET_SO_ATTACH_FILTER:
2019         {
2020                 struct target_sock_fprog *tfprog;
2021                 struct target_sock_filter *tfilter;
2022                 struct sock_fprog fprog;
2023                 struct sock_filter *filter;
2024                 int i;
2025 
2026                 if (optlen != sizeof(*tfprog)) {
2027                     return -TARGET_EINVAL;
2028                 }
2029                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2030                     return -TARGET_EFAULT;
2031                 }
2032                 if (!lock_user_struct(VERIFY_READ, tfilter,
2033                                       tswapal(tfprog->filter), 0)) {
2034                     unlock_user_struct(tfprog, optval_addr, 1);
2035                     return -TARGET_EFAULT;
2036                 }
2037 
2038                 fprog.len = tswap16(tfprog->len);
2039                 filter = g_try_new(struct sock_filter, fprog.len);
2040                 if (filter == NULL) {
2041                     unlock_user_struct(tfilter, tfprog->filter, 1);
2042                     unlock_user_struct(tfprog, optval_addr, 1);
2043                     return -TARGET_ENOMEM;
2044                 }
2045                 for (i = 0; i < fprog.len; i++) {
2046                     filter[i].code = tswap16(tfilter[i].code);
2047                     filter[i].jt = tfilter[i].jt;
2048                     filter[i].jf = tfilter[i].jf;
2049                     filter[i].k = tswap32(tfilter[i].k);
2050                 }
2051                 fprog.filter = filter;
2052 
2053                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2054                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2055                 g_free(filter);
2056 
2057                 unlock_user_struct(tfilter, tfprog->filter, 1);
2058                 unlock_user_struct(tfprog, optval_addr, 1);
2059                 return ret;
2060         }
2061 	case TARGET_SO_BINDTODEVICE:
2062 	{
2063 		char *dev_ifname, *addr_ifname;
2064 
2065 		if (optlen > IFNAMSIZ - 1) {
2066 		    optlen = IFNAMSIZ - 1;
2067 		}
2068 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2069 		if (!dev_ifname) {
2070 		    return -TARGET_EFAULT;
2071 		}
2072 		optname = SO_BINDTODEVICE;
2073 		addr_ifname = alloca(IFNAMSIZ);
2074 		memcpy(addr_ifname, dev_ifname, optlen);
2075 		addr_ifname[optlen] = 0;
2076 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2077                                            addr_ifname, optlen));
2078 		unlock_user (dev_ifname, optval_addr, 0);
2079 		return ret;
2080 	}
2081         case TARGET_SO_LINGER:
2082         {
2083                 struct linger lg;
2084                 struct target_linger *tlg;
2085 
2086                 if (optlen != sizeof(struct target_linger)) {
2087                     return -TARGET_EINVAL;
2088                 }
2089                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2090                     return -TARGET_EFAULT;
2091                 }
2092                 __get_user(lg.l_onoff, &tlg->l_onoff);
2093                 __get_user(lg.l_linger, &tlg->l_linger);
2094                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2095                                 &lg, sizeof(lg)));
2096                 unlock_user_struct(tlg, optval_addr, 0);
2097                 return ret;
2098         }
2099             /* Options with 'int' argument.  */
2100         case TARGET_SO_DEBUG:
2101 		optname = SO_DEBUG;
2102 		break;
2103         case TARGET_SO_REUSEADDR:
2104 		optname = SO_REUSEADDR;
2105 		break;
2106 #ifdef SO_REUSEPORT
2107         case TARGET_SO_REUSEPORT:
2108                 optname = SO_REUSEPORT;
2109                 break;
2110 #endif
2111         case TARGET_SO_TYPE:
2112 		optname = SO_TYPE;
2113 		break;
2114         case TARGET_SO_ERROR:
2115 		optname = SO_ERROR;
2116 		break;
2117         case TARGET_SO_DONTROUTE:
2118 		optname = SO_DONTROUTE;
2119 		break;
2120         case TARGET_SO_BROADCAST:
2121 		optname = SO_BROADCAST;
2122 		break;
2123         case TARGET_SO_SNDBUF:
2124 		optname = SO_SNDBUF;
2125 		break;
2126         case TARGET_SO_SNDBUFFORCE:
2127                 optname = SO_SNDBUFFORCE;
2128                 break;
2129         case TARGET_SO_RCVBUF:
2130 		optname = SO_RCVBUF;
2131 		break;
2132         case TARGET_SO_RCVBUFFORCE:
2133                 optname = SO_RCVBUFFORCE;
2134                 break;
2135         case TARGET_SO_KEEPALIVE:
2136 		optname = SO_KEEPALIVE;
2137 		break;
2138         case TARGET_SO_OOBINLINE:
2139 		optname = SO_OOBINLINE;
2140 		break;
2141         case TARGET_SO_NO_CHECK:
2142 		optname = SO_NO_CHECK;
2143 		break;
2144         case TARGET_SO_PRIORITY:
2145 		optname = SO_PRIORITY;
2146 		break;
2147 #ifdef SO_BSDCOMPAT
2148         case TARGET_SO_BSDCOMPAT:
2149 		optname = SO_BSDCOMPAT;
2150 		break;
2151 #endif
2152         case TARGET_SO_PASSCRED:
2153 		optname = SO_PASSCRED;
2154 		break;
2155         case TARGET_SO_PASSSEC:
2156                 optname = SO_PASSSEC;
2157                 break;
2158         case TARGET_SO_TIMESTAMP:
2159 		optname = SO_TIMESTAMP;
2160 		break;
2161         case TARGET_SO_RCVLOWAT:
2162 		optname = SO_RCVLOWAT;
2163 		break;
2164         default:
2165             goto unimplemented;
2166         }
2167 	if (optlen < sizeof(uint32_t))
2168             return -TARGET_EINVAL;
2169 
2170 	if (get_user_u32(val, optval_addr))
2171             return -TARGET_EFAULT;
2172 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2173         break;
2174     default:
2175     unimplemented:
2176         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2177         ret = -TARGET_ENOPROTOOPT;
2178     }
2179     return ret;
2180 }
2181 
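/*
 * Illustrative sketch (not part of the build) of the TARGET_SO_RCVTIMEO
 * path above in isolation: the guest hands in a struct target_timeval,
 * QEMU converts it with copy_from_user_timeval(), and the host kernel
 * sees a native struct timeval.  The helper name and the guest_optval
 * parameter are hypothetical, for this example only.
 */
#if 0
static abi_long example_set_rcvtimeo(int sockfd, abi_ulong guest_optval)
{
    struct timeval tv;

    if (copy_from_user_timeval(&tv, guest_optval)) {
        return -TARGET_EFAULT;
    }
    return get_errno(setsockopt(sockfd, SOL_SOCKET, SO_RCVTIMEO,
                                &tv, sizeof(tv)));
}
#endif
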
2182 /* do_getsockopt() Must return target values and target errnos. */
2183 static abi_long do_getsockopt(int sockfd, int level, int optname,
2184                               abi_ulong optval_addr, abi_ulong optlen)
2185 {
2186     abi_long ret;
2187     int len, val;
2188     socklen_t lv;
2189 
2190     switch(level) {
2191     case TARGET_SOL_SOCKET:
2192         level = SOL_SOCKET;
2193         switch (optname) {
2194         /* These don't just return a single integer */
2195         case TARGET_SO_RCVTIMEO:
2196         case TARGET_SO_SNDTIMEO:
2197         case TARGET_SO_PEERNAME:
2198             goto unimplemented;
2199         case TARGET_SO_PEERCRED: {
2200             struct ucred cr;
2201             socklen_t crlen;
2202             struct target_ucred *tcr;
2203 
2204             if (get_user_u32(len, optlen)) {
2205                 return -TARGET_EFAULT;
2206             }
2207             if (len < 0) {
2208                 return -TARGET_EINVAL;
2209             }
2210 
2211             crlen = sizeof(cr);
2212             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2213                                        &cr, &crlen));
2214             if (ret < 0) {
2215                 return ret;
2216             }
2217             if (len > crlen) {
2218                 len = crlen;
2219             }
2220             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2221                 return -TARGET_EFAULT;
2222             }
2223             __put_user(cr.pid, &tcr->pid);
2224             __put_user(cr.uid, &tcr->uid);
2225             __put_user(cr.gid, &tcr->gid);
2226             unlock_user_struct(tcr, optval_addr, 1);
2227             if (put_user_u32(len, optlen)) {
2228                 return -TARGET_EFAULT;
2229             }
2230             break;
2231         }
2232         case TARGET_SO_LINGER:
2233         {
2234             struct linger lg;
2235             socklen_t lglen;
2236             struct target_linger *tlg;
2237 
2238             if (get_user_u32(len, optlen)) {
2239                 return -TARGET_EFAULT;
2240             }
2241             if (len < 0) {
2242                 return -TARGET_EINVAL;
2243             }
2244 
2245             lglen = sizeof(lg);
2246             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2247                                        &lg, &lglen));
2248             if (ret < 0) {
2249                 return ret;
2250             }
2251             if (len > lglen) {
2252                 len = lglen;
2253             }
2254             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2255                 return -TARGET_EFAULT;
2256             }
2257             __put_user(lg.l_onoff, &tlg->l_onoff);
2258             __put_user(lg.l_linger, &tlg->l_linger);
2259             unlock_user_struct(tlg, optval_addr, 1);
2260             if (put_user_u32(len, optlen)) {
2261                 return -TARGET_EFAULT;
2262             }
2263             break;
2264         }
2265         /* Options with 'int' argument.  */
2266         case TARGET_SO_DEBUG:
2267             optname = SO_DEBUG;
2268             goto int_case;
2269         case TARGET_SO_REUSEADDR:
2270             optname = SO_REUSEADDR;
2271             goto int_case;
2272 #ifdef SO_REUSEPORT
2273         case TARGET_SO_REUSEPORT:
2274             optname = SO_REUSEPORT;
2275             goto int_case;
2276 #endif
2277         case TARGET_SO_TYPE:
2278             optname = SO_TYPE;
2279             goto int_case;
2280         case TARGET_SO_ERROR:
2281             optname = SO_ERROR;
2282             goto int_case;
2283         case TARGET_SO_DONTROUTE:
2284             optname = SO_DONTROUTE;
2285             goto int_case;
2286         case TARGET_SO_BROADCAST:
2287             optname = SO_BROADCAST;
2288             goto int_case;
2289         case TARGET_SO_SNDBUF:
2290             optname = SO_SNDBUF;
2291             goto int_case;
2292         case TARGET_SO_RCVBUF:
2293             optname = SO_RCVBUF;
2294             goto int_case;
2295         case TARGET_SO_KEEPALIVE:
2296             optname = SO_KEEPALIVE;
2297             goto int_case;
2298         case TARGET_SO_OOBINLINE:
2299             optname = SO_OOBINLINE;
2300             goto int_case;
2301         case TARGET_SO_NO_CHECK:
2302             optname = SO_NO_CHECK;
2303             goto int_case;
2304         case TARGET_SO_PRIORITY:
2305             optname = SO_PRIORITY;
2306             goto int_case;
2307 #ifdef SO_BSDCOMPAT
2308         case TARGET_SO_BSDCOMPAT:
2309             optname = SO_BSDCOMPAT;
2310             goto int_case;
2311 #endif
2312         case TARGET_SO_PASSCRED:
2313             optname = SO_PASSCRED;
2314             goto int_case;
2315         case TARGET_SO_TIMESTAMP:
2316             optname = SO_TIMESTAMP;
2317             goto int_case;
2318         case TARGET_SO_RCVLOWAT:
2319             optname = SO_RCVLOWAT;
2320             goto int_case;
2321         case TARGET_SO_ACCEPTCONN:
2322             optname = SO_ACCEPTCONN;
2323             goto int_case;
2324         default:
2325             goto int_case;
2326         }
2327         break;
2328     case SOL_TCP:
2329         /* TCP options all take an 'int' value.  */
2330     int_case:
2331         if (get_user_u32(len, optlen))
2332             return -TARGET_EFAULT;
2333         if (len < 0)
2334             return -TARGET_EINVAL;
2335         lv = sizeof(lv);
2336         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2337         if (ret < 0)
2338             return ret;
2339         if (optname == SO_TYPE) {
2340             val = host_to_target_sock_type(val);
2341         }
2342         if (len > lv)
2343             len = lv;
2344         if (len == 4) {
2345             if (put_user_u32(val, optval_addr))
2346                 return -TARGET_EFAULT;
2347         } else {
2348             if (put_user_u8(val, optval_addr))
2349                 return -TARGET_EFAULT;
2350         }
2351         if (put_user_u32(len, optlen))
2352             return -TARGET_EFAULT;
2353         break;
2354     case SOL_IP:
2355         switch(optname) {
2356         case IP_TOS:
2357         case IP_TTL:
2358         case IP_HDRINCL:
2359         case IP_ROUTER_ALERT:
2360         case IP_RECVOPTS:
2361         case IP_RETOPTS:
2362         case IP_PKTINFO:
2363         case IP_MTU_DISCOVER:
2364         case IP_RECVERR:
2365         case IP_RECVTOS:
2366 #ifdef IP_FREEBIND
2367         case IP_FREEBIND:
2368 #endif
2369         case IP_MULTICAST_TTL:
2370         case IP_MULTICAST_LOOP:
2371             if (get_user_u32(len, optlen))
2372                 return -TARGET_EFAULT;
2373             if (len < 0)
2374                 return -TARGET_EINVAL;
2375             lv = sizeof(lv);
2376             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2377             if (ret < 0)
2378                 return ret;
2379             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2380                 len = 1;
2381                 if (put_user_u32(len, optlen)
2382                     || put_user_u8(val, optval_addr))
2383                     return -TARGET_EFAULT;
2384             } else {
2385                 if (len > sizeof(int))
2386                     len = sizeof(int);
2387                 if (put_user_u32(len, optlen)
2388                     || put_user_u32(val, optval_addr))
2389                     return -TARGET_EFAULT;
2390             }
2391             break;
2392         default:
2393             ret = -TARGET_ENOPROTOOPT;
2394             break;
2395         }
2396         break;
2397     case SOL_IPV6:
2398         switch (optname) {
2399         case IPV6_MTU_DISCOVER:
2400         case IPV6_MTU:
2401         case IPV6_V6ONLY:
2402         case IPV6_RECVPKTINFO:
2403         case IPV6_UNICAST_HOPS:
2404         case IPV6_MULTICAST_HOPS:
2405         case IPV6_MULTICAST_LOOP:
2406         case IPV6_RECVERR:
2407         case IPV6_RECVHOPLIMIT:
2408         case IPV6_2292HOPLIMIT:
2409         case IPV6_CHECKSUM:
2410         case IPV6_ADDRFORM:
2411         case IPV6_2292PKTINFO:
2412         case IPV6_RECVTCLASS:
2413         case IPV6_RECVRTHDR:
2414         case IPV6_2292RTHDR:
2415         case IPV6_RECVHOPOPTS:
2416         case IPV6_2292HOPOPTS:
2417         case IPV6_RECVDSTOPTS:
2418         case IPV6_2292DSTOPTS:
2419         case IPV6_TCLASS:
2420 #ifdef IPV6_RECVPATHMTU
2421         case IPV6_RECVPATHMTU:
2422 #endif
2423 #ifdef IPV6_TRANSPARENT
2424         case IPV6_TRANSPARENT:
2425 #endif
2426 #ifdef IPV6_FREEBIND
2427         case IPV6_FREEBIND:
2428 #endif
2429 #ifdef IPV6_RECVORIGDSTADDR
2430         case IPV6_RECVORIGDSTADDR:
2431 #endif
2432             if (get_user_u32(len, optlen))
2433                 return -TARGET_EFAULT;
2434             if (len < 0)
2435                 return -TARGET_EINVAL;
2436             lv = sizeof(lv);
2437             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2438             if (ret < 0)
2439                 return ret;
2440             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2441                 len = 1;
2442                 if (put_user_u32(len, optlen)
2443                     || put_user_u8(val, optval_addr))
2444                     return -TARGET_EFAULT;
2445             } else {
2446                 if (len > sizeof(int))
2447                     len = sizeof(int);
2448                 if (put_user_u32(len, optlen)
2449                     || put_user_u32(val, optval_addr))
2450                     return -TARGET_EFAULT;
2451             }
2452             break;
2453         default:
2454             ret = -TARGET_ENOPROTOOPT;
2455             break;
2456         }
2457         break;
2458     default:
2459     unimplemented:
2460         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2461                  level, optname);
2462         ret = -TARGET_EOPNOTSUPP;
2463         break;
2464     }
2465     return ret;
2466 }
2467 
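/*
 * Note on the int_case path above: the host always reports the option as a
 * full int, but the guest only gets back as much as it asked for.  For
 * example (lengths chosen for illustration), a guest passing optlen == 4
 * receives the value through put_user_u32(), while any other positive
 * length is answered with a single byte through put_user_u8(); the length,
 * clamped to what the host returned, is then stored back via
 * put_user_u32(len, optlen).
 */
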
2468 /* Convert a target low/high pair representing a file offset into the host
2469  * low/high pair. This function doesn't handle offsets bigger than 64 bits,
2470  * as the kernel doesn't handle them either.
2471  */
2472 static void target_to_host_low_high(abi_ulong tlow,
2473                                     abi_ulong thigh,
2474                                     unsigned long *hlow,
2475                                     unsigned long *hhigh)
2476 {
2477     uint64_t off = tlow |
2478         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2479         TARGET_LONG_BITS / 2;
2480 
2481     *hlow = off;
2482     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2483 }
2484 
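/*
 * Worked example (a sketch; the widths depend on the build): with a 32-bit
 * target and a 64-bit host, tlow = 0x89abcdef and thigh = 0x01234567
 * combine into off = 0x0123456789abcdef; *hlow then receives the whole
 * 64-bit value and *hhigh becomes 0.  On a 32-bit host the same inputs
 * yield *hlow = 0x89abcdef and *hhigh = 0x01234567 again.
 */
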
2485 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2486                                 abi_ulong count, int copy)
2487 {
2488     struct target_iovec *target_vec;
2489     struct iovec *vec;
2490     abi_ulong total_len, max_len;
2491     int i;
2492     int err = 0;
2493     bool bad_address = false;
2494 
2495     if (count == 0) {
2496         errno = 0;
2497         return NULL;
2498     }
2499     if (count > IOV_MAX) {
2500         errno = EINVAL;
2501         return NULL;
2502     }
2503 
2504     vec = g_try_new0(struct iovec, count);
2505     if (vec == NULL) {
2506         errno = ENOMEM;
2507         return NULL;
2508     }
2509 
2510     target_vec = lock_user(VERIFY_READ, target_addr,
2511                            count * sizeof(struct target_iovec), 1);
2512     if (target_vec == NULL) {
2513         err = EFAULT;
2514         goto fail2;
2515     }
2516 
2517     /* ??? If host page size > target page size, this will result in a
2518        value larger than what we can actually support.  */
2519     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2520     total_len = 0;
2521 
2522     for (i = 0; i < count; i++) {
2523         abi_ulong base = tswapal(target_vec[i].iov_base);
2524         abi_long len = tswapal(target_vec[i].iov_len);
2525 
2526         if (len < 0) {
2527             err = EINVAL;
2528             goto fail;
2529         } else if (len == 0) {
2530             /* Zero length pointer is ignored.  */
2531             vec[i].iov_base = 0;
2532         } else {
2533             vec[i].iov_base = lock_user(type, base, len, copy);
2534             /* If the first buffer pointer is bad, this is a fault.  But
2535              * subsequent bad buffers will result in a partial write; this
2536              * is realized by filling the vector with null pointers and
2537              * zero lengths. */
2538             if (!vec[i].iov_base) {
2539                 if (i == 0) {
2540                     err = EFAULT;
2541                     goto fail;
2542                 } else {
2543                     bad_address = true;
2544                 }
2545             }
2546             if (bad_address) {
2547                 len = 0;
2548             }
2549             if (len > max_len - total_len) {
2550                 len = max_len - total_len;
2551             }
2552         }
2553         vec[i].iov_len = len;
2554         total_len += len;
2555     }
2556 
2557     unlock_user(target_vec, target_addr, 0);
2558     return vec;
2559 
2560  fail:
2561     while (--i >= 0) {
2562         if (tswapal(target_vec[i].iov_len) > 0) {
2563             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2564         }
2565     }
2566     unlock_user(target_vec, target_addr, 0);
2567  fail2:
2568     g_free(vec);
2569     errno = err;
2570     return NULL;
2571 }
2572 
2573 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2574                          abi_ulong count, int copy)
2575 {
2576     struct target_iovec *target_vec;
2577     int i;
2578 
2579     target_vec = lock_user(VERIFY_READ, target_addr,
2580                            count * sizeof(struct target_iovec), 1);
2581     if (target_vec) {
2582         for (i = 0; i < count; i++) {
2583             abi_ulong base = tswapal(target_vec[i].iov_base);
2584             abi_long len = tswapal(target_vec[i].iov_len);
2585             if (len < 0) {
2586                 break;
2587             }
2588             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2589         }
2590         unlock_user(target_vec, target_addr, 0);
2591     }
2592 
2593     g_free(vec);
2594 }
2595 
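/*
 * Illustrative sketch (not part of the build): the usual pairing of
 * lock_iovec() and unlock_iovec() around a host vectored read, in the
 * style of the readv emulation.  The helper name and its parameters are
 * hypothetical, and plain readv() stands in for the safe_* wrapper used
 * by the real syscall paths.
 */
#if 0
static abi_long example_readv(int fd, abi_ulong target_iov, int iovcnt)
{
    /* copy = 0: nothing useful in guest memory to copy in yet */
    struct iovec *vec = lock_iovec(VERIFY_WRITE, target_iov, iovcnt, 0);
    abi_long ret;

    if (vec == NULL) {
        return -host_to_target_errno(errno);
    }
    ret = get_errno(readv(fd, vec, iovcnt));
    /* copy = 1: the data the host wrote must be copied back to the guest */
    unlock_iovec(vec, target_iov, iovcnt, 1);
    return ret;
}
#endif
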
2596 static inline int target_to_host_sock_type(int *type)
2597 {
2598     int host_type = 0;
2599     int target_type = *type;
2600 
2601     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2602     case TARGET_SOCK_DGRAM:
2603         host_type = SOCK_DGRAM;
2604         break;
2605     case TARGET_SOCK_STREAM:
2606         host_type = SOCK_STREAM;
2607         break;
2608     default:
2609         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2610         break;
2611     }
2612     if (target_type & TARGET_SOCK_CLOEXEC) {
2613 #if defined(SOCK_CLOEXEC)
2614         host_type |= SOCK_CLOEXEC;
2615 #else
2616         return -TARGET_EINVAL;
2617 #endif
2618     }
2619     if (target_type & TARGET_SOCK_NONBLOCK) {
2620 #if defined(SOCK_NONBLOCK)
2621         host_type |= SOCK_NONBLOCK;
2622 #elif !defined(O_NONBLOCK)
2623         return -TARGET_EINVAL;
2624 #endif
2625     }
2626     *type = host_type;
2627     return 0;
2628 }
2629 
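/*
 * Example of the translation above (the numeric values of the TARGET_SOCK_*
 * constants are per-architecture, so this is only a sketch): a guest asking
 * for TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK ends up with SOCK_STREAM
 * plus either SOCK_NONBLOCK, when the host defines it, or, failing that,
 * the O_NONBLOCK fcntl() fixup applied by sock_flags_fixup() below once the
 * socket exists.
 */
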
2630 /* Try to emulate socket type flags after socket creation.  */
2631 static int sock_flags_fixup(int fd, int target_type)
2632 {
2633 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2634     if (target_type & TARGET_SOCK_NONBLOCK) {
2635         int flags = fcntl(fd, F_GETFL);
2636         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2637             close(fd);
2638             return -TARGET_EINVAL;
2639         }
2640     }
2641 #endif
2642     return fd;
2643 }
2644 
2645 /* do_socket() Must return target values and target errnos. */
2646 static abi_long do_socket(int domain, int type, int protocol)
2647 {
2648     int target_type = type;
2649     int ret;
2650 
2651     ret = target_to_host_sock_type(&type);
2652     if (ret) {
2653         return ret;
2654     }
2655 
2656     if (domain == PF_NETLINK && !(
2657 #ifdef CONFIG_RTNETLINK
2658          protocol == NETLINK_ROUTE ||
2659 #endif
2660          protocol == NETLINK_KOBJECT_UEVENT ||
2661          protocol == NETLINK_AUDIT)) {
2662         return -EPFNOSUPPORT;
2663     }
2664 
2665     if (domain == AF_PACKET ||
2666         (domain == AF_INET && type == SOCK_PACKET)) {
2667         protocol = tswap16(protocol);
2668     }
2669 
2670     ret = get_errno(socket(domain, type, protocol));
2671     if (ret >= 0) {
2672         ret = sock_flags_fixup(ret, target_type);
2673         if (type == SOCK_PACKET) {
2674             /* Handle an obsolete case:
2675              * if the socket type is SOCK_PACKET, bind by name.
2676              */
2677             fd_trans_register(ret, &target_packet_trans);
2678         } else if (domain == PF_NETLINK) {
2679             switch (protocol) {
2680 #ifdef CONFIG_RTNETLINK
2681             case NETLINK_ROUTE:
2682                 fd_trans_register(ret, &target_netlink_route_trans);
2683                 break;
2684 #endif
2685             case NETLINK_KOBJECT_UEVENT:
2686                 /* nothing to do: messages are strings */
2687                 break;
2688             case NETLINK_AUDIT:
2689                 fd_trans_register(ret, &target_netlink_audit_trans);
2690                 break;
2691             default:
2692                 g_assert_not_reached();
2693             }
2694         }
2695     }
2696     return ret;
2697 }
2698 
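/*
 * Example (illustrative): a guest socket(PF_NETLINK, SOCK_RAW, NETLINK_ROUTE)
 * call passes the protocol filter above, and the new fd is registered with
 * target_netlink_route_trans so that netlink route messages crossing it are
 * converted between guest and host representations by the fd-trans layer.
 */
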
2699 /* do_bind() Must return target values and target errnos. */
2700 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2701                         socklen_t addrlen)
2702 {
2703     void *addr;
2704     abi_long ret;
2705 
2706     if ((int)addrlen < 0) {
2707         return -TARGET_EINVAL;
2708     }
2709 
2710     addr = alloca(addrlen+1);
2711 
2712     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2713     if (ret)
2714         return ret;
2715 
2716     return get_errno(bind(sockfd, addr, addrlen));
2717 }
2718 
2719 /* do_connect() Must return target values and target errnos. */
2720 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2721                            socklen_t addrlen)
2722 {
2723     void *addr;
2724     abi_long ret;
2725 
2726     if ((int)addrlen < 0) {
2727         return -TARGET_EINVAL;
2728     }
2729 
2730     addr = alloca(addrlen+1);
2731 
2732     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2733     if (ret)
2734         return ret;
2735 
2736     return get_errno(safe_connect(sockfd, addr, addrlen));
2737 }
2738 
2739 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2740 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2741                                       int flags, int send)
2742 {
2743     abi_long ret, len;
2744     struct msghdr msg;
2745     abi_ulong count;
2746     struct iovec *vec;
2747     abi_ulong target_vec;
2748 
2749     if (msgp->msg_name) {
2750         msg.msg_namelen = tswap32(msgp->msg_namelen);
2751         msg.msg_name = alloca(msg.msg_namelen+1);
2752         ret = target_to_host_sockaddr(fd, msg.msg_name,
2753                                       tswapal(msgp->msg_name),
2754                                       msg.msg_namelen);
2755         if (ret == -TARGET_EFAULT) {
2756             /* For connected sockets msg_name and msg_namelen must
2757              * be ignored, so returning EFAULT immediately is wrong.
2758              * Instead, pass a bad msg_name to the host kernel, and
2759              * let it decide whether to return EFAULT or not.
2760              */
2761             msg.msg_name = (void *)-1;
2762         } else if (ret) {
2763             goto out2;
2764         }
2765     } else {
2766         msg.msg_name = NULL;
2767         msg.msg_namelen = 0;
2768     }
2769     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2770     msg.msg_control = alloca(msg.msg_controllen);
2771     memset(msg.msg_control, 0, msg.msg_controllen);
2772 
2773     msg.msg_flags = tswap32(msgp->msg_flags);
2774 
2775     count = tswapal(msgp->msg_iovlen);
2776     target_vec = tswapal(msgp->msg_iov);
2777 
2778     if (count > IOV_MAX) {
2779         /* sendmsg/recvmsg return a different errno for this condition than
2780          * readv/writev, so we must catch it here before lock_iovec() does.
2781          */
2782         ret = -TARGET_EMSGSIZE;
2783         goto out2;
2784     }
2785 
2786     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2787                      target_vec, count, send);
2788     if (vec == NULL) {
2789         ret = -host_to_target_errno(errno);
2790         goto out2;
2791     }
2792     msg.msg_iovlen = count;
2793     msg.msg_iov = vec;
2794 
2795     if (send) {
2796         if (fd_trans_target_to_host_data(fd)) {
2797             void *host_msg;
2798 
2799             host_msg = g_malloc(msg.msg_iov->iov_len);
2800             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2801             ret = fd_trans_target_to_host_data(fd)(host_msg,
2802                                                    msg.msg_iov->iov_len);
2803             if (ret >= 0) {
2804                 msg.msg_iov->iov_base = host_msg;
2805                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2806             }
2807             g_free(host_msg);
2808         } else {
2809             ret = target_to_host_cmsg(&msg, msgp);
2810             if (ret == 0) {
2811                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2812             }
2813         }
2814     } else {
2815         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2816         if (!is_error(ret)) {
2817             len = ret;
2818             if (fd_trans_host_to_target_data(fd)) {
2819                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2820                                                MIN(msg.msg_iov->iov_len, len));
2821             } else {
2822                 ret = host_to_target_cmsg(msgp, &msg);
2823             }
2824             if (!is_error(ret)) {
2825                 msgp->msg_namelen = tswap32(msg.msg_namelen);
2826                 msgp->msg_flags = tswap32(msg.msg_flags);
2827                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2828                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2829                                     msg.msg_name, msg.msg_namelen);
2830                     if (ret) {
2831                         goto out;
2832                     }
2833                 }
2834 
2835                 ret = len;
2836             }
2837         }
2838     }
2839 
2840 out:
2841     unlock_iovec(vec, target_vec, count, !send);
2842 out2:
2843     return ret;
2844 }
2845 
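/*
 * Note on the control buffer sizing above (see also the overflow comment in
 * target_to_host_cmsg()): the host-side buffer is allocated at twice the
 * guest's msg_controllen because host ancillary payloads can be wider than
 * the guest's, e.g. a 64-bit host struct timeval versus a 32-bit guest
 * struct target_timeval, and a factor of two is sufficient for the payload
 * types currently converted.
 */
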
2846 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2847                                int flags, int send)
2848 {
2849     abi_long ret;
2850     struct target_msghdr *msgp;
2851 
2852     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2853                           msgp,
2854                           target_msg,
2855                           send ? 1 : 0)) {
2856         return -TARGET_EFAULT;
2857     }
2858     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2859     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2860     return ret;
2861 }
2862 
2863 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2864  * so it might not have this *mmsg-specific flag either.
2865  */
2866 #ifndef MSG_WAITFORONE
2867 #define MSG_WAITFORONE 0x10000
2868 #endif
2869 
2870 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2871                                 unsigned int vlen, unsigned int flags,
2872                                 int send)
2873 {
2874     struct target_mmsghdr *mmsgp;
2875     abi_long ret = 0;
2876     int i;
2877 
2878     if (vlen > UIO_MAXIOV) {
2879         vlen = UIO_MAXIOV;
2880     }
2881 
2882     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2883     if (!mmsgp) {
2884         return -TARGET_EFAULT;
2885     }
2886 
2887     for (i = 0; i < vlen; i++) {
2888         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2889         if (is_error(ret)) {
2890             break;
2891         }
2892         mmsgp[i].msg_len = tswap32(ret);
2893         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2894         if (flags & MSG_WAITFORONE) {
2895             flags |= MSG_DONTWAIT;
2896         }
2897     }
2898 
2899     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2900 
2901     /* Return the number of datagrams sent or received if we handled any
2902      * at all; otherwise return the error.
2903      */
2904     if (i) {
2905         return i;
2906     }
2907     return ret;
2908 }
2909 
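/*
 * Example of the partial-success rule above (illustrative): if vlen is 5 and
 * the third datagram fails with EAGAIN, the loop stops with i == 2, the first
 * two msg_len fields have already been filled in, and the call returns 2; the
 * guest is expected to retry from the third datagram.  Only when not even the
 * first datagram was handled is the error itself returned.
 */
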
2910 /* do_accept4() Must return target values and target errnos. */
2911 static abi_long do_accept4(int fd, abi_ulong target_addr,
2912                            abi_ulong target_addrlen_addr, int flags)
2913 {
2914     socklen_t addrlen, ret_addrlen;
2915     void *addr;
2916     abi_long ret;
2917     int host_flags;
2918 
2919     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2920 
2921     if (target_addr == 0) {
2922         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2923     }
2924 
2925     /* Linux returns EINVAL if the addrlen pointer is invalid */
2926     if (get_user_u32(addrlen, target_addrlen_addr))
2927         return -TARGET_EINVAL;
2928 
2929     if ((int)addrlen < 0) {
2930         return -TARGET_EINVAL;
2931     }
2932 
2933     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2934         return -TARGET_EINVAL;
2935 
2936     addr = alloca(addrlen);
2937 
2938     ret_addrlen = addrlen;
2939     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
2940     if (!is_error(ret)) {
2941         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2942         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2943             ret = -TARGET_EFAULT;
2944         }
2945     }
2946     return ret;
2947 }
2948 
2949 /* do_getpeername() Must return target values and target errnos. */
2950 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2951                                abi_ulong target_addrlen_addr)
2952 {
2953     socklen_t addrlen, ret_addrlen;
2954     void *addr;
2955     abi_long ret;
2956 
2957     if (get_user_u32(addrlen, target_addrlen_addr))
2958         return -TARGET_EFAULT;
2959 
2960     if ((int)addrlen < 0) {
2961         return -TARGET_EINVAL;
2962     }
2963 
2964     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2965         return -TARGET_EFAULT;
2966 
2967     addr = alloca(addrlen);
2968 
2969     ret_addrlen = addrlen;
2970     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
2971     if (!is_error(ret)) {
2972         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2973         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2974             ret = -TARGET_EFAULT;
2975         }
2976     }
2977     return ret;
2978 }
2979 
2980 /* do_getsockname() Must return target values and target errnos. */
2981 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2982                                abi_ulong target_addrlen_addr)
2983 {
2984     socklen_t addrlen, ret_addrlen;
2985     void *addr;
2986     abi_long ret;
2987 
2988     if (get_user_u32(addrlen, target_addrlen_addr))
2989         return -TARGET_EFAULT;
2990 
2991     if ((int)addrlen < 0) {
2992         return -TARGET_EINVAL;
2993     }
2994 
2995     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2996         return -TARGET_EFAULT;
2997 
2998     addr = alloca(addrlen);
2999 
3000     ret_addrlen = addrlen;
3001     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3002     if (!is_error(ret)) {
3003         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3004         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3005             ret = -TARGET_EFAULT;
3006         }
3007     }
3008     return ret;
3009 }
3010 
3011 /* do_socketpair() Must return target values and target errnos. */
3012 static abi_long do_socketpair(int domain, int type, int protocol,
3013                               abi_ulong target_tab_addr)
3014 {
3015     int tab[2];
3016     abi_long ret;
3017 
3018     target_to_host_sock_type(&type);
3019 
3020     ret = get_errno(socketpair(domain, type, protocol, tab));
3021     if (!is_error(ret)) {
3022         if (put_user_s32(tab[0], target_tab_addr)
3023             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3024             ret = -TARGET_EFAULT;
3025     }
3026     return ret;
3027 }
3028 
3029 /* do_sendto() Must return target values and target errnos. */
3030 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3031                           abi_ulong target_addr, socklen_t addrlen)
3032 {
3033     void *addr;
3034     void *host_msg;
3035     void *copy_msg = NULL;
3036     abi_long ret;
3037 
3038     if ((int)addrlen < 0) {
3039         return -TARGET_EINVAL;
3040     }
3041 
3042     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3043     if (!host_msg)
3044         return -TARGET_EFAULT;
3045     if (fd_trans_target_to_host_data(fd)) {
3046         copy_msg = host_msg;
3047         host_msg = g_malloc(len);
3048         memcpy(host_msg, copy_msg, len);
3049         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3050         if (ret < 0) {
3051             goto fail;
3052         }
3053     }
3054     if (target_addr) {
3055         addr = alloca(addrlen+1);
3056         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3057         if (ret) {
3058             goto fail;
3059         }
3060         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3061     } else {
3062         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3063     }
3064 fail:
3065     if (copy_msg) {
3066         g_free(host_msg);
3067         host_msg = copy_msg;
3068     }
3069     unlock_user(host_msg, msg, 0);
3070     return ret;
3071 }
3072 
3073 /* do_recvfrom() Must return target values and target errnos. */
3074 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3075                             abi_ulong target_addr,
3076                             abi_ulong target_addrlen)
3077 {
3078     socklen_t addrlen, ret_addrlen;
3079     void *addr;
3080     void *host_msg;
3081     abi_long ret;
3082 
3083     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3084     if (!host_msg)
3085         return -TARGET_EFAULT;
3086     if (target_addr) {
3087         if (get_user_u32(addrlen, target_addrlen)) {
3088             ret = -TARGET_EFAULT;
3089             goto fail;
3090         }
3091         if ((int)addrlen < 0) {
3092             ret = -TARGET_EINVAL;
3093             goto fail;
3094         }
3095         addr = alloca(addrlen);
3096         ret_addrlen = addrlen;
3097         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3098                                       addr, &ret_addrlen));
3099     } else {
3100         addr = NULL; /* To keep compiler quiet.  */
3101         addrlen = 0; /* To keep compiler quiet.  */
3102         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3103     }
3104     if (!is_error(ret)) {
3105         if (fd_trans_host_to_target_data(fd)) {
3106             abi_long trans;
3107             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3108             if (is_error(trans)) {
3109                 ret = trans;
3110                 goto fail;
3111             }
3112         }
3113         if (target_addr) {
3114             host_to_target_sockaddr(target_addr, addr,
3115                                     MIN(addrlen, ret_addrlen));
3116             if (put_user_u32(ret_addrlen, target_addrlen)) {
3117                 ret = -TARGET_EFAULT;
3118                 goto fail;
3119             }
3120         }
3121         unlock_user(host_msg, msg, len);
3122     } else {
3123 fail:
3124         unlock_user(host_msg, msg, 0);
3125     }
3126     return ret;
3127 }
3128 
3129 #ifdef TARGET_NR_socketcall
3130 /* do_socketcall() must return target values and target errnos. */
3131 static abi_long do_socketcall(int num, abi_ulong vptr)
3132 {
3133     static const unsigned nargs[] = { /* number of arguments per operation */
3134         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3135         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3136         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3137         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3138         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3139         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3140         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3141         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3142         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3143         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3144         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3145         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3146         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3147         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3148         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3149         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3150         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3151         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3152         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3153         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3154     };
3155     abi_long a[6]; /* max 6 args */
3156     unsigned i;
3157 
3158     /* check the range of the first argument num */
3159     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3160     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3161         return -TARGET_EINVAL;
3162     }
3163     /* ensure we have space for args */
3164     if (nargs[num] > ARRAY_SIZE(a)) {
3165         return -TARGET_EINVAL;
3166     }
3167     /* collect the arguments in a[] according to nargs[] */
3168     for (i = 0; i < nargs[num]; ++i) {
3169         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3170             return -TARGET_EFAULT;
3171         }
3172     }
3173     /* now that we have the args, invoke the appropriate underlying function */
3174     switch (num) {
3175     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3176         return do_socket(a[0], a[1], a[2]);
3177     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3178         return do_bind(a[0], a[1], a[2]);
3179     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3180         return do_connect(a[0], a[1], a[2]);
3181     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3182         return get_errno(listen(a[0], a[1]));
3183     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3184         return do_accept4(a[0], a[1], a[2], 0);
3185     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3186         return do_getsockname(a[0], a[1], a[2]);
3187     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3188         return do_getpeername(a[0], a[1], a[2]);
3189     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3190         return do_socketpair(a[0], a[1], a[2], a[3]);
3191     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3192         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3193     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3194         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3195     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3196         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3197     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3198         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3199     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3200         return get_errno(shutdown(a[0], a[1]));
3201     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3202         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3203     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3204         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3205     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3206         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3207     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3208         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3209     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3210         return do_accept4(a[0], a[1], a[2], a[3]);
3211     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3212         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3213     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3214         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3215     default:
3216         gemu_log("Unsupported socketcall: %d\n", num);
3217         return -TARGET_EINVAL;
3218     }
3219 }
3220 #endif
3221 
3222 #define N_SHM_REGIONS	32
3223 
3224 static struct shm_region {
3225     abi_ulong start;
3226     abi_ulong size;
3227     bool in_use;
3228 } shm_regions[N_SHM_REGIONS];
3229 
3230 #ifndef TARGET_SEMID64_DS
3231 /* asm-generic version of this struct */
3232 struct target_semid64_ds
3233 {
3234   struct target_ipc_perm sem_perm;
3235   abi_ulong sem_otime;
3236 #if TARGET_ABI_BITS == 32
3237   abi_ulong __unused1;
3238 #endif
3239   abi_ulong sem_ctime;
3240 #if TARGET_ABI_BITS == 32
3241   abi_ulong __unused2;
3242 #endif
3243   abi_ulong sem_nsems;
3244   abi_ulong __unused3;
3245   abi_ulong __unused4;
3246 };
3247 #endif
3248 
3249 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3250                                                abi_ulong target_addr)
3251 {
3252     struct target_ipc_perm *target_ip;
3253     struct target_semid64_ds *target_sd;
3254 
3255     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3256         return -TARGET_EFAULT;
3257     target_ip = &(target_sd->sem_perm);
3258     host_ip->__key = tswap32(target_ip->__key);
3259     host_ip->uid = tswap32(target_ip->uid);
3260     host_ip->gid = tswap32(target_ip->gid);
3261     host_ip->cuid = tswap32(target_ip->cuid);
3262     host_ip->cgid = tswap32(target_ip->cgid);
3263 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3264     host_ip->mode = tswap32(target_ip->mode);
3265 #else
3266     host_ip->mode = tswap16(target_ip->mode);
3267 #endif
3268 #if defined(TARGET_PPC)
3269     host_ip->__seq = tswap32(target_ip->__seq);
3270 #else
3271     host_ip->__seq = tswap16(target_ip->__seq);
3272 #endif
3273     unlock_user_struct(target_sd, target_addr, 0);
3274     return 0;
3275 }
3276 
3277 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3278                                                struct ipc_perm *host_ip)
3279 {
3280     struct target_ipc_perm *target_ip;
3281     struct target_semid64_ds *target_sd;
3282 
3283     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3284         return -TARGET_EFAULT;
3285     target_ip = &(target_sd->sem_perm);
3286     target_ip->__key = tswap32(host_ip->__key);
3287     target_ip->uid = tswap32(host_ip->uid);
3288     target_ip->gid = tswap32(host_ip->gid);
3289     target_ip->cuid = tswap32(host_ip->cuid);
3290     target_ip->cgid = tswap32(host_ip->cgid);
3291 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3292     target_ip->mode = tswap32(host_ip->mode);
3293 #else
3294     target_ip->mode = tswap16(host_ip->mode);
3295 #endif
3296 #if defined(TARGET_PPC)
3297     target_ip->__seq = tswap32(host_ip->__seq);
3298 #else
3299     target_ip->__seq = tswap16(host_ip->__seq);
3300 #endif
3301     unlock_user_struct(target_sd, target_addr, 1);
3302     return 0;
3303 }
3304 
3305 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3306                                                abi_ulong target_addr)
3307 {
3308     struct target_semid64_ds *target_sd;
3309 
3310     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3311         return -TARGET_EFAULT;
3312     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3313         return -TARGET_EFAULT;
3314     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3315     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3316     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3317     unlock_user_struct(target_sd, target_addr, 0);
3318     return 0;
3319 }
3320 
3321 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3322                                                struct semid_ds *host_sd)
3323 {
3324     struct target_semid64_ds *target_sd;
3325 
3326     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3327         return -TARGET_EFAULT;
3328     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3329         return -TARGET_EFAULT;
3330     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3331     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3332     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3333     unlock_user_struct(target_sd, target_addr, 1);
3334     return 0;
3335 }
3336 
3337 struct target_seminfo {
3338     int semmap;
3339     int semmni;
3340     int semmns;
3341     int semmnu;
3342     int semmsl;
3343     int semopm;
3344     int semume;
3345     int semusz;
3346     int semvmx;
3347     int semaem;
3348 };
3349 
3350 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3351                                               struct seminfo *host_seminfo)
3352 {
3353     struct target_seminfo *target_seminfo;
3354     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3355         return -TARGET_EFAULT;
3356     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3357     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3358     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3359     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3360     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3361     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3362     __put_user(host_seminfo->semume, &target_seminfo->semume);
3363     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3364     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3365     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3366     unlock_user_struct(target_seminfo, target_addr, 1);
3367     return 0;
3368 }
3369 
3370 union semun {
3371 	int val;
3372 	struct semid_ds *buf;
3373 	unsigned short *array;
3374 	struct seminfo *__buf;
3375 };
3376 
3377 union target_semun {
3378 	int val;
3379 	abi_ulong buf;
3380 	abi_ulong array;
3381 	abi_ulong __buf;
3382 };
3383 
3384 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3385                                                abi_ulong target_addr)
3386 {
3387     int nsems;
3388     unsigned short *array;
3389     union semun semun;
3390     struct semid_ds semid_ds;
3391     int i, ret;
3392 
3393     semun.buf = &semid_ds;
3394 
3395     ret = semctl(semid, 0, IPC_STAT, semun);
3396     if (ret == -1)
3397         return get_errno(ret);
3398 
3399     nsems = semid_ds.sem_nsems;
3400 
3401     *host_array = g_try_new(unsigned short, nsems);
3402     if (!*host_array) {
3403         return -TARGET_ENOMEM;
3404     }
3405     array = lock_user(VERIFY_READ, target_addr,
3406                       nsems*sizeof(unsigned short), 1);
3407     if (!array) {
3408         g_free(*host_array);
3409         return -TARGET_EFAULT;
3410     }
3411 
3412     for(i=0; i<nsems; i++) {
3413         __get_user((*host_array)[i], &array[i]);
3414     }
3415     unlock_user(array, target_addr, 0);
3416 
3417     return 0;
3418 }
3419 
3420 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3421                                                unsigned short **host_array)
3422 {
3423     int nsems;
3424     unsigned short *array;
3425     union semun semun;
3426     struct semid_ds semid_ds;
3427     int i, ret;
3428 
3429     semun.buf = &semid_ds;
3430 
3431     ret = semctl(semid, 0, IPC_STAT, semun);
3432     if (ret == -1)
3433         return get_errno(ret);
3434 
3435     nsems = semid_ds.sem_nsems;
3436 
3437     array = lock_user(VERIFY_WRITE, target_addr,
3438                       nsems*sizeof(unsigned short), 0);
3439     if (!array)
3440         return -TARGET_EFAULT;
3441 
3442     for(i=0; i<nsems; i++) {
3443         __put_user((*host_array)[i], &array[i]);
3444     }
3445     g_free(*host_array);
3446     unlock_user(array, target_addr, 1);
3447 
3448     return 0;
3449 }
3450 
3451 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3452                                  abi_ulong target_arg)
3453 {
3454     union target_semun target_su = { .buf = target_arg };
3455     union semun arg;
3456     struct semid_ds dsarg;
3457     unsigned short *array = NULL;
3458     struct seminfo seminfo;
3459     abi_long ret = -TARGET_EINVAL;
3460     abi_long err;
3461     cmd &= 0xff;
3462 
3463     switch( cmd ) {
3464 	case GETVAL:
3465 	case SETVAL:
3466             /* In 64 bit cross-endian situations, we will erroneously pick up
3467              * the wrong half of the union for the "val" element.  To rectify
3468              * this, the entire 8-byte structure is byteswapped, followed by
3469              * a swap of the 4 byte val field. In other cases, the data is
3470              * already in proper host byte order. */
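            /* A sketch of one cross-endian case (value fetched from guest
             * memory via the ipc(2) path): a big-endian 64-bit guest storing
             * val = 5 leaves the four significant bytes in the upper half of
             * the abi_ulong we fetched, so reading target_su.val directly on
             * a little-endian host would return the padding half.  tswapal()
             * puts the raw guest bytes back at the start of the union and
             * tswap32() then yields 5. */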
3471 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3472 		target_su.buf = tswapal(target_su.buf);
3473 		arg.val = tswap32(target_su.val);
3474 	    } else {
3475 		arg.val = target_su.val;
3476 	    }
3477             ret = get_errno(semctl(semid, semnum, cmd, arg));
3478             break;
3479 	case GETALL:
3480 	case SETALL:
3481             err = target_to_host_semarray(semid, &array, target_su.array);
3482             if (err)
3483                 return err;
3484             arg.array = array;
3485             ret = get_errno(semctl(semid, semnum, cmd, arg));
3486             err = host_to_target_semarray(semid, target_su.array, &array);
3487             if (err)
3488                 return err;
3489             break;
3490 	case IPC_STAT:
3491 	case IPC_SET:
3492 	case SEM_STAT:
3493             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3494             if (err)
3495                 return err;
3496             arg.buf = &dsarg;
3497             ret = get_errno(semctl(semid, semnum, cmd, arg));
3498             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3499             if (err)
3500                 return err;
3501             break;
3502 	case IPC_INFO:
3503 	case SEM_INFO:
3504             arg.__buf = &seminfo;
3505             ret = get_errno(semctl(semid, semnum, cmd, arg));
3506             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3507             if (err)
3508                 return err;
3509             break;
3510 	case IPC_RMID:
3511 	case GETPID:
3512 	case GETNCNT:
3513 	case GETZCNT:
3514             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3515             break;
3516     }
3517 
3518     return ret;
3519 }
3520 
3521 struct target_sembuf {
3522     unsigned short sem_num;
3523     short sem_op;
3524     short sem_flg;
3525 };
3526 
3527 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3528                                              abi_ulong target_addr,
3529                                              unsigned nsops)
3530 {
3531     struct target_sembuf *target_sembuf;
3532     int i;
3533 
3534     target_sembuf = lock_user(VERIFY_READ, target_addr,
3535                               nsops*sizeof(struct target_sembuf), 1);
3536     if (!target_sembuf)
3537         return -TARGET_EFAULT;
3538 
3539     for(i=0; i<nsops; i++) {
3540         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3541         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3542         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3543     }
3544 
3545     unlock_user(target_sembuf, target_addr, 0);
3546 
3547     return 0;
3548 }
3549 
3550 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3551 {
3552     struct sembuf sops[nsops];
3553     abi_long ret;
3554 
3555     if (target_to_host_sembuf(sops, ptr, nsops))
3556         return -TARGET_EFAULT;
3557 
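    /*
     * Prefer the host's dedicated semtimedop syscall where it exists;
     * otherwise fall back to the multiplexed ipc(2) syscall, which is all
     * that some host architectures provide.
     */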
3558     ret = -TARGET_ENOSYS;
3559 #ifdef __NR_semtimedop
3560     ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3561 #endif
3562 #ifdef __NR_ipc
3563     if (ret == -TARGET_ENOSYS) {
3564         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3565     }
3566 #endif
3567     return ret;
3568 }
3569 
3570 struct target_msqid_ds
3571 {
3572     struct target_ipc_perm msg_perm;
3573     abi_ulong msg_stime;
3574 #if TARGET_ABI_BITS == 32
3575     abi_ulong __unused1;
3576 #endif
3577     abi_ulong msg_rtime;
3578 #if TARGET_ABI_BITS == 32
3579     abi_ulong __unused2;
3580 #endif
3581     abi_ulong msg_ctime;
3582 #if TARGET_ABI_BITS == 32
3583     abi_ulong __unused3;
3584 #endif
3585     abi_ulong __msg_cbytes;
3586     abi_ulong msg_qnum;
3587     abi_ulong msg_qbytes;
3588     abi_ulong msg_lspid;
3589     abi_ulong msg_lrpid;
3590     abi_ulong __unused4;
3591     abi_ulong __unused5;
3592 };
3593 
3594 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3595                                                abi_ulong target_addr)
3596 {
3597     struct target_msqid_ds *target_md;
3598 
3599     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3600         return -TARGET_EFAULT;
3601     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3602         return -TARGET_EFAULT;
3603     host_md->msg_stime = tswapal(target_md->msg_stime);
3604     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3605     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3606     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3607     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3608     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3609     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3610     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3611     unlock_user_struct(target_md, target_addr, 0);
3612     return 0;
3613 }
3614 
3615 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3616                                                struct msqid_ds *host_md)
3617 {
3618     struct target_msqid_ds *target_md;
3619 
3620     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3621         return -TARGET_EFAULT;
3622     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3623         return -TARGET_EFAULT;
3624     target_md->msg_stime = tswapal(host_md->msg_stime);
3625     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3626     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3627     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3628     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3629     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3630     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3631     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3632     unlock_user_struct(target_md, target_addr, 1);
3633     return 0;
3634 }
3635 
3636 struct target_msginfo {
3637     int msgpool;
3638     int msgmap;
3639     int msgmax;
3640     int msgmnb;
3641     int msgmni;
3642     int msgssz;
3643     int msgtql;
3644     unsigned short int msgseg;
3645 };
3646 
3647 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3648                                               struct msginfo *host_msginfo)
3649 {
3650     struct target_msginfo *target_msginfo;
3651     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3652         return -TARGET_EFAULT;
3653     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3654     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3655     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3656     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3657     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3658     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3659     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3660     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3661     unlock_user_struct(target_msginfo, target_addr, 1);
3662     return 0;
3663 }
3664 
3665 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3666 {
3667     struct msqid_ds dsarg;
3668     struct msginfo msginfo;
3669     abi_long ret = -TARGET_EINVAL;
3670 
3671     cmd &= 0xff;
3672 
3673     switch (cmd) {
3674     case IPC_STAT:
3675     case IPC_SET:
3676     case MSG_STAT:
3677         if (target_to_host_msqid_ds(&dsarg,ptr))
3678             return -TARGET_EFAULT;
3679         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3680         if (host_to_target_msqid_ds(ptr,&dsarg))
3681             return -TARGET_EFAULT;
3682         break;
3683     case IPC_RMID:
3684         ret = get_errno(msgctl(msgid, cmd, NULL));
3685         break;
3686     case IPC_INFO:
3687     case MSG_INFO:
3688         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3689         if (host_to_target_msginfo(ptr, &msginfo))
3690             return -TARGET_EFAULT;
3691         break;
3692     }
3693 
3694     return ret;
3695 }
3696 
3697 struct target_msgbuf {
3698     abi_long mtype;
3699     char	mtext[1];
3700 };
3701 
3702 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3703                                  ssize_t msgsz, int msgflg)
3704 {
3705     struct target_msgbuf *target_mb;
3706     struct msgbuf *host_mb;
3707     abi_long ret = 0;
3708 
3709     if (msgsz < 0) {
3710         return -TARGET_EINVAL;
3711     }
3712 
3713     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3714         return -TARGET_EFAULT;
3715     host_mb = g_try_malloc(msgsz + sizeof(long));
3716     if (!host_mb) {
3717         unlock_user_struct(target_mb, msgp, 0);
3718         return -TARGET_ENOMEM;
3719     }
3720     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3721     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3722     ret = -TARGET_ENOSYS;
3723 #ifdef __NR_msgsnd
3724     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3725 #endif
3726 #ifdef __NR_ipc
3727     if (ret == -TARGET_ENOSYS) {
3728         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
3729                                  host_mb, 0));
3730     }
3731 #endif
3732     g_free(host_mb);
3733     unlock_user_struct(target_mb, msgp, 0);
3734 
3735     return ret;
3736 }
3737 
3738 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3739                                  ssize_t msgsz, abi_long msgtyp,
3740                                  int msgflg)
3741 {
3742     struct target_msgbuf *target_mb;
3743     char *target_mtext;
3744     struct msgbuf *host_mb;
3745     abi_long ret = 0;
3746 
3747     if (msgsz < 0) {
3748         return -TARGET_EINVAL;
3749     }
3750 
3751     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3752         return -TARGET_EFAULT;
3753 
3754     host_mb = g_try_malloc(msgsz + sizeof(long));
3755     if (!host_mb) {
3756         ret = -TARGET_ENOMEM;
3757         goto end;
3758     }
3759     ret = -TARGET_ENOSYS;
3760 #ifdef __NR_msgrcv
3761     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3762 #endif
3763 #ifdef __NR_ipc
3764     if (ret == -TARGET_ENOSYS) {
3765         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
3766                         msgflg, host_mb, msgtyp));
3767     }
3768 #endif
3769 
3770     if (ret > 0) {
3771         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3772         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3773         if (!target_mtext) {
3774             ret = -TARGET_EFAULT;
3775             goto end;
3776         }
3777         memcpy(target_mb->mtext, host_mb->mtext, ret);
3778         unlock_user(target_mtext, target_mtext_addr, ret);
3779     }
3780 
3781     target_mb->mtype = tswapal(host_mb->mtype);
3782 
3783 end:
3784     if (target_mb)
3785         unlock_user_struct(target_mb, msgp, 1);
3786     g_free(host_mb);
3787     return ret;
3788 }
3789 
3790 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3791                                                abi_ulong target_addr)
3792 {
3793     struct target_shmid_ds *target_sd;
3794 
3795     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3796         return -TARGET_EFAULT;
3797     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3798         return -TARGET_EFAULT;
3799     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3800     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3801     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3802     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3803     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3804     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3805     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3806     unlock_user_struct(target_sd, target_addr, 0);
3807     return 0;
3808 }
3809 
3810 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3811                                                struct shmid_ds *host_sd)
3812 {
3813     struct target_shmid_ds *target_sd;
3814 
3815     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3816         return -TARGET_EFAULT;
3817     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3818         return -TARGET_EFAULT;
3819     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3820     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3821     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3822     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3823     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3824     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3825     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3826     unlock_user_struct(target_sd, target_addr, 1);
3827     return 0;
3828 }
3829 
3830 struct  target_shminfo {
3831     abi_ulong shmmax;
3832     abi_ulong shmmin;
3833     abi_ulong shmmni;
3834     abi_ulong shmseg;
3835     abi_ulong shmall;
3836 };
3837 
3838 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3839                                               struct shminfo *host_shminfo)
3840 {
3841     struct target_shminfo *target_shminfo;
3842     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3843         return -TARGET_EFAULT;
3844     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3845     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3846     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3847     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3848     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3849     unlock_user_struct(target_shminfo, target_addr, 1);
3850     return 0;
3851 }
3852 
3853 struct target_shm_info {
3854     int used_ids;
3855     abi_ulong shm_tot;
3856     abi_ulong shm_rss;
3857     abi_ulong shm_swp;
3858     abi_ulong swap_attempts;
3859     abi_ulong swap_successes;
3860 };
3861 
3862 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3863                                                struct shm_info *host_shm_info)
3864 {
3865     struct target_shm_info *target_shm_info;
3866     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3867         return -TARGET_EFAULT;
3868     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3869     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3870     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3871     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3872     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3873     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3874     unlock_user_struct(target_shm_info, target_addr, 1);
3875     return 0;
3876 }
3877 
3878 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3879 {
3880     struct shmid_ds dsarg;
3881     struct shminfo shminfo;
3882     struct shm_info shm_info;
3883     abi_long ret = -TARGET_EINVAL;
3884 
3885     cmd &= 0xff;
3886 
3887     switch(cmd) {
3888     case IPC_STAT:
3889     case IPC_SET:
3890     case SHM_STAT:
3891         if (target_to_host_shmid_ds(&dsarg, buf))
3892             return -TARGET_EFAULT;
3893         ret = get_errno(shmctl(shmid, cmd, &dsarg));
3894         if (host_to_target_shmid_ds(buf, &dsarg))
3895             return -TARGET_EFAULT;
3896         break;
3897     case IPC_INFO:
3898         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3899         if (host_to_target_shminfo(buf, &shminfo))
3900             return -TARGET_EFAULT;
3901         break;
3902     case SHM_INFO:
3903         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3904         if (host_to_target_shm_info(buf, &shm_info))
3905             return -TARGET_EFAULT;
3906         break;
3907     case IPC_RMID:
3908     case SHM_LOCK:
3909     case SHM_UNLOCK:
3910         ret = get_errno(shmctl(shmid, cmd, NULL));
3911         break;
3912     }
3913 
3914     return ret;
3915 }
3916 
3917 #ifndef TARGET_FORCE_SHMLBA
3918 /* For most architectures, SHMLBA is the same as the page size;
3919  * some architectures have larger values, in which case they should
3920  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3921  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3922  * and defining its own value for SHMLBA.
3923  *
3924  * The kernel also permits SHMLBA to be set by the architecture to a
3925  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3926  * this means that addresses are rounded to the large size if
3927  * SHM_RND is set but addresses not aligned to that size are not rejected
3928  * as long as they are at least page-aligned. Since the only architecture
3929  * which uses this is ia64, this code doesn't provide for that oddity.
3930  */
3931 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
3932 {
3933     return TARGET_PAGE_SIZE;
3934 }
3935 #endif
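/*
 * As an illustration only (the constant below is made up, not taken from any
 * real target): an architecture whose caches alias on 16K boundaries would
 * instead define, in its target-specific code,
 *
 *     #define TARGET_FORCE_SHMLBA 1
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 0x4000;
 *     }
 *
 * and do_shmat() below then rounds and validates shmaddr against that value.
 */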
3936 
3937 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
3938                                  int shmid, abi_ulong shmaddr, int shmflg)
3939 {
3940     abi_long raddr;
3941     void *host_raddr;
3942     struct shmid_ds shm_info;
3943     int i,ret;
3944     abi_ulong shmlba;
3945 
3946     /* find out the length of the shared memory segment */
3947     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3948     if (is_error(ret)) {
3949         /* can't get length, bail out */
3950         return ret;
3951     }
3952 
3953     shmlba = target_shmlba(cpu_env);
3954 
3955     if (shmaddr & (shmlba - 1)) {
3956         if (shmflg & SHM_RND) {
3957             shmaddr &= ~(shmlba - 1);
3958         } else {
3959             return -TARGET_EINVAL;
3960         }
3961     }
3962     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
3963         return -TARGET_EINVAL;
3964     }
3965 
3966     mmap_lock();
3967 
3968     if (shmaddr)
3969         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3970     else {
3971         abi_ulong mmap_start;
3972 
3973         /* In order to use the host shmat, we need to honor host SHMLBA.  */
3974         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
3975 
3976         if (mmap_start == -1) {
3977             errno = ENOMEM;
3978             host_raddr = (void *)-1;
3979         } else
3980             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3981     }
3982 
3983     if (host_raddr == (void *)-1) {
3984         mmap_unlock();
3985         return get_errno((long)host_raddr);
3986     }
3987     raddr=h2g((unsigned long)host_raddr);
3988 
3989     page_set_flags(raddr, raddr + shm_info.shm_segsz,
3990                    PAGE_VALID | PAGE_READ |
3991                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3992 
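    /*
     * Record the attachment in shm_regions[] so that a later shmdt() on
     * this guest address can look up the segment size and clear the page
     * flags again.
     */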
3993     for (i = 0; i < N_SHM_REGIONS; i++) {
3994         if (!shm_regions[i].in_use) {
3995             shm_regions[i].in_use = true;
3996             shm_regions[i].start = raddr;
3997             shm_regions[i].size = shm_info.shm_segsz;
3998             break;
3999         }
4000     }
4001 
4002     mmap_unlock();
4003     return raddr;
4004 
4005 }
4006 
4007 static inline abi_long do_shmdt(abi_ulong shmaddr)
4008 {
4009     int i;
4010     abi_long rv;
4011 
4012     mmap_lock();
4013 
4014     for (i = 0; i < N_SHM_REGIONS; ++i) {
4015         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4016             shm_regions[i].in_use = false;
4017             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4018             break;
4019         }
4020     }
4021     rv = get_errno(shmdt(g2h(shmaddr)));
4022 
4023     mmap_unlock();
4024 
4025     return rv;
4026 }
4027 
4028 #ifdef TARGET_NR_ipc
4029 /* ??? This only works with linear mappings.  */
4030 /* do_ipc() must return target values and target errnos. */
4031 static abi_long do_ipc(CPUArchState *cpu_env,
4032                        unsigned int call, abi_long first,
4033                        abi_long second, abi_long third,
4034                        abi_long ptr, abi_long fifth)
4035 {
4036     int version;
4037     abi_long ret = 0;
4038 
4039     version = call >> 16;
4040     call &= 0xffff;
4041 
4042     switch (call) {
4043     case IPCOP_semop:
4044         ret = do_semop(first, ptr, second);
4045         break;
4046 
4047     case IPCOP_semget:
4048         ret = get_errno(semget(first, second, third));
4049         break;
4050 
4051     case IPCOP_semctl: {
4052         /* The semun argument to semctl is passed by value, so dereference the
4053          * ptr argument. */
4054         abi_ulong atptr;
4055         get_user_ual(atptr, ptr);
4056         ret = do_semctl(first, second, third, atptr);
4057         break;
4058     }
4059 
4060     case IPCOP_msgget:
4061         ret = get_errno(msgget(first, second));
4062         break;
4063 
4064     case IPCOP_msgsnd:
4065         ret = do_msgsnd(first, ptr, second, third);
4066         break;
4067 
4068     case IPCOP_msgctl:
4069         ret = do_msgctl(first, second, ptr);
4070         break;
4071 
4072     case IPCOP_msgrcv:
4073         switch (version) {
4074         case 0:
4075             {
4076                 struct target_ipc_kludge {
4077                     abi_long msgp;
4078                     abi_long msgtyp;
4079                 } *tmp;
4080 
4081                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4082                     ret = -TARGET_EFAULT;
4083                     break;
4084                 }
4085 
4086                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4087 
4088                 unlock_user_struct(tmp, ptr, 0);
4089                 break;
4090             }
4091         default:
4092             ret = do_msgrcv(first, ptr, second, fifth, third);
4093         }
4094         break;
4095 
4096     case IPCOP_shmat:
4097         switch (version) {
4098         default:
4099         {
4100             abi_ulong raddr;
4101             raddr = do_shmat(cpu_env, first, ptr, second);
4102             if (is_error(raddr))
4103                 return get_errno(raddr);
4104             if (put_user_ual(raddr, third))
4105                 return -TARGET_EFAULT;
4106             break;
4107         }
4108         case 1:
4109             ret = -TARGET_EINVAL;
4110             break;
4111         }
4112         break;
4113     case IPCOP_shmdt:
4114         ret = do_shmdt(ptr);
4115         break;
4116 
4117     case IPCOP_shmget:
4118         /* IPC_* flag values are the same on all linux platforms */
4119         ret = get_errno(shmget(first, second, third));
4120         break;
4121 
4122     /* IPC_* and SHM_* command values are the same on all linux platforms */
4123     case IPCOP_shmctl:
4124         ret = do_shmctl(first, second, ptr);
4125         break;
4126     default:
4127         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4128         ret = -TARGET_ENOSYS;
4129         break;
4130     }
4131     return ret;
4132 }
4133 #endif
4134 
4135 /* kernel structure types definitions */
4136 
4137 #define STRUCT(name, ...) STRUCT_ ## name,
4138 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4139 enum {
4140 #include "syscall_types.h"
4141 STRUCT_MAX
4142 };
4143 #undef STRUCT
4144 #undef STRUCT_SPECIAL
4145 
4146 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4147 #define STRUCT_SPECIAL(name)
4148 #include "syscall_types.h"
4149 #undef STRUCT
4150 #undef STRUCT_SPECIAL
4151 
4152 typedef struct IOCTLEntry IOCTLEntry;
4153 
4154 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4155                              int fd, int cmd, abi_long arg);
4156 
4157 struct IOCTLEntry {
4158     int target_cmd;
4159     unsigned int host_cmd;
4160     const char *name;
4161     int access;
4162     do_ioctl_fn *do_ioctl;
4163     const argtype arg_type[5];
4164 };
4165 
4166 #define IOC_R 0x0001
4167 #define IOC_W 0x0002
4168 #define IOC_RW (IOC_R | IOC_W)
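/*
 * The lookup table of these entries is built later in this file; as a purely
 * illustrative sketch (field values abridged), an entry amounts to:
 *
 *     { TARGET_FS_IOC_FIEMAP,        target-side request number
 *       FS_IOC_FIEMAP,               host-side request number
 *       "FS_IOC_FIEMAP",             name, used for logging
 *       IOC_RW,                      argument data is copied in and out
 *       do_ioctl_fs_ioc_fiemap,      optional custom handler hook
 *       { TYPE_PTR, ... } }          argument description for the thunk code
 */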
4169 
4170 #define MAX_STRUCT_SIZE 4096
4171 
4172 #ifdef CONFIG_FIEMAP
4173 /* So fiemap access checks don't overflow on 32 bit systems.
4174  * This is very slightly smaller than the limit imposed by
4175  * the underlying kernel.
4176  */
4177 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4178                             / sizeof(struct fiemap_extent))
4179 
4180 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4181                                        int fd, int cmd, abi_long arg)
4182 {
4183     /* The parameter for this ioctl is a struct fiemap followed
4184      * by an array of struct fiemap_extent whose size is set
4185      * in fiemap->fm_extent_count. The array is filled in by the
4186      * ioctl.
4187      */
4188     int target_size_in, target_size_out;
4189     struct fiemap *fm;
4190     const argtype *arg_type = ie->arg_type;
4191     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4192     void *argptr, *p;
4193     abi_long ret;
4194     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4195     uint32_t outbufsz;
4196     int free_fm = 0;
4197 
4198     assert(arg_type[0] == TYPE_PTR);
4199     assert(ie->access == IOC_RW);
4200     arg_type++;
4201     target_size_in = thunk_type_size(arg_type, 0);
4202     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4203     if (!argptr) {
4204         return -TARGET_EFAULT;
4205     }
4206     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4207     unlock_user(argptr, arg, 0);
4208     fm = (struct fiemap *)buf_temp;
4209     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4210         return -TARGET_EINVAL;
4211     }
4212 
4213     outbufsz = sizeof (*fm) +
4214         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4215 
4216     if (outbufsz > MAX_STRUCT_SIZE) {
4217         /* We can't fit all the extents into the fixed size buffer.
4218          * Allocate one that is large enough and use it instead.
4219          */
4220         fm = g_try_malloc(outbufsz);
4221         if (!fm) {
4222             return -TARGET_ENOMEM;
4223         }
4224         memcpy(fm, buf_temp, sizeof(struct fiemap));
4225         free_fm = 1;
4226     }
4227     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4228     if (!is_error(ret)) {
4229         target_size_out = target_size_in;
4230         /* An extent_count of 0 means we were only counting the extents
4231          * so there are no structs to copy
4232          */
4233         if (fm->fm_extent_count != 0) {
4234             target_size_out += fm->fm_mapped_extents * extent_size;
4235         }
4236         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4237         if (!argptr) {
4238             ret = -TARGET_EFAULT;
4239         } else {
4240             /* Convert the struct fiemap */
4241             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4242             if (fm->fm_extent_count != 0) {
4243                 p = argptr + target_size_in;
4244                 /* ...and then all the struct fiemap_extents */
4245                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4246                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4247                                   THUNK_TARGET);
4248                     p += extent_size;
4249                 }
4250             }
4251             unlock_user(argptr, arg, target_size_out);
4252         }
4253     }
4254     if (free_fm) {
4255         g_free(fm);
4256     }
4257     return ret;
4258 }
4259 #endif
4260 
4261 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4262                                 int fd, int cmd, abi_long arg)
4263 {
4264     const argtype *arg_type = ie->arg_type;
4265     int target_size;
4266     void *argptr;
4267     int ret;
4268     struct ifconf *host_ifconf;
4269     uint32_t outbufsz;
4270     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4271     int target_ifreq_size;
4272     int nb_ifreq;
4273     int free_buf = 0;
4274     int i;
4275     int target_ifc_len;
4276     abi_long target_ifc_buf;
4277     int host_ifc_len;
4278     char *host_ifc_buf;
4279 
4280     assert(arg_type[0] == TYPE_PTR);
4281     assert(ie->access == IOC_RW);
4282 
4283     arg_type++;
4284     target_size = thunk_type_size(arg_type, 0);
4285 
4286     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4287     if (!argptr)
4288         return -TARGET_EFAULT;
4289     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4290     unlock_user(argptr, arg, 0);
4291 
4292     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4293     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4294     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4295 
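    /*
     * The guest's ifc_len is expressed in multiples of the target's struct
     * ifreq, which need not match the host's size.  Work out how many
     * entries that represents and size the host buffer from the host
     * sizeof(struct ifreq); the length is converted back the same way once
     * the ioctl has succeeded.
     */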
4296     if (target_ifc_buf != 0) {
4297         target_ifc_len = host_ifconf->ifc_len;
4298         nb_ifreq = target_ifc_len / target_ifreq_size;
4299         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4300 
4301         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4302         if (outbufsz > MAX_STRUCT_SIZE) {
4303             /*
4304              * We can't fit all the ifreq entries into the fixed size buffer.
4305              * Allocate one that is large enough and use it instead.
4306              */
4307             host_ifconf = malloc(outbufsz);
4308             if (!host_ifconf) {
4309                 return -TARGET_ENOMEM;
4310             }
4311             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4312             free_buf = 1;
4313         }
4314         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4315 
4316         host_ifconf->ifc_len = host_ifc_len;
4317     } else {
4318       host_ifc_buf = NULL;
4319     }
4320     host_ifconf->ifc_buf = host_ifc_buf;
4321 
4322     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4323     if (!is_error(ret)) {
4324 	/* convert host ifc_len to target ifc_len */
4325 
4326         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4327         target_ifc_len = nb_ifreq * target_ifreq_size;
4328         host_ifconf->ifc_len = target_ifc_len;
4329 
4330 	/* restore target ifc_buf */
4331 
4332         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4333 
4334 	/* copy struct ifconf to target user */
4335 
4336         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4337         if (!argptr)
4338             return -TARGET_EFAULT;
4339         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4340         unlock_user(argptr, arg, target_size);
4341 
4342         if (target_ifc_buf != 0) {
4343             /* copy ifreq[] to target user */
4344             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4345             for (i = 0; i < nb_ifreq ; i++) {
4346                 thunk_convert(argptr + i * target_ifreq_size,
4347                               host_ifc_buf + i * sizeof(struct ifreq),
4348                               ifreq_arg_type, THUNK_TARGET);
4349             }
4350             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4351         }
4352     }
4353 
4354     if (free_buf) {
4355         free(host_ifconf);
4356     }
4357 
4358     return ret;
4359 }
4360 
4361 #if defined(CONFIG_USBFS)
4362 #if HOST_LONG_BITS > 64
4363 #error USBDEVFS thunks do not support >64 bit hosts yet.
4364 #endif
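/*
 * Book-keeping for one guest-submitted URB: the guest addresses of the URB
 * and of its data buffer are kept alongside the host usbdevfs_urb so that
 * the REAPURB path can unlock the buffer and write the results back out.
 */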
4365 struct live_urb {
4366     uint64_t target_urb_adr;
4367     uint64_t target_buf_adr;
4368     char *target_buf_ptr;
4369     struct usbdevfs_urb host_urb;
4370 };
4371 
4372 static GHashTable *usbdevfs_urb_hashtable(void)
4373 {
4374     static GHashTable *urb_hashtable;
4375 
4376     if (!urb_hashtable) {
4377         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4378     }
4379     return urb_hashtable;
4380 }
4381 
4382 static void urb_hashtable_insert(struct live_urb *urb)
4383 {
4384     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4385     g_hash_table_insert(urb_hashtable, urb, urb);
4386 }
4387 
4388 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4389 {
4390     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4391     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4392 }
4393 
4394 static void urb_hashtable_remove(struct live_urb *urb)
4395 {
4396     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4397     g_hash_table_remove(urb_hashtable, urb);
4398 }
4399 
4400 static abi_long
4401 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4402                           int fd, int cmd, abi_long arg)
4403 {
4404     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4405     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4406     struct live_urb *lurb;
4407     void *argptr;
4408     uint64_t hurb;
4409     int target_size;
4410     uintptr_t target_urb_adr;
4411     abi_long ret;
4412 
4413     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4414 
4415     memset(buf_temp, 0, sizeof(uint64_t));
4416     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4417     if (is_error(ret)) {
4418         return ret;
4419     }
4420 
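    /*
     * The kernel returns the pointer we submitted, i.e. the address of
     * lurb->host_urb; step back by its offset to recover the containing
     * live_urb (the usual container_of pattern).
     */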
4421     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4422     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4423     if (!lurb->target_urb_adr) {
4424         return -TARGET_EFAULT;
4425     }
4426     urb_hashtable_remove(lurb);
4427     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4428         lurb->host_urb.buffer_length);
4429     lurb->target_buf_ptr = NULL;
4430 
4431     /* restore the guest buffer pointer */
4432     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4433 
4434     /* update the guest urb struct */
4435     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4436     if (!argptr) {
4437         g_free(lurb);
4438         return -TARGET_EFAULT;
4439     }
4440     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4441     unlock_user(argptr, lurb->target_urb_adr, target_size);
4442 
4443     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4444     /* write back the urb handle */
4445     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4446     if (!argptr) {
4447         g_free(lurb);
4448         return -TARGET_EFAULT;
4449     }
4450 
4451     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4452     target_urb_adr = lurb->target_urb_adr;
4453     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4454     unlock_user(argptr, arg, target_size);
4455 
4456     g_free(lurb);
4457     return ret;
4458 }
4459 
4460 static abi_long
4461 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4462                              uint8_t *buf_temp __attribute__((unused)),
4463                              int fd, int cmd, abi_long arg)
4464 {
4465     struct live_urb *lurb;
4466 
4467     /* map target address back to host URB with metadata. */
4468     lurb = urb_hashtable_lookup(arg);
4469     if (!lurb) {
4470         return -TARGET_EFAULT;
4471     }
4472     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4473 }
4474 
4475 static abi_long
4476 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4477                             int fd, int cmd, abi_long arg)
4478 {
4479     const argtype *arg_type = ie->arg_type;
4480     int target_size;
4481     abi_long ret;
4482     void *argptr;
4483     int rw_dir;
4484     struct live_urb *lurb;
4485 
4486     /*
4487      * each submitted URB needs to map to a unique ID for the
4488      * kernel, and that unique ID needs to be a pointer to
4489      * host memory.  hence, we need to malloc for each URB.
4490      * isochronous transfers have a variable length struct.
4491      */
4492     arg_type++;
4493     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4494 
4495     /* construct host copy of urb and metadata */
4496     lurb = g_try_malloc0(sizeof(struct live_urb));
4497     if (!lurb) {
4498         return -TARGET_ENOMEM;
4499     }
4500 
4501     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4502     if (!argptr) {
4503         g_free(lurb);
4504         return -TARGET_EFAULT;
4505     }
4506     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4507     unlock_user(argptr, arg, 0);
4508 
4509     lurb->target_urb_adr = arg;
4510     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4511 
4512     /* buffer space used depends on endpoint type so lock the entire buffer */
4513     /* control type urbs should check the buffer contents for true direction */
4514     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4515     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4516         lurb->host_urb.buffer_length, 1);
4517     if (lurb->target_buf_ptr == NULL) {
4518         g_free(lurb);
4519         return -TARGET_EFAULT;
4520     }
4521 
4522     /* update buffer pointer in host copy */
4523     lurb->host_urb.buffer = lurb->target_buf_ptr;
4524 
4525     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4526     if (is_error(ret)) {
4527         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4528         g_free(lurb);
4529     } else {
4530         urb_hashtable_insert(lurb);
4531     }
4532 
4533     return ret;
4534 }
4535 #endif /* CONFIG_USBFS */
4536 
4537 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4538                             int cmd, abi_long arg)
4539 {
4540     void *argptr;
4541     struct dm_ioctl *host_dm;
4542     abi_long guest_data;
4543     uint32_t guest_data_size;
4544     int target_size;
4545     const argtype *arg_type = ie->arg_type;
4546     abi_long ret;
4547     void *big_buf = NULL;
4548     char *host_data;
4549 
4550     arg_type++;
4551     target_size = thunk_type_size(arg_type, 0);
4552     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4553     if (!argptr) {
4554         ret = -TARGET_EFAULT;
4555         goto out;
4556     }
4557     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4558     unlock_user(argptr, arg, 0);
4559 
4560     /* buf_temp is too small, so fetch things into a bigger buffer */
4561     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4562     memcpy(big_buf, buf_temp, target_size);
4563     buf_temp = big_buf;
4564     host_dm = big_buf;
4565 
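    /*
     * A dm_ioctl request is a fixed header followed by a variable-sized
     * payload: data_start is the offset of the payload from the start of
     * the header and data_size the overall size, for the guest and host
     * copies alike.
     */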
4566     guest_data = arg + host_dm->data_start;
4567     if ((guest_data - arg) < 0) {
4568         ret = -TARGET_EINVAL;
4569         goto out;
4570     }
4571     guest_data_size = host_dm->data_size - host_dm->data_start;
4572     host_data = (char*)host_dm + host_dm->data_start;
4573 
4574     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4575     if (!argptr) {
4576         ret = -TARGET_EFAULT;
4577         goto out;
4578     }
4579 
4580     switch (ie->host_cmd) {
4581     case DM_REMOVE_ALL:
4582     case DM_LIST_DEVICES:
4583     case DM_DEV_CREATE:
4584     case DM_DEV_REMOVE:
4585     case DM_DEV_SUSPEND:
4586     case DM_DEV_STATUS:
4587     case DM_DEV_WAIT:
4588     case DM_TABLE_STATUS:
4589     case DM_TABLE_CLEAR:
4590     case DM_TABLE_DEPS:
4591     case DM_LIST_VERSIONS:
4592         /* no input data */
4593         break;
4594     case DM_DEV_RENAME:
4595     case DM_DEV_SET_GEOMETRY:
4596         /* data contains only strings */
4597         memcpy(host_data, argptr, guest_data_size);
4598         break;
4599     case DM_TARGET_MSG:
4600         memcpy(host_data, argptr, guest_data_size);
4601         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4602         break;
4603     case DM_TABLE_LOAD:
4604     {
4605         void *gspec = argptr;
4606         void *cur_data = host_data;
4607         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4608         int spec_size = thunk_type_size(arg_type, 0);
4609         int i;
4610 
4611         for (i = 0; i < host_dm->target_count; i++) {
4612             struct dm_target_spec *spec = cur_data;
4613             uint32_t next;
4614             int slen;
4615 
4616             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4617             slen = strlen((char*)gspec + spec_size) + 1;
4618             next = spec->next;
4619             spec->next = sizeof(*spec) + slen;
4620             strcpy((char*)&spec[1], gspec + spec_size);
4621             gspec += next;
4622             cur_data += spec->next;
4623         }
4624         break;
4625     }
4626     default:
4627         ret = -TARGET_EINVAL;
4628         unlock_user(argptr, guest_data, 0);
4629         goto out;
4630     }
4631     unlock_user(argptr, guest_data, 0);
4632 
4633     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4634     if (!is_error(ret)) {
4635         guest_data = arg + host_dm->data_start;
4636         guest_data_size = host_dm->data_size - host_dm->data_start;
4637         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4638         switch (ie->host_cmd) {
4639         case DM_REMOVE_ALL:
4640         case DM_DEV_CREATE:
4641         case DM_DEV_REMOVE:
4642         case DM_DEV_RENAME:
4643         case DM_DEV_SUSPEND:
4644         case DM_DEV_STATUS:
4645         case DM_TABLE_LOAD:
4646         case DM_TABLE_CLEAR:
4647         case DM_TARGET_MSG:
4648         case DM_DEV_SET_GEOMETRY:
4649             /* no return data */
4650             break;
4651         case DM_LIST_DEVICES:
4652         {
4653             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4654             uint32_t remaining_data = guest_data_size;
4655             void *cur_data = argptr;
4656             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4657             int nl_size = 12; /* can't use thunk_size due to alignment */
4658 
4659             while (1) {
4660                 uint32_t next = nl->next;
4661                 if (next) {
4662                     nl->next = nl_size + (strlen(nl->name) + 1);
4663                 }
4664                 if (remaining_data < nl->next) {
4665                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4666                     break;
4667                 }
4668                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4669                 strcpy(cur_data + nl_size, nl->name);
4670                 cur_data += nl->next;
4671                 remaining_data -= nl->next;
4672                 if (!next) {
4673                     break;
4674                 }
4675                 nl = (void*)nl + next;
4676             }
4677             break;
4678         }
4679         case DM_DEV_WAIT:
4680         case DM_TABLE_STATUS:
4681         {
4682             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4683             void *cur_data = argptr;
4684             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4685             int spec_size = thunk_type_size(arg_type, 0);
4686             int i;
4687 
4688             for (i = 0; i < host_dm->target_count; i++) {
4689                 uint32_t next = spec->next;
4690                 int slen = strlen((char*)&spec[1]) + 1;
4691                 spec->next = (cur_data - argptr) + spec_size + slen;
4692                 if (guest_data_size < spec->next) {
4693                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4694                     break;
4695                 }
4696                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4697                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4698                 cur_data = argptr + spec->next;
4699                 spec = (void*)host_dm + host_dm->data_start + next;
4700             }
4701             break;
4702         }
4703         case DM_TABLE_DEPS:
4704         {
4705             void *hdata = (void*)host_dm + host_dm->data_start;
4706             int count = *(uint32_t*)hdata;
4707             uint64_t *hdev = hdata + 8;
4708             uint64_t *gdev = argptr + 8;
4709             int i;
4710 
4711             *(uint32_t*)argptr = tswap32(count);
4712             for (i = 0; i < count; i++) {
4713                 *gdev = tswap64(*hdev);
4714                 gdev++;
4715                 hdev++;
4716             }
4717             break;
4718         }
4719         case DM_LIST_VERSIONS:
4720         {
4721             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4722             uint32_t remaining_data = guest_data_size;
4723             void *cur_data = argptr;
4724             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4725             int vers_size = thunk_type_size(arg_type, 0);
4726 
4727             while (1) {
4728                 uint32_t next = vers->next;
4729                 if (next) {
4730                     vers->next = vers_size + (strlen(vers->name) + 1);
4731                 }
4732                 if (remaining_data < vers->next) {
4733                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4734                     break;
4735                 }
4736                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4737                 strcpy(cur_data + vers_size, vers->name);
4738                 cur_data += vers->next;
4739                 remaining_data -= vers->next;
4740                 if (!next) {
4741                     break;
4742                 }
4743                 vers = (void*)vers + next;
4744             }
4745             break;
4746         }
4747         default:
4748             unlock_user(argptr, guest_data, 0);
4749             ret = -TARGET_EINVAL;
4750             goto out;
4751         }
4752         unlock_user(argptr, guest_data, guest_data_size);
4753 
4754         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4755         if (!argptr) {
4756             ret = -TARGET_EFAULT;
4757             goto out;
4758         }
4759         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4760         unlock_user(argptr, arg, target_size);
4761     }
4762 out:
4763     g_free(big_buf);
4764     return ret;
4765 }
4766 
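/*
 * BLKPG handling: the ioctl argument is a struct blkpg_ioctl_arg whose
 * 'data' member points at a separate struct blkpg_partition.  Convert the
 * outer structure into buf_temp, then fetch and convert the partition
 * payload, and point the host structure's 'data' at our local copy before
 * issuing the host ioctl.
 */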
4767 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4768                                int cmd, abi_long arg)
4769 {
4770     void *argptr;
4771     int target_size;
4772     const argtype *arg_type = ie->arg_type;
4773     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4774     abi_long ret;
4775 
4776     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4777     struct blkpg_partition host_part;
4778 
4779     /* Read and convert blkpg */
4780     arg_type++;
4781     target_size = thunk_type_size(arg_type, 0);
4782     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4783     if (!argptr) {
4784         ret = -TARGET_EFAULT;
4785         goto out;
4786     }
4787     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4788     unlock_user(argptr, arg, 0);
4789 
4790     switch (host_blkpg->op) {
4791     case BLKPG_ADD_PARTITION:
4792     case BLKPG_DEL_PARTITION:
4793         /* payload is struct blkpg_partition */
4794         break;
4795     default:
4796         /* Unknown opcode */
4797         ret = -TARGET_EINVAL;
4798         goto out;
4799     }
4800 
4801     /* Read and convert blkpg->data */
4802     arg = (abi_long)(uintptr_t)host_blkpg->data;
4803     target_size = thunk_type_size(part_arg_type, 0);
4804     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4805     if (!argptr) {
4806         ret = -TARGET_EFAULT;
4807         goto out;
4808     }
4809     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4810     unlock_user(argptr, arg, 0);
4811 
4812     /* Swizzle the data pointer to our local copy and call! */
4813     host_blkpg->data = &host_part;
4814     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4815 
4816 out:
4817     return ret;
4818 }
4819 
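/*
 * Write-only ioctls whose argument is a struct rtentry (the routing-table
 * requests such as SIOCADDRT/SIOCDELRT).  The rt_dev member is a pointer
 * to a device-name string in guest memory, so the structure is converted
 * field by field and that string is locked separately and passed through
 * as a host pointer.
 */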
4820 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4821                                 int fd, int cmd, abi_long arg)
4822 {
4823     const argtype *arg_type = ie->arg_type;
4824     const StructEntry *se;
4825     const argtype *field_types;
4826     const int *dst_offsets, *src_offsets;
4827     int target_size;
4828     void *argptr;
4829     abi_ulong *target_rt_dev_ptr = NULL;
4830     unsigned long *host_rt_dev_ptr = NULL;
4831     abi_long ret;
4832     int i;
4833 
4834     assert(ie->access == IOC_W);
4835     assert(*arg_type == TYPE_PTR);
4836     arg_type++;
4837     assert(*arg_type == TYPE_STRUCT);
4838     target_size = thunk_type_size(arg_type, 0);
4839     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4840     if (!argptr) {
4841         return -TARGET_EFAULT;
4842     }
4843     arg_type++;
4844     assert(*arg_type == (int)STRUCT_rtentry);
4845     se = struct_entries + *arg_type++;
4846     assert(se->convert[0] == NULL);
4847     /* convert struct here to be able to catch rt_dev string */
4848     field_types = se->field_types;
4849     dst_offsets = se->field_offsets[THUNK_HOST];
4850     src_offsets = se->field_offsets[THUNK_TARGET];
4851     for (i = 0; i < se->nb_fields; i++) {
4852         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4853             assert(*field_types == TYPE_PTRVOID);
4854             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4855             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4856             if (*target_rt_dev_ptr != 0) {
4857                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4858                                                   tswapal(*target_rt_dev_ptr));
4859                 if (!*host_rt_dev_ptr) {
4860                     unlock_user(argptr, arg, 0);
4861                     return -TARGET_EFAULT;
4862                 }
4863             } else {
4864                 *host_rt_dev_ptr = 0;
4865             }
4866             field_types++;
4867             continue;
4868         }
4869         field_types = thunk_convert(buf_temp + dst_offsets[i],
4870                                     argptr + src_offsets[i],
4871                                     field_types, THUNK_HOST);
4872     }
4873     unlock_user(argptr, arg, 0);
4874 
4875     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4876 
4877     assert(host_rt_dev_ptr != NULL);
4878     assert(target_rt_dev_ptr != NULL);
4879     if (*host_rt_dev_ptr != 0) {
4880         unlock_user((void *)*host_rt_dev_ptr,
4881                     *target_rt_dev_ptr, 0);
4882     }
4883     return ret;
4884 }
4885 
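/* KDSIGACCEPT takes a signal number, so translate the target signal
 * number to the host one before issuing the ioctl. */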
4886 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4887                                      int fd, int cmd, abi_long arg)
4888 {
4889     int sig = target_to_host_signal(arg);
4890     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4891 }
4892 
4893 #ifdef TIOCGPTPEER
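/* TIOCGPTPEER takes open(2)-style flags, so translate the target flag
 * bits into their host values before issuing the ioctl. */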
4894 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
4895                                      int fd, int cmd, abi_long arg)
4896 {
4897     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
4898     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
4899 }
4900 #endif
4901 
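/*
 * The ioctl table is generated from ioctls.h.  IOCTL_SPECIAL entries carry
 * a custom conversion handler; IOCTL_IGNORE entries have a zero host
 * command and are reported as unsupported by do_ioctl() below.
 */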
4902 static IOCTLEntry ioctl_entries[] = {
4903 #define IOCTL(cmd, access, ...) \
4904     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
4905 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
4906     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
4907 #define IOCTL_IGNORE(cmd) \
4908     { TARGET_ ## cmd, 0, #cmd },
4909 #include "ioctls.h"
4910     { 0, 0, },
4911 };
4912 
4913 /* ??? Implement proper locking for ioctls.  */
4914 /* do_ioctl() must return target values and target errnos. */
4915 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4916 {
4917     const IOCTLEntry *ie;
4918     const argtype *arg_type;
4919     abi_long ret;
4920     uint8_t buf_temp[MAX_STRUCT_SIZE];
4921     int target_size;
4922     void *argptr;
4923 
4924     ie = ioctl_entries;
4925     for(;;) {
4926         if (ie->target_cmd == 0) {
4927             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4928             return -TARGET_ENOSYS;
4929         }
4930         if (ie->target_cmd == cmd)
4931             break;
4932         ie++;
4933     }
4934     arg_type = ie->arg_type;
4935     if (ie->do_ioctl) {
4936         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4937     } else if (!ie->host_cmd) {
4938         /* Some architectures define BSD ioctls in their headers
4939            that are not implemented in Linux.  */
4940         return -TARGET_ENOSYS;
4941     }
4942 
4943     switch(arg_type[0]) {
4944     case TYPE_NULL:
4945         /* no argument */
4946         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
4947         break;
4948     case TYPE_PTRVOID:
4949     case TYPE_INT:
4950         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
4951         break;
4952     case TYPE_PTR:
4953         arg_type++;
4954         target_size = thunk_type_size(arg_type, 0);
4955         switch(ie->access) {
4956         case IOC_R:
4957             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4958             if (!is_error(ret)) {
4959                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4960                 if (!argptr)
4961                     return -TARGET_EFAULT;
4962                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4963                 unlock_user(argptr, arg, target_size);
4964             }
4965             break;
4966         case IOC_W:
4967             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4968             if (!argptr)
4969                 return -TARGET_EFAULT;
4970             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4971             unlock_user(argptr, arg, 0);
4972             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4973             break;
4974         default:
4975         case IOC_RW:
4976             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4977             if (!argptr)
4978                 return -TARGET_EFAULT;
4979             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4980             unlock_user(argptr, arg, 0);
4981             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4982             if (!is_error(ret)) {
4983                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4984                 if (!argptr)
4985                     return -TARGET_EFAULT;
4986                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4987                 unlock_user(argptr, arg, target_size);
4988             }
4989             break;
4990         }
4991         break;
4992     default:
4993         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4994                  (long)cmd, arg_type[0]);
4995         ret = -TARGET_ENOSYS;
4996         break;
4997     }
4998     return ret;
4999 }
5000 
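/*
 * Terminal flag translation tables.  Each bitmask_transtbl entry gives a
 * target mask/value pair followed by the matching host mask/value pair;
 * for example { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 } maps the CS7
 * setting within the CSIZE field in both directions.
 */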
5001 static const bitmask_transtbl iflag_tbl[] = {
5002         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5003         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5004         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5005         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5006         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5007         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5008         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5009         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5010         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5011         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5012         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5013         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5014         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5015         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5016         { 0, 0, 0, 0 }
5017 };
5018 
5019 static const bitmask_transtbl oflag_tbl[] = {
5020 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5021 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5022 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5023 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5024 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5025 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5026 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5027 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5028 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5029 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5030 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5031 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5032 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5033 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5034 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5035 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5036 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5037 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5038 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5039 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5040 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5041 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5042 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5043 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5044 	{ 0, 0, 0, 0 }
5045 };
5046 
5047 static const bitmask_transtbl cflag_tbl[] = {
5048 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5049 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5050 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5051 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5052 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5053 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5054 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5055 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5056 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5057 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5058 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5059 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5060 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5061 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5062 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5063 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5064 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5065 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5066 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5067 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5068 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5069 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5070 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5071 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5072 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5073 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5074 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5075 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5076 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5077 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5078 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5079 	{ 0, 0, 0, 0 }
5080 };
5081 
5082 static const bitmask_transtbl lflag_tbl[] = {
5083 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5084 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5085 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5086 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5087 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5088 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5089 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5090 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5091 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5092 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5093 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5094 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5095 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5096 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5097 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5098 	{ 0, 0, 0, 0 }
5099 };
5100 
5101 static void target_to_host_termios (void *dst, const void *src)
5102 {
5103     struct host_termios *host = dst;
5104     const struct target_termios *target = src;
5105 
5106     host->c_iflag =
5107         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5108     host->c_oflag =
5109         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5110     host->c_cflag =
5111         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5112     host->c_lflag =
5113         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5114     host->c_line = target->c_line;
5115 
5116     memset(host->c_cc, 0, sizeof(host->c_cc));
5117     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5118     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5119     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5120     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5121     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5122     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5123     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5124     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5125     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5126     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5127     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5128     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5129     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5130     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5131     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5132     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5133     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5134 }
5135 
5136 static void host_to_target_termios (void *dst, const void *src)
5137 {
5138     struct target_termios *target = dst;
5139     const struct host_termios *host = src;
5140 
5141     target->c_iflag =
5142         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5143     target->c_oflag =
5144         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5145     target->c_cflag =
5146         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5147     target->c_lflag =
5148         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5149     target->c_line = host->c_line;
5150 
5151     memset(target->c_cc, 0, sizeof(target->c_cc));
5152     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5153     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5154     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5155     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5156     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5157     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5158     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5159     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5160     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5161     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5162     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5163     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5164     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5165     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5166     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5167     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5168     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5169 }
5170 
5171 static const StructEntry struct_termios_def = {
5172     .convert = { host_to_target_termios, target_to_host_termios },
5173     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5174     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5175 };
5176 
5177 static bitmask_transtbl mmap_flags_tbl[] = {
5178     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5179     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5180     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5181     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5182       MAP_ANONYMOUS, MAP_ANONYMOUS },
5183     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5184       MAP_GROWSDOWN, MAP_GROWSDOWN },
5185     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5186       MAP_DENYWRITE, MAP_DENYWRITE },
5187     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5188       MAP_EXECUTABLE, MAP_EXECUTABLE },
5189     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5190     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5191       MAP_NORESERVE, MAP_NORESERVE },
5192     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5193     /* MAP_STACK had been ignored by the kernel for quite some time.
5194        Recognize it for the target insofar as we do not want to pass
5195        it through to the host.  */
5196     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5197     { 0, 0, 0, 0 }
5198 };
5199 
5200 #if defined(TARGET_I386)
5201 
5202 /* NOTE: there is really only one LDT, shared by all threads */
5203 static uint8_t *ldt_table;
5204 
5205 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5206 {
5207     int size;
5208     void *p;
5209 
5210     if (!ldt_table)
5211         return 0;
5212     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5213     if (size > bytecount)
5214         size = bytecount;
5215     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5216     if (!p)
5217         return -TARGET_EFAULT;
5218     /* ??? Should this be byteswapped?  */
5219     memcpy(p, ldt_table, size);
5220     unlock_user(p, ptr, size);
5221     return size;
5222 }
5223 
5224 /* XXX: add locking support */
5225 static abi_long write_ldt(CPUX86State *env,
5226                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5227 {
5228     struct target_modify_ldt_ldt_s ldt_info;
5229     struct target_modify_ldt_ldt_s *target_ldt_info;
5230     int seg_32bit, contents, read_exec_only, limit_in_pages;
5231     int seg_not_present, useable, lm;
5232     uint32_t *lp, entry_1, entry_2;
5233 
5234     if (bytecount != sizeof(ldt_info))
5235         return -TARGET_EINVAL;
5236     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5237         return -TARGET_EFAULT;
5238     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5239     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5240     ldt_info.limit = tswap32(target_ldt_info->limit);
5241     ldt_info.flags = tswap32(target_ldt_info->flags);
5242     unlock_user_struct(target_ldt_info, ptr, 0);
5243 
5244     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5245         return -TARGET_EINVAL;
5246     seg_32bit = ldt_info.flags & 1;
5247     contents = (ldt_info.flags >> 1) & 3;
5248     read_exec_only = (ldt_info.flags >> 3) & 1;
5249     limit_in_pages = (ldt_info.flags >> 4) & 1;
5250     seg_not_present = (ldt_info.flags >> 5) & 1;
5251     useable = (ldt_info.flags >> 6) & 1;
5252 #ifdef TARGET_ABI32
5253     lm = 0;
5254 #else
5255     lm = (ldt_info.flags >> 7) & 1;
5256 #endif
5257     if (contents == 3) {
5258         if (oldmode)
5259             return -TARGET_EINVAL;
5260         if (seg_not_present == 0)
5261             return -TARGET_EINVAL;
5262     }
5263     /* allocate the LDT */
5264     if (!ldt_table) {
5265         env->ldt.base = target_mmap(0,
5266                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5267                                     PROT_READ|PROT_WRITE,
5268                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5269         if (env->ldt.base == -1)
5270             return -TARGET_ENOMEM;
5271         memset(g2h(env->ldt.base), 0,
5272                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5273         env->ldt.limit = 0xffff;
5274         ldt_table = g2h(env->ldt.base);
5275     }
5276 
5277     /* NOTE: same code as Linux kernel */
5278     /* Allow LDTs to be cleared by the user. */
5279     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5280         if (oldmode ||
5281             (contents == 0		&&
5282              read_exec_only == 1	&&
5283              seg_32bit == 0		&&
5284              limit_in_pages == 0	&&
5285              seg_not_present == 1	&&
5286              useable == 0 )) {
5287             entry_1 = 0;
5288             entry_2 = 0;
5289             goto install;
5290         }
5291     }
5292 
5293     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5294         (ldt_info.limit & 0x0ffff);
5295     entry_2 = (ldt_info.base_addr & 0xff000000) |
5296         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5297         (ldt_info.limit & 0xf0000) |
5298         ((read_exec_only ^ 1) << 9) |
5299         (contents << 10) |
5300         ((seg_not_present ^ 1) << 15) |
5301         (seg_32bit << 22) |
5302         (limit_in_pages << 23) |
5303         (lm << 21) |
5304         0x7000;
5305     if (!oldmode)
5306         entry_2 |= (useable << 20);
5307 
5308     /* Install the new entry ...  */
5309 install:
5310     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5311     lp[0] = tswap32(entry_1);
5312     lp[1] = tswap32(entry_2);
5313     return 0;
5314 }
5315 
5316 /* specific and weird i386 syscalls */
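/* modify_ldt(): func 0 reads the LDT, func 1 writes an entry with the old
 * semantics and func 0x11 with the new ones; the difference is handled by
 * the oldmode argument of write_ldt() above. */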
5317 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5318                               unsigned long bytecount)
5319 {
5320     abi_long ret;
5321 
5322     switch (func) {
5323     case 0:
5324         ret = read_ldt(ptr, bytecount);
5325         break;
5326     case 1:
5327         ret = write_ldt(env, ptr, bytecount, 1);
5328         break;
5329     case 0x11:
5330         ret = write_ldt(env, ptr, bytecount, 0);
5331         break;
5332     default:
5333         ret = -TARGET_ENOSYS;
5334         break;
5335     }
5336     return ret;
5337 }
5338 
5339 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5340 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5341 {
5342     uint64_t *gdt_table = g2h(env->gdt.base);
5343     struct target_modify_ldt_ldt_s ldt_info;
5344     struct target_modify_ldt_ldt_s *target_ldt_info;
5345     int seg_32bit, contents, read_exec_only, limit_in_pages;
5346     int seg_not_present, useable, lm;
5347     uint32_t *lp, entry_1, entry_2;
5348     int i;
5349 
5350     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5351     if (!target_ldt_info)
5352         return -TARGET_EFAULT;
5353     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5354     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5355     ldt_info.limit = tswap32(target_ldt_info->limit);
5356     ldt_info.flags = tswap32(target_ldt_info->flags);
5357     if (ldt_info.entry_number == -1) {
5358         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5359             if (gdt_table[i] == 0) {
5360                 ldt_info.entry_number = i;
5361                 target_ldt_info->entry_number = tswap32(i);
5362                 break;
5363             }
5364         }
5365     }
5366     unlock_user_struct(target_ldt_info, ptr, 1);
5367 
5368     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5369         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5370            return -TARGET_EINVAL;
5371     seg_32bit = ldt_info.flags & 1;
5372     contents = (ldt_info.flags >> 1) & 3;
5373     read_exec_only = (ldt_info.flags >> 3) & 1;
5374     limit_in_pages = (ldt_info.flags >> 4) & 1;
5375     seg_not_present = (ldt_info.flags >> 5) & 1;
5376     useable = (ldt_info.flags >> 6) & 1;
5377 #ifdef TARGET_ABI32
5378     lm = 0;
5379 #else
5380     lm = (ldt_info.flags >> 7) & 1;
5381 #endif
5382 
5383     if (contents == 3) {
5384         if (seg_not_present == 0)
5385             return -TARGET_EINVAL;
5386     }
5387 
5388     /* NOTE: same code as Linux kernel */
5389     /* Allow LDTs to be cleared by the user. */
5390     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5391         if ((contents == 0             &&
5392              read_exec_only == 1       &&
5393              seg_32bit == 0            &&
5394              limit_in_pages == 0       &&
5395              seg_not_present == 1      &&
5396              useable == 0 )) {
5397             entry_1 = 0;
5398             entry_2 = 0;
5399             goto install;
5400         }
5401     }
5402 
5403     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5404         (ldt_info.limit & 0x0ffff);
5405     entry_2 = (ldt_info.base_addr & 0xff000000) |
5406         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5407         (ldt_info.limit & 0xf0000) |
5408         ((read_exec_only ^ 1) << 9) |
5409         (contents << 10) |
5410         ((seg_not_present ^ 1) << 15) |
5411         (seg_32bit << 22) |
5412         (limit_in_pages << 23) |
5413         (useable << 20) |
5414         (lm << 21) |
5415         0x7000;
5416 
5417     /* Install the new entry ...  */
5418 install:
5419     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5420     lp[0] = tswap32(entry_1);
5421     lp[1] = tswap32(entry_2);
5422     return 0;
5423 }
5424 
5425 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5426 {
5427     struct target_modify_ldt_ldt_s *target_ldt_info;
5428     uint64_t *gdt_table = g2h(env->gdt.base);
5429     uint32_t base_addr, limit, flags;
5430     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5431     int seg_not_present, useable, lm;
5432     uint32_t *lp, entry_1, entry_2;
5433 
5434     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5435     if (!target_ldt_info)
5436         return -TARGET_EFAULT;
5437     idx = tswap32(target_ldt_info->entry_number);
5438     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5439         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5440         unlock_user_struct(target_ldt_info, ptr, 1);
5441         return -TARGET_EINVAL;
5442     }
5443     lp = (uint32_t *)(gdt_table + idx);
5444     entry_1 = tswap32(lp[0]);
5445     entry_2 = tswap32(lp[1]);
5446 
5447     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5448     contents = (entry_2 >> 10) & 3;
5449     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5450     seg_32bit = (entry_2 >> 22) & 1;
5451     limit_in_pages = (entry_2 >> 23) & 1;
5452     useable = (entry_2 >> 20) & 1;
5453 #ifdef TARGET_ABI32
5454     lm = 0;
5455 #else
5456     lm = (entry_2 >> 21) & 1;
5457 #endif
5458     flags = (seg_32bit << 0) | (contents << 1) |
5459         (read_exec_only << 3) | (limit_in_pages << 4) |
5460         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5461     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5462     base_addr = (entry_1 >> 16) |
5463         (entry_2 & 0xff000000) |
5464         ((entry_2 & 0xff) << 16);
5465     target_ldt_info->base_addr = tswapal(base_addr);
5466     target_ldt_info->limit = tswap32(limit);
5467     target_ldt_info->flags = tswap32(flags);
5468     unlock_user_struct(target_ldt_info, ptr, 1);
5469     return 0;
5470 }
5471 #endif /* TARGET_I386 && TARGET_ABI32 */
5472 
5473 #ifndef TARGET_ABI32
5474 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5475 {
5476     abi_long ret = 0;
5477     abi_ulong val;
5478     int idx;
5479 
5480     switch(code) {
5481     case TARGET_ARCH_SET_GS:
5482     case TARGET_ARCH_SET_FS:
5483         if (code == TARGET_ARCH_SET_GS)
5484             idx = R_GS;
5485         else
5486             idx = R_FS;
5487         cpu_x86_load_seg(env, idx, 0);
5488         env->segs[idx].base = addr;
5489         break;
5490     case TARGET_ARCH_GET_GS:
5491     case TARGET_ARCH_GET_FS:
5492         if (code == TARGET_ARCH_GET_GS)
5493             idx = R_GS;
5494         else
5495             idx = R_FS;
5496         val = env->segs[idx].base;
5497         if (put_user(val, addr, abi_ulong))
5498             ret = -TARGET_EFAULT;
5499         break;
5500     default:
5501         ret = -TARGET_EINVAL;
5502         break;
5503     }
5504     return ret;
5505 }
5506 #endif
5507 
5508 #endif /* defined(TARGET_I386) */
5509 
5510 #define NEW_STACK_SIZE 0x40000
5511 
5512 
5513 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5514 typedef struct {
5515     CPUArchState *env;
5516     pthread_mutex_t mutex;
5517     pthread_cond_t cond;
5518     pthread_t thread;
5519     uint32_t tid;
5520     abi_ulong child_tidptr;
5521     abi_ulong parent_tidptr;
5522     sigset_t sigmask;
5523 } new_thread_info;
5524 
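/*
 * Entry point of a CLONE_VM child created via pthread_create() in do_fork()
 * below: register the new thread with RCU and TCG, publish its TID to the
 * requested child/parent locations, signal the parent that setup is done
 * and then enter the guest cpu_loop(), which never returns.
 */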
5525 static void *clone_func(void *arg)
5526 {
5527     new_thread_info *info = arg;
5528     CPUArchState *env;
5529     CPUState *cpu;
5530     TaskState *ts;
5531 
5532     rcu_register_thread();
5533     tcg_register_thread();
5534     env = info->env;
5535     cpu = env_cpu(env);
5536     thread_cpu = cpu;
5537     ts = (TaskState *)cpu->opaque;
5538     info->tid = sys_gettid();
5539     task_settid(ts);
5540     if (info->child_tidptr)
5541         put_user_u32(info->tid, info->child_tidptr);
5542     if (info->parent_tidptr)
5543         put_user_u32(info->tid, info->parent_tidptr);
5544     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5545     /* Enable signals.  */
5546     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5547     /* Signal to the parent that we're ready.  */
5548     pthread_mutex_lock(&info->mutex);
5549     pthread_cond_broadcast(&info->cond);
5550     pthread_mutex_unlock(&info->mutex);
5551     /* Wait until the parent has finished initializing the TLS state.  */
5552     pthread_mutex_lock(&clone_lock);
5553     pthread_mutex_unlock(&clone_lock);
5554     cpu_loop(env);
5555     /* never exits */
5556     return NULL;
5557 }
5558 
5559 /* do_fork() must return host values and target errnos (unlike most
5560    do_*() functions). */
5561 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5562                    abi_ulong parent_tidptr, target_ulong newtls,
5563                    abi_ulong child_tidptr)
5564 {
5565     CPUState *cpu = env_cpu(env);
5566     int ret;
5567     TaskState *ts;
5568     CPUState *new_cpu;
5569     CPUArchState *new_env;
5570     sigset_t sigmask;
5571 
5572     flags &= ~CLONE_IGNORED_FLAGS;
5573 
5574     /* Emulate vfork() with fork() */
5575     if (flags & CLONE_VFORK)
5576         flags &= ~(CLONE_VFORK | CLONE_VM);
5577 
5578     if (flags & CLONE_VM) {
5579         TaskState *parent_ts = (TaskState *)cpu->opaque;
5580         new_thread_info info;
5581         pthread_attr_t attr;
5582 
5583         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5584             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5585             return -TARGET_EINVAL;
5586         }
5587 
5588         ts = g_new0(TaskState, 1);
5589         init_task_state(ts);
5590 
5591         /* Grab a mutex so that thread setup appears atomic.  */
5592         pthread_mutex_lock(&clone_lock);
5593 
5594         /* we create a new CPU instance. */
5595         new_env = cpu_copy(env);
5596         /* Init regs that differ from the parent.  */
5597         cpu_clone_regs(new_env, newsp);
5598         new_cpu = env_cpu(new_env);
5599         new_cpu->opaque = ts;
5600         ts->bprm = parent_ts->bprm;
5601         ts->info = parent_ts->info;
5602         ts->signal_mask = parent_ts->signal_mask;
5603 
5604         if (flags & CLONE_CHILD_CLEARTID) {
5605             ts->child_tidptr = child_tidptr;
5606         }
5607 
5608         if (flags & CLONE_SETTLS) {
5609             cpu_set_tls (new_env, newtls);
5610         }
5611 
5612         memset(&info, 0, sizeof(info));
5613         pthread_mutex_init(&info.mutex, NULL);
5614         pthread_mutex_lock(&info.mutex);
5615         pthread_cond_init(&info.cond, NULL);
5616         info.env = new_env;
5617         if (flags & CLONE_CHILD_SETTID) {
5618             info.child_tidptr = child_tidptr;
5619         }
5620         if (flags & CLONE_PARENT_SETTID) {
5621             info.parent_tidptr = parent_tidptr;
5622         }
5623 
5624         ret = pthread_attr_init(&attr);
5625         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5626         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5627         /* It is not safe to deliver signals until the child has finished
5628            initializing, so temporarily block all signals.  */
5629         sigfillset(&sigmask);
5630         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5631         cpu->random_seed = qemu_guest_random_seed_thread_part1();
5632 
5633         /* If this is our first additional thread, we need to ensure we
5634          * generate code for parallel execution and flush old translations.
5635          */
5636         if (!parallel_cpus) {
5637             parallel_cpus = true;
5638             tb_flush(cpu);
5639         }
5640 
5641         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5642         /* TODO: Free new CPU state if thread creation failed.  */
5643 
5644         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5645         pthread_attr_destroy(&attr);
5646         if (ret == 0) {
5647             /* Wait for the child to initialize.  */
5648             pthread_cond_wait(&info.cond, &info.mutex);
5649             ret = info.tid;
5650         } else {
5651             ret = -1;
5652         }
5653         pthread_mutex_unlock(&info.mutex);
5654         pthread_cond_destroy(&info.cond);
5655         pthread_mutex_destroy(&info.mutex);
5656         pthread_mutex_unlock(&clone_lock);
5657     } else {
5658         /* if there is no CLONE_VM, we consider it a fork */
5659         if (flags & CLONE_INVALID_FORK_FLAGS) {
5660             return -TARGET_EINVAL;
5661         }
5662 
5663         /* We can't support custom termination signals */
5664         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5665             return -TARGET_EINVAL;
5666         }
5667 
5668         if (block_signals()) {
5669             return -TARGET_ERESTARTSYS;
5670         }
5671 
5672         fork_start();
5673         ret = fork();
5674         if (ret == 0) {
5675             /* Child Process.  */
5676             cpu_clone_regs(env, newsp);
5677             fork_end(1);
5678             /* There is a race condition here.  The parent process could
5679                theoretically read the TID in the child process before the child
5680                tid is set.  This would require using either ptrace
5681                (not implemented) or having *_tidptr point at a shared memory
5682                mapping.  We can't repeat the spinlock hack used above because
5683                the child process gets its own copy of the lock.  */
5684             if (flags & CLONE_CHILD_SETTID)
5685                 put_user_u32(sys_gettid(), child_tidptr);
5686             if (flags & CLONE_PARENT_SETTID)
5687                 put_user_u32(sys_gettid(), parent_tidptr);
5688             ts = (TaskState *)cpu->opaque;
5689             if (flags & CLONE_SETTLS)
5690                 cpu_set_tls (env, newtls);
5691             if (flags & CLONE_CHILD_CLEARTID)
5692                 ts->child_tidptr = child_tidptr;
5693         } else {
5694             fork_end(0);
5695         }
5696     }
5697     return ret;
5698 }
5699 
5700 /* warning: doesn't handle Linux-specific flags... */
5701 static int target_to_host_fcntl_cmd(int cmd)
5702 {
5703     int ret;
5704 
5705     switch(cmd) {
5706     case TARGET_F_DUPFD:
5707     case TARGET_F_GETFD:
5708     case TARGET_F_SETFD:
5709     case TARGET_F_GETFL:
5710     case TARGET_F_SETFL:
5711         ret = cmd;
5712         break;
5713     case TARGET_F_GETLK:
5714         ret = F_GETLK64;
5715         break;
5716     case TARGET_F_SETLK:
5717         ret = F_SETLK64;
5718         break;
5719     case TARGET_F_SETLKW:
5720         ret = F_SETLKW64;
5721         break;
5722     case TARGET_F_GETOWN:
5723         ret = F_GETOWN;
5724         break;
5725     case TARGET_F_SETOWN:
5726         ret = F_SETOWN;
5727         break;
5728     case TARGET_F_GETSIG:
5729         ret = F_GETSIG;
5730         break;
5731     case TARGET_F_SETSIG:
5732         ret = F_SETSIG;
5733         break;
5734 #if TARGET_ABI_BITS == 32
5735     case TARGET_F_GETLK64:
5736         ret = F_GETLK64;
5737         break;
5738     case TARGET_F_SETLK64:
5739         ret = F_SETLK64;
5740         break;
5741     case TARGET_F_SETLKW64:
5742         ret = F_SETLKW64;
5743         break;
5744 #endif
5745     case TARGET_F_SETLEASE:
5746         ret = F_SETLEASE;
5747         break;
5748     case TARGET_F_GETLEASE:
5749         ret = F_GETLEASE;
5750         break;
5751 #ifdef F_DUPFD_CLOEXEC
5752     case TARGET_F_DUPFD_CLOEXEC:
5753         ret = F_DUPFD_CLOEXEC;
5754         break;
5755 #endif
5756     case TARGET_F_NOTIFY:
5757         ret = F_NOTIFY;
5758         break;
5759 #ifdef F_GETOWN_EX
5760     case TARGET_F_GETOWN_EX:
5761         ret = F_GETOWN_EX;
5762         break;
5763 #endif
5764 #ifdef F_SETOWN_EX
5765     case TARGET_F_SETOWN_EX:
5766         ret = F_SETOWN_EX;
5767         break;
5768 #endif
5769 #ifdef F_SETPIPE_SZ
5770     case TARGET_F_SETPIPE_SZ:
5771         ret = F_SETPIPE_SZ;
5772         break;
5773     case TARGET_F_GETPIPE_SZ:
5774         ret = F_GETPIPE_SZ;
5775         break;
5776 #endif
5777     default:
5778         ret = -TARGET_EINVAL;
5779         break;
5780     }
5781 
5782 #if defined(__powerpc64__)
5783     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, values
5784      * that are not supported by the kernel. The glibc fcntl wrapper adjusts
5785      * them to 5, 6 and 7 before making the syscall(). Since we make the
5786      * syscall directly, adjust to what is supported by the kernel.
5787      */
5788     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5789         ret -= F_GETLK64 - 5;
5790     }
5791 #endif
5792 
5793     return ret;
5794 }
5795 
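/* Generate both directions of the flock type conversion from a single
 * table: TRANSTBL_CONVERT is redefined for each direction below. */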
5796 #define FLOCK_TRANSTBL \
5797     switch (type) { \
5798     TRANSTBL_CONVERT(F_RDLCK); \
5799     TRANSTBL_CONVERT(F_WRLCK); \
5800     TRANSTBL_CONVERT(F_UNLCK); \
5801     TRANSTBL_CONVERT(F_EXLCK); \
5802     TRANSTBL_CONVERT(F_SHLCK); \
5803     }
5804 
5805 static int target_to_host_flock(int type)
5806 {
5807 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5808     FLOCK_TRANSTBL
5809 #undef  TRANSTBL_CONVERT
5810     return -TARGET_EINVAL;
5811 }
5812 
5813 static int host_to_target_flock(int type)
5814 {
5815 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5816     FLOCK_TRANSTBL
5817 #undef  TRANSTBL_CONVERT
5818     /* if we don't know how to convert the value coming
5819      * from the host, copy it to the target field as-is
5820      */
5821     return type;
5822 }
5823 
5824 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5825                                             abi_ulong target_flock_addr)
5826 {
5827     struct target_flock *target_fl;
5828     int l_type;
5829 
5830     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5831         return -TARGET_EFAULT;
5832     }
5833 
5834     __get_user(l_type, &target_fl->l_type);
5835     l_type = target_to_host_flock(l_type);
5836     if (l_type < 0) {
5837         return l_type;
5838     }
5839     fl->l_type = l_type;
5840     __get_user(fl->l_whence, &target_fl->l_whence);
5841     __get_user(fl->l_start, &target_fl->l_start);
5842     __get_user(fl->l_len, &target_fl->l_len);
5843     __get_user(fl->l_pid, &target_fl->l_pid);
5844     unlock_user_struct(target_fl, target_flock_addr, 0);
5845     return 0;
5846 }
5847 
5848 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5849                                           const struct flock64 *fl)
5850 {
5851     struct target_flock *target_fl;
5852     short l_type;
5853 
5854     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5855         return -TARGET_EFAULT;
5856     }
5857 
5858     l_type = host_to_target_flock(fl->l_type);
5859     __put_user(l_type, &target_fl->l_type);
5860     __put_user(fl->l_whence, &target_fl->l_whence);
5861     __put_user(fl->l_start, &target_fl->l_start);
5862     __put_user(fl->l_len, &target_fl->l_len);
5863     __put_user(fl->l_pid, &target_fl->l_pid);
5864     unlock_user_struct(target_fl, target_flock_addr, 1);
5865     return 0;
5866 }
5867 
5868 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5869 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5870 
5871 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5872 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5873                                                    abi_ulong target_flock_addr)
5874 {
5875     struct target_oabi_flock64 *target_fl;
5876     int l_type;
5877 
5878     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5879         return -TARGET_EFAULT;
5880     }
5881 
5882     __get_user(l_type, &target_fl->l_type);
5883     l_type = target_to_host_flock(l_type);
5884     if (l_type < 0) {
5885         return l_type;
5886     }
5887     fl->l_type = l_type;
5888     __get_user(fl->l_whence, &target_fl->l_whence);
5889     __get_user(fl->l_start, &target_fl->l_start);
5890     __get_user(fl->l_len, &target_fl->l_len);
5891     __get_user(fl->l_pid, &target_fl->l_pid);
5892     unlock_user_struct(target_fl, target_flock_addr, 0);
5893     return 0;
5894 }
5895 
5896 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
5897                                                  const struct flock64 *fl)
5898 {
5899     struct target_oabi_flock64 *target_fl;
5900     short l_type;
5901 
5902     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5903         return -TARGET_EFAULT;
5904     }
5905 
5906     l_type = host_to_target_flock(fl->l_type);
5907     __put_user(l_type, &target_fl->l_type);
5908     __put_user(fl->l_whence, &target_fl->l_whence);
5909     __put_user(fl->l_start, &target_fl->l_start);
5910     __put_user(fl->l_len, &target_fl->l_len);
5911     __put_user(fl->l_pid, &target_fl->l_pid);
5912     unlock_user_struct(target_fl, target_flock_addr, 1);
5913     return 0;
5914 }
5915 #endif
5916 
5917 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
5918                                               abi_ulong target_flock_addr)
5919 {
5920     struct target_flock64 *target_fl;
5921     int l_type;
5922 
5923     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5924         return -TARGET_EFAULT;
5925     }
5926 
5927     __get_user(l_type, &target_fl->l_type);
5928     l_type = target_to_host_flock(l_type);
5929     if (l_type < 0) {
5930         return l_type;
5931     }
5932     fl->l_type = l_type;
5933     __get_user(fl->l_whence, &target_fl->l_whence);
5934     __get_user(fl->l_start, &target_fl->l_start);
5935     __get_user(fl->l_len, &target_fl->l_len);
5936     __get_user(fl->l_pid, &target_fl->l_pid);
5937     unlock_user_struct(target_fl, target_flock_addr, 0);
5938     return 0;
5939 }
5940 
5941 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
5942                                             const struct flock64 *fl)
5943 {
5944     struct target_flock64 *target_fl;
5945     short l_type;
5946 
5947     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5948         return -TARGET_EFAULT;
5949     }
5950 
5951     l_type = host_to_target_flock(fl->l_type);
5952     __put_user(l_type, &target_fl->l_type);
5953     __put_user(fl->l_whence, &target_fl->l_whence);
5954     __put_user(fl->l_start, &target_fl->l_start);
5955     __put_user(fl->l_len, &target_fl->l_len);
5956     __put_user(fl->l_pid, &target_fl->l_pid);
5957     unlock_user_struct(target_fl, target_flock_addr, 1);
5958     return 0;
5959 }
5960 
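/*
 * do_fcntl(): record locks are always converted through the host's 64-bit
 * struct flock64, whichever of the 32-bit or 64-bit target commands was
 * used; other recognised commands are passed through after flag and
 * command translation, and unknown commands go to the host unchanged.
 */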
5961 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
5962 {
5963     struct flock64 fl64;
5964 #ifdef F_GETOWN_EX
5965     struct f_owner_ex fox;
5966     struct target_f_owner_ex *target_fox;
5967 #endif
5968     abi_long ret;
5969     int host_cmd = target_to_host_fcntl_cmd(cmd);
5970 
5971     if (host_cmd == -TARGET_EINVAL)
5972 	    return host_cmd;
5973 
5974     switch(cmd) {
5975     case TARGET_F_GETLK:
5976         ret = copy_from_user_flock(&fl64, arg);
5977         if (ret) {
5978             return ret;
5979         }
5980         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5981         if (ret == 0) {
5982             ret = copy_to_user_flock(arg, &fl64);
5983         }
5984         break;
5985 
5986     case TARGET_F_SETLK:
5987     case TARGET_F_SETLKW:
5988         ret = copy_from_user_flock(&fl64, arg);
5989         if (ret) {
5990             return ret;
5991         }
5992         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5993         break;
5994 
5995     case TARGET_F_GETLK64:
5996         ret = copy_from_user_flock64(&fl64, arg);
5997         if (ret) {
5998             return ret;
5999         }
6000         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6001         if (ret == 0) {
6002             ret = copy_to_user_flock64(arg, &fl64);
6003         }
6004         break;
6005     case TARGET_F_SETLK64:
6006     case TARGET_F_SETLKW64:
6007         ret = copy_from_user_flock64(&fl64, arg);
6008         if (ret) {
6009             return ret;
6010         }
6011         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6012         break;
6013 
6014     case TARGET_F_GETFL:
6015         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6016         if (ret >= 0) {
6017             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6018         }
6019         break;
6020 
6021     case TARGET_F_SETFL:
6022         ret = get_errno(safe_fcntl(fd, host_cmd,
6023                                    target_to_host_bitmask(arg,
6024                                                           fcntl_flags_tbl)));
6025         break;
6026 
6027 #ifdef F_GETOWN_EX
6028     case TARGET_F_GETOWN_EX:
6029         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6030         if (ret >= 0) {
6031             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6032                 return -TARGET_EFAULT;
6033             target_fox->type = tswap32(fox.type);
6034             target_fox->pid = tswap32(fox.pid);
6035             unlock_user_struct(target_fox, arg, 1);
6036         }
6037         break;
6038 #endif
6039 
6040 #ifdef F_SETOWN_EX
6041     case TARGET_F_SETOWN_EX:
6042         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6043             return -TARGET_EFAULT;
6044         fox.type = tswap32(target_fox->type);
6045         fox.pid = tswap32(target_fox->pid);
6046         unlock_user_struct(target_fox, arg, 0);
6047         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6048         break;
6049 #endif
6050 
6051     case TARGET_F_SETOWN:
6052     case TARGET_F_GETOWN:
6053     case TARGET_F_SETSIG:
6054     case TARGET_F_GETSIG:
6055     case TARGET_F_SETLEASE:
6056     case TARGET_F_GETLEASE:
6057     case TARGET_F_SETPIPE_SZ:
6058     case TARGET_F_GETPIPE_SZ:
6059         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6060         break;
6061 
6062     default:
6063         ret = get_errno(safe_fcntl(fd, cmd, arg));
6064         break;
6065     }
6066     return ret;
6067 }
6068 
6069 #ifdef USE_UID16
6070 
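/* Targets with 16-bit uid_t/gid_t: IDs above 65535 are clamped to the
 * overflow ID 65534 when narrowing, while -1 (the "unchanged" marker) is
 * preserved when widening back to 32 bits. */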
6071 static inline int high2lowuid(int uid)
6072 {
6073     if (uid > 65535)
6074         return 65534;
6075     else
6076         return uid;
6077 }
6078 
6079 static inline int high2lowgid(int gid)
6080 {
6081     if (gid > 65535)
6082         return 65534;
6083     else
6084         return gid;
6085 }
6086 
6087 static inline int low2highuid(int uid)
6088 {
6089     if ((int16_t)uid == -1)
6090         return -1;
6091     else
6092         return uid;
6093 }
6094 
6095 static inline int low2highgid(int gid)
6096 {
6097     if ((int16_t)gid == -1)
6098         return -1;
6099     else
6100         return gid;
6101 }
6102 static inline int tswapid(int id)
6103 {
6104     return tswap16(id);
6105 }
6106 
6107 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6108 
6109 #else /* !USE_UID16 */
6110 static inline int high2lowuid(int uid)
6111 {
6112     return uid;
6113 }
6114 static inline int high2lowgid(int gid)
6115 {
6116     return gid;
6117 }
6118 static inline int low2highuid(int uid)
6119 {
6120     return uid;
6121 }
6122 static inline int low2highgid(int gid)
6123 {
6124     return gid;
6125 }
6126 static inline int tswapid(int id)
6127 {
6128     return tswap32(id);
6129 }
6130 
6131 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6132 
6133 #endif /* USE_UID16 */
6134 
6135 /* We must do direct syscalls for setting UID/GID, because we want to
6136  * implement the Linux system call semantics of "change only for this thread",
6137  * not the libc/POSIX semantics of "change for all threads in process".
6138  * (See http://ewontfix.com/17/ for more details.)
6139  * We use the 32-bit version of the syscalls if present; if it is not
6140  * then either the host architecture supports 32-bit UIDs natively with
6141  * the standard syscall, or the 16-bit UID is the best we can do.
6142  */
6143 #ifdef __NR_setuid32
6144 #define __NR_sys_setuid __NR_setuid32
6145 #else
6146 #define __NR_sys_setuid __NR_setuid
6147 #endif
6148 #ifdef __NR_setgid32
6149 #define __NR_sys_setgid __NR_setgid32
6150 #else
6151 #define __NR_sys_setgid __NR_setgid
6152 #endif
6153 #ifdef __NR_setresuid32
6154 #define __NR_sys_setresuid __NR_setresuid32
6155 #else
6156 #define __NR_sys_setresuid __NR_setresuid
6157 #endif
6158 #ifdef __NR_setresgid32
6159 #define __NR_sys_setresgid __NR_setresgid32
6160 #else
6161 #define __NR_sys_setresgid __NR_setresgid
6162 #endif
6163 
6164 _syscall1(int, sys_setuid, uid_t, uid)
6165 _syscall1(int, sys_setgid, gid_t, gid)
6166 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6167 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6168 
6169 void syscall_init(void)
6170 {
6171     IOCTLEntry *ie;
6172     const argtype *arg_type;
6173     int size;
6174     int i;
6175 
6176     thunk_init(STRUCT_MAX);
6177 
6178 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6179 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6180 #include "syscall_types.h"
6181 #undef STRUCT
6182 #undef STRUCT_SPECIAL
6183 
6184     /* Build the target_to_host_errno_table[] from
6185      * host_to_target_errno_table[]. */
6186     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6187         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6188     }
6189 
6190     /* We patch the ioctl size if necessary. We rely on the fact that
6191        no ioctl has all bits set to '1' in its size field */
6192     ie = ioctl_entries;
6193     while (ie->target_cmd != 0) {
6194         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6195             TARGET_IOC_SIZEMASK) {
6196             arg_type = ie->arg_type;
6197             if (arg_type[0] != TYPE_PTR) {
6198                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6199                         ie->target_cmd);
6200                 exit(1);
6201             }
6202             arg_type++;
6203             size = thunk_type_size(arg_type, 0);
6204             ie->target_cmd = (ie->target_cmd &
6205                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6206                 (size << TARGET_IOC_SIZESHIFT);
6207         }
6208 
6209         /* automatic consistency check if same arch */
6210 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6211     (defined(__x86_64__) && defined(TARGET_X86_64))
6212         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6213             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6214                     ie->name, ie->target_cmd, ie->host_cmd);
6215         }
6216 #endif
6217         ie++;
6218     }
6219 }
6220 
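/* Helpers for 64-bit file offsets passed by 32-bit guests: the offset is
 * split across two abi_ulong arguments, and which half carries the high
 * word depends on the target's endianness. On 64-bit ABIs the value already
 * fits in one register, so the second word is ignored. */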
6221 #if TARGET_ABI_BITS == 32
6222 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6223 {
6224 #ifdef TARGET_WORDS_BIGENDIAN
6225     return ((uint64_t)word0 << 32) | word1;
6226 #else
6227     return ((uint64_t)word1 << 32) | word0;
6228 #endif
6229 }
6230 #else /* TARGET_ABI_BITS == 32 */
6231 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6232 {
6233     return word0;
6234 }
6235 #endif /* TARGET_ABI_BITS != 32 */
6236 
6237 #ifdef TARGET_NR_truncate64
6238 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6239                                          abi_long arg2,
6240                                          abi_long arg3,
6241                                          abi_long arg4)
6242 {
6243     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6244         arg2 = arg3;
6245         arg3 = arg4;
6246     }
6247     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6248 }
6249 #endif
6250 
6251 #ifdef TARGET_NR_ftruncate64
6252 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6253                                           abi_long arg2,
6254                                           abi_long arg3,
6255                                           abi_long arg4)
6256 {
6257     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6258         arg2 = arg3;
6259         arg3 = arg4;
6260     }
6261     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6262 }
6263 #endif
6264 
6265 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6266                                                abi_ulong target_addr)
6267 {
6268     struct target_timespec *target_ts;
6269 
6270     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6271         return -TARGET_EFAULT;
6272     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6273     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6274     unlock_user_struct(target_ts, target_addr, 0);
6275     return 0;
6276 }
6277 
6278 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6279                                                struct timespec *host_ts)
6280 {
6281     struct target_timespec *target_ts;
6282 
6283     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6284         return -TARGET_EFAULT;
6285     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6286     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6287     unlock_user_struct(target_ts, target_addr, 1);
6288     return 0;
6289 }
6290 
6291 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6292                                                  abi_ulong target_addr)
6293 {
6294     struct target_itimerspec *target_itspec;
6295 
6296     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6297         return -TARGET_EFAULT;
6298     }
6299 
6300     host_itspec->it_interval.tv_sec =
6301                             tswapal(target_itspec->it_interval.tv_sec);
6302     host_itspec->it_interval.tv_nsec =
6303                             tswapal(target_itspec->it_interval.tv_nsec);
6304     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6305     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6306 
6307     unlock_user_struct(target_itspec, target_addr, 1);
6308     return 0;
6309 }
6310 
6311 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6312                                                struct itimerspec *host_its)
6313 {
6314     struct target_itimerspec *target_itspec;
6315 
6316     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6317         return -TARGET_EFAULT;
6318     }
6319 
6320     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6321     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6322 
6323     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6324     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6325 
6326     unlock_user_struct(target_itspec, target_addr, 0);
6327     return 0;
6328 }
6329 
6330 static inline abi_long target_to_host_timex(struct timex *host_tx,
6331                                             abi_long target_addr)
6332 {
6333     struct target_timex *target_tx;
6334 
6335     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6336         return -TARGET_EFAULT;
6337     }
6338 
6339     __get_user(host_tx->modes, &target_tx->modes);
6340     __get_user(host_tx->offset, &target_tx->offset);
6341     __get_user(host_tx->freq, &target_tx->freq);
6342     __get_user(host_tx->maxerror, &target_tx->maxerror);
6343     __get_user(host_tx->esterror, &target_tx->esterror);
6344     __get_user(host_tx->status, &target_tx->status);
6345     __get_user(host_tx->constant, &target_tx->constant);
6346     __get_user(host_tx->precision, &target_tx->precision);
6347     __get_user(host_tx->tolerance, &target_tx->tolerance);
6348     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6349     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6350     __get_user(host_tx->tick, &target_tx->tick);
6351     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6352     __get_user(host_tx->jitter, &target_tx->jitter);
6353     __get_user(host_tx->shift, &target_tx->shift);
6354     __get_user(host_tx->stabil, &target_tx->stabil);
6355     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6356     __get_user(host_tx->calcnt, &target_tx->calcnt);
6357     __get_user(host_tx->errcnt, &target_tx->errcnt);
6358     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6359     __get_user(host_tx->tai, &target_tx->tai);
6360 
6361     unlock_user_struct(target_tx, target_addr, 0);
6362     return 0;
6363 }
6364 
6365 static inline abi_long host_to_target_timex(abi_long target_addr,
6366                                             struct timex *host_tx)
6367 {
6368     struct target_timex *target_tx;
6369 
6370     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6371         return -TARGET_EFAULT;
6372     }
6373 
6374     __put_user(host_tx->modes, &target_tx->modes);
6375     __put_user(host_tx->offset, &target_tx->offset);
6376     __put_user(host_tx->freq, &target_tx->freq);
6377     __put_user(host_tx->maxerror, &target_tx->maxerror);
6378     __put_user(host_tx->esterror, &target_tx->esterror);
6379     __put_user(host_tx->status, &target_tx->status);
6380     __put_user(host_tx->constant, &target_tx->constant);
6381     __put_user(host_tx->precision, &target_tx->precision);
6382     __put_user(host_tx->tolerance, &target_tx->tolerance);
6383     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6384     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6385     __put_user(host_tx->tick, &target_tx->tick);
6386     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6387     __put_user(host_tx->jitter, &target_tx->jitter);
6388     __put_user(host_tx->shift, &target_tx->shift);
6389     __put_user(host_tx->stabil, &target_tx->stabil);
6390     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6391     __put_user(host_tx->calcnt, &target_tx->calcnt);
6392     __put_user(host_tx->errcnt, &target_tx->errcnt);
6393     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6394     __put_user(host_tx->tai, &target_tx->tai);
6395 
6396     unlock_user_struct(target_tx, target_addr, 1);
6397     return 0;
6398 }
6399 
6400 
6401 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6402                                                abi_ulong target_addr)
6403 {
6404     struct target_sigevent *target_sevp;
6405 
6406     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6407         return -TARGET_EFAULT;
6408     }
6409 
6410     /* This union is awkward on 64 bit systems because it has a 32 bit
6411      * integer and a pointer in it; we follow the conversion approach
6412      * used for handling sigval types in signal.c so the guest should get
6413      * the correct value back even if we did a 64 bit byteswap and it's
6414      * using the 32 bit integer.
6415      */
6416     host_sevp->sigev_value.sival_ptr =
6417         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6418     host_sevp->sigev_signo =
6419         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6420     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6421     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6422 
6423     unlock_user_struct(target_sevp, target_addr, 1);
6424     return 0;
6425 }
6426 
6427 #if defined(TARGET_NR_mlockall)
6428 static inline int target_to_host_mlockall_arg(int arg)
6429 {
6430     int result = 0;
6431 
6432     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6433         result |= MCL_CURRENT;
6434     }
6435     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6436         result |= MCL_FUTURE;
6437     }
6438     return result;
6439 }
6440 #endif
6441 
6442 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6443      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6444      defined(TARGET_NR_newfstatat))
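/* Write a host struct stat out to the guest's stat64 (or stat) buffer,
 * using the ARM EABI layout when the guest requires it. */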
6445 static inline abi_long host_to_target_stat64(void *cpu_env,
6446                                              abi_ulong target_addr,
6447                                              struct stat *host_st)
6448 {
6449 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6450     if (((CPUARMState *)cpu_env)->eabi) {
6451         struct target_eabi_stat64 *target_st;
6452 
6453         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6454             return -TARGET_EFAULT;
6455         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6456         __put_user(host_st->st_dev, &target_st->st_dev);
6457         __put_user(host_st->st_ino, &target_st->st_ino);
6458 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6459         __put_user(host_st->st_ino, &target_st->__st_ino);
6460 #endif
6461         __put_user(host_st->st_mode, &target_st->st_mode);
6462         __put_user(host_st->st_nlink, &target_st->st_nlink);
6463         __put_user(host_st->st_uid, &target_st->st_uid);
6464         __put_user(host_st->st_gid, &target_st->st_gid);
6465         __put_user(host_st->st_rdev, &target_st->st_rdev);
6466         __put_user(host_st->st_size, &target_st->st_size);
6467         __put_user(host_st->st_blksize, &target_st->st_blksize);
6468         __put_user(host_st->st_blocks, &target_st->st_blocks);
6469         __put_user(host_st->st_atime, &target_st->target_st_atime);
6470         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6471         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6472 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6473         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6474         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6475         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6476 #endif
6477         unlock_user_struct(target_st, target_addr, 1);
6478     } else
6479 #endif
6480     {
6481 #if defined(TARGET_HAS_STRUCT_STAT64)
6482         struct target_stat64 *target_st;
6483 #else
6484         struct target_stat *target_st;
6485 #endif
6486 
6487         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6488             return -TARGET_EFAULT;
6489         memset(target_st, 0, sizeof(*target_st));
6490         __put_user(host_st->st_dev, &target_st->st_dev);
6491         __put_user(host_st->st_ino, &target_st->st_ino);
6492 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6493         __put_user(host_st->st_ino, &target_st->__st_ino);
6494 #endif
6495         __put_user(host_st->st_mode, &target_st->st_mode);
6496         __put_user(host_st->st_nlink, &target_st->st_nlink);
6497         __put_user(host_st->st_uid, &target_st->st_uid);
6498         __put_user(host_st->st_gid, &target_st->st_gid);
6499         __put_user(host_st->st_rdev, &target_st->st_rdev);
6500         /* XXX: better use of kernel struct */
6501         __put_user(host_st->st_size, &target_st->st_size);
6502         __put_user(host_st->st_blksize, &target_st->st_blksize);
6503         __put_user(host_st->st_blocks, &target_st->st_blocks);
6504         __put_user(host_st->st_atime, &target_st->target_st_atime);
6505         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6506         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6507 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6508         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6509         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6510         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6511 #endif
6512         unlock_user_struct(target_st, target_addr, 1);
6513     }
6514 
6515     return 0;
6516 }
6517 #endif
6518 
6519 /* ??? Using host futex calls even when target atomic operations
6520    are not really atomic probably breaks things.  However, implementing
6521    futexes locally would make futexes shared between multiple processes
6522    tricky; then again, they're probably useless because guest atomic
6523    operations won't work either.  */
6524 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6525                     target_ulong uaddr2, int val3)
6526 {
6527     struct timespec ts, *pts;
6528     int base_op;
6529 
6530     /* ??? We assume FUTEX_* constants are the same on both host
6531        and target.  */
6532 #ifdef FUTEX_CMD_MASK
6533     base_op = op & FUTEX_CMD_MASK;
6534 #else
6535     base_op = op;
6536 #endif
6537     switch (base_op) {
6538     case FUTEX_WAIT:
6539     case FUTEX_WAIT_BITSET:
6540         if (timeout) {
6541             pts = &ts;
6542             target_to_host_timespec(pts, timeout);
6543         } else {
6544             pts = NULL;
6545         }
6546         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6547                          pts, NULL, val3));
6548     case FUTEX_WAKE:
6549         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6550     case FUTEX_FD:
6551         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6552     case FUTEX_REQUEUE:
6553     case FUTEX_CMP_REQUEUE:
6554     case FUTEX_WAKE_OP:
6555         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6556            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6557            But the prototype takes a `struct timespec *'; insert casts
6558            to satisfy the compiler.  We do not need to tswap TIMEOUT
6559            since it's not compared to guest memory.  */
6560         pts = (struct timespec *)(uintptr_t) timeout;
6561         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6562                                     g2h(uaddr2),
6563                                     (base_op == FUTEX_CMP_REQUEUE
6564                                      ? tswap32(val3)
6565                                      : val3)));
6566     default:
6567         return -TARGET_ENOSYS;
6568     }
6569 }
6570 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6571 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6572                                      abi_long handle, abi_long mount_id,
6573                                      abi_long flags)
6574 {
6575     struct file_handle *target_fh;
6576     struct file_handle *fh;
6577     int mid = 0;
6578     abi_long ret;
6579     char *name;
6580     unsigned int size, total_size;
6581 
6582     if (get_user_s32(size, handle)) {
6583         return -TARGET_EFAULT;
6584     }
6585 
6586     name = lock_user_string(pathname);
6587     if (!name) {
6588         return -TARGET_EFAULT;
6589     }
6590 
6591     total_size = sizeof(struct file_handle) + size;
6592     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6593     if (!target_fh) {
6594         unlock_user(name, pathname, 0);
6595         return -TARGET_EFAULT;
6596     }
6597 
6598     fh = g_malloc0(total_size);
6599     fh->handle_bytes = size;
6600 
6601     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6602     unlock_user(name, pathname, 0);
6603 
6604     /* man name_to_handle_at(2):
6605      * Other than the use of the handle_bytes field, the caller should treat
6606      * the file_handle structure as an opaque data type
6607      */
6608 
6609     memcpy(target_fh, fh, total_size);
6610     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6611     target_fh->handle_type = tswap32(fh->handle_type);
6612     g_free(fh);
6613     unlock_user(target_fh, handle, total_size);
6614 
6615     if (put_user_s32(mid, mount_id)) {
6616         return -TARGET_EFAULT;
6617     }
6618 
6619     return ret;
6620 
6621 }
6622 #endif
6623 
6624 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6625 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6626                                      abi_long flags)
6627 {
6628     struct file_handle *target_fh;
6629     struct file_handle *fh;
6630     unsigned int size, total_size;
6631     abi_long ret;
6632 
6633     if (get_user_s32(size, handle)) {
6634         return -TARGET_EFAULT;
6635     }
6636 
6637     total_size = sizeof(struct file_handle) + size;
6638     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6639     if (!target_fh) {
6640         return -TARGET_EFAULT;
6641     }
6642 
6643     fh = g_memdup(target_fh, total_size);
6644     fh->handle_bytes = size;
6645     fh->handle_type = tswap32(target_fh->handle_type);
6646 
6647     ret = get_errno(open_by_handle_at(mount_fd, fh,
6648                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6649 
6650     g_free(fh);
6651 
6652     unlock_user(target_fh, handle, total_size);
6653 
6654     return ret;
6655 }
6656 #endif
6657 
6658 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6659 
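/* Shared implementation for signalfd and signalfd4: convert the guest signal
 * mask and flag bits to their host equivalents, then register the resulting
 * descriptor with target_signalfd_trans so that data read from it is
 * translated back for the guest. */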
6660 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6661 {
6662     int host_flags;
6663     target_sigset_t *target_mask;
6664     sigset_t host_mask;
6665     abi_long ret;
6666 
6667     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6668         return -TARGET_EINVAL;
6669     }
6670     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6671         return -TARGET_EFAULT;
6672     }
6673 
6674     target_to_host_sigset(&host_mask, target_mask);
6675 
6676     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6677 
6678     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6679     if (ret >= 0) {
6680         fd_trans_register(ret, &target_signalfd_trans);
6681     }
6682 
6683     unlock_user_struct(target_mask, mask, 0);
6684 
6685     return ret;
6686 }
6687 #endif
6688 
6689 /* Map host to target signal numbers for the wait family of syscalls.
6690    Assume all other status bits are the same.  */
6691 int host_to_target_waitstatus(int status)
6692 {
6693     if (WIFSIGNALED(status)) {
6694         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6695     }
6696     if (WIFSTOPPED(status)) {
6697         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6698                | (status & 0xff);
6699     }
6700     return status;
6701 }
6702 
6703 static int open_self_cmdline(void *cpu_env, int fd)
6704 {
6705     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6706     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6707     int i;
6708 
6709     for (i = 0; i < bprm->argc; i++) {
6710         size_t len = strlen(bprm->argv[i]) + 1;
6711 
6712         if (write(fd, bprm->argv[i], len) != len) {
6713             return -1;
6714         }
6715     }
6716 
6717     return 0;
6718 }
6719 
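/* Emulate /proc/self/maps: read the host's own maps file and re-emit each
 * line whose range is visible to the guest, translating host addresses back
 * into guest addresses and tagging the guest stack region. */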
6720 static int open_self_maps(void *cpu_env, int fd)
6721 {
6722     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6723     TaskState *ts = cpu->opaque;
6724     FILE *fp;
6725     char *line = NULL;
6726     size_t len = 0;
6727     ssize_t read;
6728 
6729     fp = fopen("/proc/self/maps", "r");
6730     if (fp == NULL) {
6731         return -1;
6732     }
6733 
6734     while ((read = getline(&line, &len, fp)) != -1) {
6735         int fields, dev_maj, dev_min, inode;
6736         uint64_t min, max, offset;
6737         char flag_r, flag_w, flag_x, flag_p;
6738         char path[512] = "";
6739         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6740                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6741                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6742 
6743         if ((fields < 10) || (fields > 11)) {
6744             continue;
6745         }
6746         if (h2g_valid(min)) {
6747             int flags = page_get_flags(h2g(min));
6748             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6749             if (page_check_range(h2g(min), max - min, flags) == -1) {
6750                 continue;
6751             }
6752             if (h2g(min) == ts->info->stack_limit) {
6753                 pstrcpy(path, sizeof(path), "      [stack]");
6754             }
6755             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6756                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6757                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6758                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6759                     path[0] ? "         " : "", path);
6760         }
6761     }
6762 
6763     free(line);
6764     fclose(fp);
6765 
6766     return 0;
6767 }
6768 
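/* Emulate /proc/self/stat: only the pid, the command name and the
 * start-of-stack field carry real values; all other fields are written as 0. */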
6769 static int open_self_stat(void *cpu_env, int fd)
6770 {
6771     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6772     TaskState *ts = cpu->opaque;
6773     abi_ulong start_stack = ts->info->start_stack;
6774     int i;
6775 
6776     for (i = 0; i < 44; i++) {
6777       char buf[128];
6778       int len;
6779       uint64_t val = 0;
6780 
6781       if (i == 0) {
6782         /* pid */
6783         val = getpid();
6784         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6785       } else if (i == 1) {
6786         /* app name */
6787         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6788       } else if (i == 27) {
6789         /* stack bottom */
6790         val = start_stack;
6791         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6792       } else {
6793         /* all other fields are not emulated and are reported as 0 */
6794         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6795       }
6796 
6797       len = strlen(buf);
6798       if (write(fd, buf, len) != len) {
6799           return -1;
6800       }
6801     }
6802 
6803     return 0;
6804 }
6805 
6806 static int open_self_auxv(void *cpu_env, int fd)
6807 {
6808     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6809     TaskState *ts = cpu->opaque;
6810     abi_ulong auxv = ts->info->saved_auxv;
6811     abi_ulong len = ts->info->auxv_len;
6812     char *ptr;
6813 
6814     /*
6815      * The auxiliary vector is stored on the target process stack.
6816      * Read in the whole auxv vector and copy it to the file.
6817      */
6818     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6819     if (ptr != NULL) {
6820         while (len > 0) {
6821             ssize_t r;
6822             r = write(fd, ptr, len);
6823             if (r <= 0) {
6824                 break;
6825             }
6826             len -= r;
6827             ptr += r;
6828         }
6829         lseek(fd, 0, SEEK_SET);
6830         unlock_user(ptr, auxv, len);
6831     }
6832 
6833     return 0;
6834 }
6835 
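/* Return 1 if FILENAME names ENTRY inside this process's own /proc directory,
 * i.e. "/proc/self/<entry>" or "/proc/<pid of this process>/<entry>". */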
6836 static int is_proc_myself(const char *filename, const char *entry)
6837 {
6838     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6839         filename += strlen("/proc/");
6840         if (!strncmp(filename, "self/", strlen("self/"))) {
6841             filename += strlen("self/");
6842         } else if (*filename >= '1' && *filename <= '9') {
6843             char myself[80];
6844             snprintf(myself, sizeof(myself), "%d/", getpid());
6845             if (!strncmp(filename, myself, strlen(myself))) {
6846                 filename += strlen(myself);
6847             } else {
6848                 return 0;
6849             }
6850         } else {
6851             return 0;
6852         }
6853         if (!strcmp(filename, entry)) {
6854             return 1;
6855         }
6856     }
6857     return 0;
6858 }
6859 
6860 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
6861     defined(TARGET_SPARC) || defined(TARGET_M68K)
6862 static int is_proc(const char *filename, const char *entry)
6863 {
6864     return strcmp(filename, entry) == 0;
6865 }
6866 #endif
6867 
6868 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6869 static int open_net_route(void *cpu_env, int fd)
6870 {
6871     FILE *fp;
6872     char *line = NULL;
6873     size_t len = 0;
6874     ssize_t read;
6875 
6876     fp = fopen("/proc/net/route", "r");
6877     if (fp == NULL) {
6878         return -1;
6879     }
6880 
6881     /* read header */
6882 
6883     read = getline(&line, &len, fp);
6884     dprintf(fd, "%s", line);
6885 
6886     /* read routes */
6887 
6888     while ((read = getline(&line, &len, fp)) != -1) {
6889         char iface[16];
6890         uint32_t dest, gw, mask;
6891         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6892         int fields;
6893 
6894         fields = sscanf(line,
6895                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6896                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6897                         &mask, &mtu, &window, &irtt);
6898         if (fields != 11) {
6899             continue;
6900         }
6901         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6902                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6903                 metric, tswap32(mask), mtu, window, irtt);
6904     }
6905 
6906     free(line);
6907     fclose(fp);
6908 
6909     return 0;
6910 }
6911 #endif
6912 
6913 #if defined(TARGET_SPARC)
6914 static int open_cpuinfo(void *cpu_env, int fd)
6915 {
6916     dprintf(fd, "type\t\t: sun4u\n");
6917     return 0;
6918 }
6919 #endif
6920 
6921 #if defined(TARGET_M68K)
6922 static int open_hardware(void *cpu_env, int fd)
6923 {
6924     dprintf(fd, "Model:\t\tqemu-m68k\n");
6925     return 0;
6926 }
6927 #endif
6928 
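/* openat() with special handling for /proc paths that must be emulated:
 * entries matched by fakes[] below are synthesized into an unlinked
 * temporary file instead of being passed through to the host. */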
6929 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
6930 {
6931     struct fake_open {
6932         const char *filename;
6933         int (*fill)(void *cpu_env, int fd);
6934         int (*cmp)(const char *s1, const char *s2);
6935     };
6936     const struct fake_open *fake_open;
6937     static const struct fake_open fakes[] = {
6938         { "maps", open_self_maps, is_proc_myself },
6939         { "stat", open_self_stat, is_proc_myself },
6940         { "auxv", open_self_auxv, is_proc_myself },
6941         { "cmdline", open_self_cmdline, is_proc_myself },
6942 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6943         { "/proc/net/route", open_net_route, is_proc },
6944 #endif
6945 #if defined(TARGET_SPARC)
6946         { "/proc/cpuinfo", open_cpuinfo, is_proc },
6947 #endif
6948 #if defined(TARGET_M68K)
6949         { "/proc/hardware", open_hardware, is_proc },
6950 #endif
6951         { NULL, NULL, NULL }
6952     };
6953 
6954     if (is_proc_myself(pathname, "exe")) {
6955         int execfd = qemu_getauxval(AT_EXECFD);
6956         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
6957     }
6958 
6959     for (fake_open = fakes; fake_open->filename; fake_open++) {
6960         if (fake_open->cmp(pathname, fake_open->filename)) {
6961             break;
6962         }
6963     }
6964 
6965     if (fake_open->filename) {
6966         const char *tmpdir;
6967         char filename[PATH_MAX];
6968         int fd, r;
6969 
6970         /* create a temporary file to hold the synthesized contents */
6971         tmpdir = getenv("TMPDIR");
6972         if (!tmpdir)
6973             tmpdir = "/tmp";
6974         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
6975         fd = mkstemp(filename);
6976         if (fd < 0) {
6977             return fd;
6978         }
6979         unlink(filename);
6980 
6981         if ((r = fake_open->fill(cpu_env, fd))) {
6982             int e = errno;
6983             close(fd);
6984             errno = e;
6985             return r;
6986         }
6987         lseek(fd, 0, SEEK_SET);
6988 
6989         return fd;
6990     }
6991 
6992     return safe_openat(dirfd, path(pathname), flags, mode);
6993 }
6994 
6995 #define TIMER_MAGIC 0x0caf0000
6996 #define TIMER_MAGIC_MASK 0xffff0000
6997 
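/* Guest-visible timer IDs are encoded as (TIMER_MAGIC | index), where index
 * is the slot in the g_posix_timers[] array. */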
6998 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
6999 static target_timer_t get_timer_id(abi_long arg)
7000 {
7001     target_timer_t timerid = arg;
7002 
7003     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7004         return -TARGET_EINVAL;
7005     }
7006 
7007     timerid &= 0xffff;
7008 
7009     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7010         return -TARGET_EINVAL;
7011     }
7012 
7013     return timerid;
7014 }
7015 
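/* Convert CPU affinity masks between guest and host representations. The two
 * helpers below copy the mask bit by bit, since the guest's abi_ulong words
 * and the host's unsigned long words may differ in size and endianness. */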
7016 static int target_to_host_cpu_mask(unsigned long *host_mask,
7017                                    size_t host_size,
7018                                    abi_ulong target_addr,
7019                                    size_t target_size)
7020 {
7021     unsigned target_bits = sizeof(abi_ulong) * 8;
7022     unsigned host_bits = sizeof(*host_mask) * 8;
7023     abi_ulong *target_mask;
7024     unsigned i, j;
7025 
7026     assert(host_size >= target_size);
7027 
7028     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7029     if (!target_mask) {
7030         return -TARGET_EFAULT;
7031     }
7032     memset(host_mask, 0, host_size);
7033 
7034     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7035         unsigned bit = i * target_bits;
7036         abi_ulong val;
7037 
7038         __get_user(val, &target_mask[i]);
7039         for (j = 0; j < target_bits; j++, bit++) {
7040             if (val & (1UL << j)) {
7041                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7042             }
7043         }
7044     }
7045 
7046     unlock_user(target_mask, target_addr, 0);
7047     return 0;
7048 }
7049 
7050 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7051                                    size_t host_size,
7052                                    abi_ulong target_addr,
7053                                    size_t target_size)
7054 {
7055     unsigned target_bits = sizeof(abi_ulong) * 8;
7056     unsigned host_bits = sizeof(*host_mask) * 8;
7057     abi_ulong *target_mask;
7058     unsigned i, j;
7059 
7060     assert(host_size >= target_size);
7061 
7062     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7063     if (!target_mask) {
7064         return -TARGET_EFAULT;
7065     }
7066 
7067     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7068         unsigned bit = i * target_bits;
7069         abi_ulong val = 0;
7070 
7071         for (j = 0; j < target_bits; j++, bit++) {
7072             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7073                 val |= 1UL << j;
7074             }
7075         }
7076         __put_user(val, &target_mask[i]);
7077     }
7078 
7079     unlock_user(target_mask, target_addr, target_size);
7080     return 0;
7081 }
7082 
7083 /* This is an internal helper for do_syscall so that it is easier
7084  * to have a single return point at which actions, such as logging
7085  * of syscall results, can be performed.
7086  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7087  */
7088 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7089                             abi_long arg2, abi_long arg3, abi_long arg4,
7090                             abi_long arg5, abi_long arg6, abi_long arg7,
7091                             abi_long arg8)
7092 {
7093     CPUState *cpu = env_cpu(cpu_env);
7094     abi_long ret;
7095 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7096     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7097     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
7098     struct stat st;
7099 #endif
7100 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7101     || defined(TARGET_NR_fstatfs)
7102     struct statfs stfs;
7103 #endif
7104     void *p;
7105 
7106     switch(num) {
7107     case TARGET_NR_exit:
7108         /* In old applications this may be used to implement _exit(2).
7109            However, in threaded applications it is used for thread termination,
7110            and _exit_group is used for application termination.
7111            Do thread termination if we have more than one thread.  */
7112 
7113         if (block_signals()) {
7114             return -TARGET_ERESTARTSYS;
7115         }
7116 
7117         cpu_list_lock();
7118 
7119         if (CPU_NEXT(first_cpu)) {
7120             TaskState *ts;
7121 
7122             /* Remove the CPU from the list.  */
7123             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7124 
7125             cpu_list_unlock();
7126 
7127             ts = cpu->opaque;
7128             if (ts->child_tidptr) {
7129                 put_user_u32(0, ts->child_tidptr);
7130                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7131                           NULL, NULL, 0);
7132             }
7133             thread_cpu = NULL;
7134             object_unref(OBJECT(cpu));
7135             g_free(ts);
7136             rcu_unregister_thread();
7137             pthread_exit(NULL);
7138         }
7139 
7140         cpu_list_unlock();
7141         preexit_cleanup(cpu_env, arg1);
7142         _exit(arg1);
7143         return 0; /* avoid warning */
7144     case TARGET_NR_read:
7145         if (arg2 == 0 && arg3 == 0) {
7146             return get_errno(safe_read(arg1, 0, 0));
7147         } else {
7148             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7149                 return -TARGET_EFAULT;
7150             ret = get_errno(safe_read(arg1, p, arg3));
7151             if (ret >= 0 &&
7152                 fd_trans_host_to_target_data(arg1)) {
7153                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7154             }
7155             unlock_user(p, arg2, ret);
7156         }
7157         return ret;
7158     case TARGET_NR_write:
7159         if (arg2 == 0 && arg3 == 0) {
7160             return get_errno(safe_write(arg1, 0, 0));
7161         }
7162         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7163             return -TARGET_EFAULT;
7164         if (fd_trans_target_to_host_data(arg1)) {
7165             void *copy = g_malloc(arg3);
7166             memcpy(copy, p, arg3);
7167             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7168             if (ret >= 0) {
7169                 ret = get_errno(safe_write(arg1, copy, ret));
7170             }
7171             g_free(copy);
7172         } else {
7173             ret = get_errno(safe_write(arg1, p, arg3));
7174         }
7175         unlock_user(p, arg2, 0);
7176         return ret;
7177 
7178 #ifdef TARGET_NR_open
7179     case TARGET_NR_open:
7180         if (!(p = lock_user_string(arg1)))
7181             return -TARGET_EFAULT;
7182         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7183                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7184                                   arg3));
7185         fd_trans_unregister(ret);
7186         unlock_user(p, arg1, 0);
7187         return ret;
7188 #endif
7189     case TARGET_NR_openat:
7190         if (!(p = lock_user_string(arg2)))
7191             return -TARGET_EFAULT;
7192         ret = get_errno(do_openat(cpu_env, arg1, p,
7193                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7194                                   arg4));
7195         fd_trans_unregister(ret);
7196         unlock_user(p, arg2, 0);
7197         return ret;
7198 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7199     case TARGET_NR_name_to_handle_at:
7200         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7201         return ret;
7202 #endif
7203 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7204     case TARGET_NR_open_by_handle_at:
7205         ret = do_open_by_handle_at(arg1, arg2, arg3);
7206         fd_trans_unregister(ret);
7207         return ret;
7208 #endif
7209     case TARGET_NR_close:
7210         fd_trans_unregister(arg1);
7211         return get_errno(close(arg1));
7212 
7213     case TARGET_NR_brk:
7214         return do_brk(arg1);
7215 #ifdef TARGET_NR_fork
7216     case TARGET_NR_fork:
7217         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7218 #endif
7219 #ifdef TARGET_NR_waitpid
7220     case TARGET_NR_waitpid:
7221         {
7222             int status;
7223             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7224             if (!is_error(ret) && arg2 && ret
7225                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7226                 return -TARGET_EFAULT;
7227         }
7228         return ret;
7229 #endif
7230 #ifdef TARGET_NR_waitid
7231     case TARGET_NR_waitid:
7232         {
7233             siginfo_t info;
7234             info.si_pid = 0;
7235             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7236             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7237                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7238                     return -TARGET_EFAULT;
7239                 host_to_target_siginfo(p, &info);
7240                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7241             }
7242         }
7243         return ret;
7244 #endif
7245 #ifdef TARGET_NR_creat /* not on alpha */
7246     case TARGET_NR_creat:
7247         if (!(p = lock_user_string(arg1)))
7248             return -TARGET_EFAULT;
7249         ret = get_errno(creat(p, arg2));
7250         fd_trans_unregister(ret);
7251         unlock_user(p, arg1, 0);
7252         return ret;
7253 #endif
7254 #ifdef TARGET_NR_link
7255     case TARGET_NR_link:
7256         {
7257             void * p2;
7258             p = lock_user_string(arg1);
7259             p2 = lock_user_string(arg2);
7260             if (!p || !p2)
7261                 ret = -TARGET_EFAULT;
7262             else
7263                 ret = get_errno(link(p, p2));
7264             unlock_user(p2, arg2, 0);
7265             unlock_user(p, arg1, 0);
7266         }
7267         return ret;
7268 #endif
7269 #if defined(TARGET_NR_linkat)
7270     case TARGET_NR_linkat:
7271         {
7272             void * p2 = NULL;
7273             if (!arg2 || !arg4)
7274                 return -TARGET_EFAULT;
7275             p  = lock_user_string(arg2);
7276             p2 = lock_user_string(arg4);
7277             if (!p || !p2)
7278                 ret = -TARGET_EFAULT;
7279             else
7280                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7281             unlock_user(p, arg2, 0);
7282             unlock_user(p2, arg4, 0);
7283         }
7284         return ret;
7285 #endif
7286 #ifdef TARGET_NR_unlink
7287     case TARGET_NR_unlink:
7288         if (!(p = lock_user_string(arg1)))
7289             return -TARGET_EFAULT;
7290         ret = get_errno(unlink(p));
7291         unlock_user(p, arg1, 0);
7292         return ret;
7293 #endif
7294 #if defined(TARGET_NR_unlinkat)
7295     case TARGET_NR_unlinkat:
7296         if (!(p = lock_user_string(arg2)))
7297             return -TARGET_EFAULT;
7298         ret = get_errno(unlinkat(arg1, p, arg3));
7299         unlock_user(p, arg2, 0);
7300         return ret;
7301 #endif
7302     case TARGET_NR_execve:
7303         {
7304             char **argp, **envp;
7305             int argc, envc;
7306             abi_ulong gp;
7307             abi_ulong guest_argp;
7308             abi_ulong guest_envp;
7309             abi_ulong addr;
7310             char **q;
7311             int total_size = 0;
7312 
7313             argc = 0;
7314             guest_argp = arg2;
7315             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7316                 if (get_user_ual(addr, gp))
7317                     return -TARGET_EFAULT;
7318                 if (!addr)
7319                     break;
7320                 argc++;
7321             }
7322             envc = 0;
7323             guest_envp = arg3;
7324             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7325                 if (get_user_ual(addr, gp))
7326                     return -TARGET_EFAULT;
7327                 if (!addr)
7328                     break;
7329                 envc++;
7330             }
7331 
7332             argp = g_new0(char *, argc + 1);
7333             envp = g_new0(char *, envc + 1);
7334 
7335             for (gp = guest_argp, q = argp; gp;
7336                   gp += sizeof(abi_ulong), q++) {
7337                 if (get_user_ual(addr, gp))
7338                     goto execve_efault;
7339                 if (!addr)
7340                     break;
7341                 if (!(*q = lock_user_string(addr)))
7342                     goto execve_efault;
7343                 total_size += strlen(*q) + 1;
7344             }
7345             *q = NULL;
7346 
7347             for (gp = guest_envp, q = envp; gp;
7348                   gp += sizeof(abi_ulong), q++) {
7349                 if (get_user_ual(addr, gp))
7350                     goto execve_efault;
7351                 if (!addr)
7352                     break;
7353                 if (!(*q = lock_user_string(addr)))
7354                     goto execve_efault;
7355                 total_size += strlen(*q) + 1;
7356             }
7357             *q = NULL;
7358 
7359             if (!(p = lock_user_string(arg1)))
7360                 goto execve_efault;
7361             /* Although execve() is not an interruptible syscall, it is
7362              * a special case where we must use the safe_syscall wrapper:
7363              * if we allow a signal to happen before we make the host
7364              * syscall then we will 'lose' it, because at the point of
7365              * execve the process leaves QEMU's control. So we use the
7366              * safe syscall wrapper to ensure that we either take the
7367              * signal as a guest signal, or else it does not happen
7368              * before the execve completes and makes it the other
7369              * program's problem.
7370              */
7371             ret = get_errno(safe_execve(p, argp, envp));
7372             unlock_user(p, arg1, 0);
7373 
7374             goto execve_end;
7375 
7376         execve_efault:
7377             ret = -TARGET_EFAULT;
7378 
7379         execve_end:
7380             for (gp = guest_argp, q = argp; *q;
7381                   gp += sizeof(abi_ulong), q++) {
7382                 if (get_user_ual(addr, gp)
7383                     || !addr)
7384                     break;
7385                 unlock_user(*q, addr, 0);
7386             }
7387             for (gp = guest_envp, q = envp; *q;
7388                   gp += sizeof(abi_ulong), q++) {
7389                 if (get_user_ual(addr, gp)
7390                     || !addr)
7391                     break;
7392                 unlock_user(*q, addr, 0);
7393             }
7394 
7395             g_free(argp);
7396             g_free(envp);
7397         }
7398         return ret;
7399     case TARGET_NR_chdir:
7400         if (!(p = lock_user_string(arg1)))
7401             return -TARGET_EFAULT;
7402         ret = get_errno(chdir(p));
7403         unlock_user(p, arg1, 0);
7404         return ret;
7405 #ifdef TARGET_NR_time
7406     case TARGET_NR_time:
7407         {
7408             time_t host_time;
7409             ret = get_errno(time(&host_time));
7410             if (!is_error(ret)
7411                 && arg1
7412                 && put_user_sal(host_time, arg1))
7413                 return -TARGET_EFAULT;
7414         }
7415         return ret;
7416 #endif
7417 #ifdef TARGET_NR_mknod
7418     case TARGET_NR_mknod:
7419         if (!(p = lock_user_string(arg1)))
7420             return -TARGET_EFAULT;
7421         ret = get_errno(mknod(p, arg2, arg3));
7422         unlock_user(p, arg1, 0);
7423         return ret;
7424 #endif
7425 #if defined(TARGET_NR_mknodat)
7426     case TARGET_NR_mknodat:
7427         if (!(p = lock_user_string(arg2)))
7428             return -TARGET_EFAULT;
7429         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7430         unlock_user(p, arg2, 0);
7431         return ret;
7432 #endif
7433 #ifdef TARGET_NR_chmod
7434     case TARGET_NR_chmod:
7435         if (!(p = lock_user_string(arg1)))
7436             return -TARGET_EFAULT;
7437         ret = get_errno(chmod(p, arg2));
7438         unlock_user(p, arg1, 0);
7439         return ret;
7440 #endif
7441 #ifdef TARGET_NR_lseek
7442     case TARGET_NR_lseek:
7443         return get_errno(lseek(arg1, arg2, arg3));
7444 #endif
7445 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7446     /* Alpha specific */
7447     case TARGET_NR_getxpid:
7448         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7449         return get_errno(getpid());
7450 #endif
7451 #ifdef TARGET_NR_getpid
7452     case TARGET_NR_getpid:
7453         return get_errno(getpid());
7454 #endif
7455     case TARGET_NR_mount:
7456         {
7457             /* need to look at the data field */
7458             void *p2, *p3;
7459 
7460             if (arg1) {
7461                 p = lock_user_string(arg1);
7462                 if (!p) {
7463                     return -TARGET_EFAULT;
7464                 }
7465             } else {
7466                 p = NULL;
7467             }
7468 
7469             p2 = lock_user_string(arg2);
7470             if (!p2) {
7471                 if (arg1) {
7472                     unlock_user(p, arg1, 0);
7473                 }
7474                 return -TARGET_EFAULT;
7475             }
7476 
7477             if (arg3) {
7478                 p3 = lock_user_string(arg3);
7479                 if (!p3) {
7480                     if (arg1) {
7481                         unlock_user(p, arg1, 0);
7482                     }
7483                     unlock_user(p2, arg2, 0);
7484                     return -TARGET_EFAULT;
7485                 }
7486             } else {
7487                 p3 = NULL;
7488             }
7489 
7490             /* FIXME - arg5 should be locked, but it isn't clear how to
7491              * do that since it's not guaranteed to be a NULL-terminated
7492              * string.
7493              */
7494             if (!arg5) {
7495                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7496             } else {
7497                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7498             }
7499             ret = get_errno(ret);
7500 
7501             if (arg1) {
7502                 unlock_user(p, arg1, 0);
7503             }
7504             unlock_user(p2, arg2, 0);
7505             if (arg3) {
7506                 unlock_user(p3, arg3, 0);
7507             }
7508         }
7509         return ret;
7510 #ifdef TARGET_NR_umount
7511     case TARGET_NR_umount:
7512         if (!(p = lock_user_string(arg1)))
7513             return -TARGET_EFAULT;
7514         ret = get_errno(umount(p));
7515         unlock_user(p, arg1, 0);
7516         return ret;
7517 #endif
7518 #ifdef TARGET_NR_stime /* not on alpha */
7519     case TARGET_NR_stime:
7520         {
7521             time_t host_time;
7522             if (get_user_sal(host_time, arg1))
7523                 return -TARGET_EFAULT;
7524             return get_errno(stime(&host_time));
7525         }
7526 #endif
7527 #ifdef TARGET_NR_alarm /* not on alpha */
7528     case TARGET_NR_alarm:
7529         return alarm(arg1);
7530 #endif
7531 #ifdef TARGET_NR_pause /* not on alpha */
7532     case TARGET_NR_pause:
7533         if (!block_signals()) {
7534             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7535         }
7536         return -TARGET_EINTR;
7537 #endif
7538 #ifdef TARGET_NR_utime
7539     case TARGET_NR_utime:
7540         {
7541             struct utimbuf tbuf, *host_tbuf;
7542             struct target_utimbuf *target_tbuf;
7543             if (arg2) {
7544                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7545                     return -TARGET_EFAULT;
7546                 tbuf.actime = tswapal(target_tbuf->actime);
7547                 tbuf.modtime = tswapal(target_tbuf->modtime);
7548                 unlock_user_struct(target_tbuf, arg2, 0);
7549                 host_tbuf = &tbuf;
7550             } else {
7551                 host_tbuf = NULL;
7552             }
7553             if (!(p = lock_user_string(arg1)))
7554                 return -TARGET_EFAULT;
7555             ret = get_errno(utime(p, host_tbuf));
7556             unlock_user(p, arg1, 0);
7557         }
7558         return ret;
7559 #endif
7560 #ifdef TARGET_NR_utimes
7561     case TARGET_NR_utimes:
7562         {
7563             struct timeval *tvp, tv[2];
7564             if (arg2) {
7565                 if (copy_from_user_timeval(&tv[0], arg2)
7566                     || copy_from_user_timeval(&tv[1],
7567                                               arg2 + sizeof(struct target_timeval)))
7568                     return -TARGET_EFAULT;
7569                 tvp = tv;
7570             } else {
7571                 tvp = NULL;
7572             }
7573             if (!(p = lock_user_string(arg1)))
7574                 return -TARGET_EFAULT;
7575             ret = get_errno(utimes(p, tvp));
7576             unlock_user(p, arg1, 0);
7577         }
7578         return ret;
7579 #endif
7580 #if defined(TARGET_NR_futimesat)
7581     case TARGET_NR_futimesat:
7582         {
7583             struct timeval *tvp, tv[2];
7584             if (arg3) {
7585                 if (copy_from_user_timeval(&tv[0], arg3)
7586                     || copy_from_user_timeval(&tv[1],
7587                                               arg3 + sizeof(struct target_timeval)))
7588                     return -TARGET_EFAULT;
7589                 tvp = tv;
7590             } else {
7591                 tvp = NULL;
7592             }
7593             if (!(p = lock_user_string(arg2))) {
7594                 return -TARGET_EFAULT;
7595             }
7596             ret = get_errno(futimesat(arg1, path(p), tvp));
7597             unlock_user(p, arg2, 0);
7598         }
7599         return ret;
7600 #endif
7601 #ifdef TARGET_NR_access
7602     case TARGET_NR_access:
7603         if (!(p = lock_user_string(arg1))) {
7604             return -TARGET_EFAULT;
7605         }
7606         ret = get_errno(access(path(p), arg2));
7607         unlock_user(p, arg1, 0);
7608         return ret;
7609 #endif
7610 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7611     case TARGET_NR_faccessat:
7612         if (!(p = lock_user_string(arg2))) {
7613             return -TARGET_EFAULT;
7614         }
7615         ret = get_errno(faccessat(arg1, p, arg3, 0));
7616         unlock_user(p, arg2, 0);
7617         return ret;
7618 #endif
7619 #ifdef TARGET_NR_nice /* not on alpha */
7620     case TARGET_NR_nice:
7621         return get_errno(nice(arg1));
7622 #endif
7623     case TARGET_NR_sync:
7624         sync();
7625         return 0;
7626 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7627     case TARGET_NR_syncfs:
7628         return get_errno(syncfs(arg1));
7629 #endif
7630     case TARGET_NR_kill:
7631         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7632 #ifdef TARGET_NR_rename
7633     case TARGET_NR_rename:
7634         {
7635             void *p2;
7636             p = lock_user_string(arg1);
7637             p2 = lock_user_string(arg2);
7638             if (!p || !p2)
7639                 ret = -TARGET_EFAULT;
7640             else
7641                 ret = get_errno(rename(p, p2));
7642             unlock_user(p2, arg2, 0);
7643             unlock_user(p, arg1, 0);
7644         }
7645         return ret;
7646 #endif
7647 #if defined(TARGET_NR_renameat)
7648     case TARGET_NR_renameat:
7649         {
7650             void *p2;
7651             p  = lock_user_string(arg2);
7652             p2 = lock_user_string(arg4);
7653             if (!p || !p2)
7654                 ret = -TARGET_EFAULT;
7655             else
7656                 ret = get_errno(renameat(arg1, p, arg3, p2));
7657             unlock_user(p2, arg4, 0);
7658             unlock_user(p, arg2, 0);
7659         }
7660         return ret;
7661 #endif
7662 #if defined(TARGET_NR_renameat2)
7663     case TARGET_NR_renameat2:
7664         {
7665             void *p2;
7666             p  = lock_user_string(arg2);
7667             p2 = lock_user_string(arg4);
7668             if (!p || !p2) {
7669                 ret = -TARGET_EFAULT;
7670             } else {
7671                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7672             }
7673             unlock_user(p2, arg4, 0);
7674             unlock_user(p, arg2, 0);
7675         }
7676         return ret;
7677 #endif
7678 #ifdef TARGET_NR_mkdir
7679     case TARGET_NR_mkdir:
7680         if (!(p = lock_user_string(arg1)))
7681             return -TARGET_EFAULT;
7682         ret = get_errno(mkdir(p, arg2));
7683         unlock_user(p, arg1, 0);
7684         return ret;
7685 #endif
7686 #if defined(TARGET_NR_mkdirat)
7687     case TARGET_NR_mkdirat:
7688         if (!(p = lock_user_string(arg2)))
7689             return -TARGET_EFAULT;
7690         ret = get_errno(mkdirat(arg1, p, arg3));
7691         unlock_user(p, arg2, 0);
7692         return ret;
7693 #endif
7694 #ifdef TARGET_NR_rmdir
7695     case TARGET_NR_rmdir:
7696         if (!(p = lock_user_string(arg1)))
7697             return -TARGET_EFAULT;
7698         ret = get_errno(rmdir(p));
7699         unlock_user(p, arg1, 0);
7700         return ret;
7701 #endif
7702     case TARGET_NR_dup:
7703         ret = get_errno(dup(arg1));
7704         if (ret >= 0) {
7705             fd_trans_dup(arg1, ret);
7706         }
7707         return ret;
7708 #ifdef TARGET_NR_pipe
7709     case TARGET_NR_pipe:
7710         return do_pipe(cpu_env, arg1, 0, 0);
7711 #endif
7712 #ifdef TARGET_NR_pipe2
7713     case TARGET_NR_pipe2:
7714         return do_pipe(cpu_env, arg1,
7715                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7716 #endif
7717     case TARGET_NR_times:
7718         {
7719             struct target_tms *tmsp;
7720             struct tms tms;
7721             ret = get_errno(times(&tms));
7722             if (arg1) {
7723                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7724                 if (!tmsp)
7725                     return -TARGET_EFAULT;
7726                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7727                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7728                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7729                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7730             }
7731             if (!is_error(ret))
7732                 ret = host_to_target_clock_t(ret);
7733         }
7734         return ret;
7735     case TARGET_NR_acct:
7736         if (arg1 == 0) {
7737             ret = get_errno(acct(NULL));
7738         } else {
7739             if (!(p = lock_user_string(arg1))) {
7740                 return -TARGET_EFAULT;
7741             }
7742             ret = get_errno(acct(path(p)));
7743             unlock_user(p, arg1, 0);
7744         }
7745         return ret;
7746 #ifdef TARGET_NR_umount2
7747     case TARGET_NR_umount2:
7748         if (!(p = lock_user_string(arg1)))
7749             return -TARGET_EFAULT;
7750         ret = get_errno(umount2(p, arg2));
7751         unlock_user(p, arg1, 0);
7752         return ret;
7753 #endif
7754     case TARGET_NR_ioctl:
7755         return do_ioctl(arg1, arg2, arg3);
7756 #ifdef TARGET_NR_fcntl
7757     case TARGET_NR_fcntl:
7758         return do_fcntl(arg1, arg2, arg3);
7759 #endif
7760     case TARGET_NR_setpgid:
7761         return get_errno(setpgid(arg1, arg2));
7762     case TARGET_NR_umask:
7763         return get_errno(umask(arg1));
7764     case TARGET_NR_chroot:
7765         if (!(p = lock_user_string(arg1)))
7766             return -TARGET_EFAULT;
7767         ret = get_errno(chroot(p));
7768         unlock_user(p, arg1, 0);
7769         return ret;
7770 #ifdef TARGET_NR_dup2
7771     case TARGET_NR_dup2:
7772         ret = get_errno(dup2(arg1, arg2));
7773         if (ret >= 0) {
7774             fd_trans_dup(arg1, arg2);
7775         }
7776         return ret;
7777 #endif
7778 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7779     case TARGET_NR_dup3:
7780     {
7781         int host_flags;
7782 
7783         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7784             return -EINVAL;
7785         }
7786         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7787         ret = get_errno(dup3(arg1, arg2, host_flags));
7788         if (ret >= 0) {
7789             fd_trans_dup(arg1, arg2);
7790         }
7791         return ret;
7792     }
7793 #endif
7794 #ifdef TARGET_NR_getppid /* not on alpha */
7795     case TARGET_NR_getppid:
7796         return get_errno(getppid());
7797 #endif
7798 #ifdef TARGET_NR_getpgrp
7799     case TARGET_NR_getpgrp:
7800         return get_errno(getpgrp());
7801 #endif
7802     case TARGET_NR_setsid:
7803         return get_errno(setsid());
7804 #ifdef TARGET_NR_sigaction
7805     case TARGET_NR_sigaction:
7806         {
7807 #if defined(TARGET_ALPHA)
7808             struct target_sigaction act, oact, *pact = 0;
7809             struct target_old_sigaction *old_act;
7810             if (arg2) {
7811                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7812                     return -TARGET_EFAULT;
7813                 act._sa_handler = old_act->_sa_handler;
7814                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7815                 act.sa_flags = old_act->sa_flags;
7816                 act.sa_restorer = 0;
7817                 unlock_user_struct(old_act, arg2, 0);
7818                 pact = &act;
7819             }
7820             ret = get_errno(do_sigaction(arg1, pact, &oact));
7821             if (!is_error(ret) && arg3) {
7822                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7823                     return -TARGET_EFAULT;
7824                 old_act->_sa_handler = oact._sa_handler;
7825                 old_act->sa_mask = oact.sa_mask.sig[0];
7826                 old_act->sa_flags = oact.sa_flags;
7827                 unlock_user_struct(old_act, arg3, 1);
7828             }
7829 #elif defined(TARGET_MIPS)
7830             struct target_sigaction act, oact, *pact, *old_act;
7831 
7832             if (arg2) {
7833                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7834                     return -TARGET_EFAULT;
7835                 act._sa_handler = old_act->_sa_handler;
7836                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7837                 act.sa_flags = old_act->sa_flags;
7838                 unlock_user_struct(old_act, arg2, 0);
7839                 pact = &act;
7840             } else {
7841                 pact = NULL;
7842             }
7843 
7844             ret = get_errno(do_sigaction(arg1, pact, &oact));
7845 
7846             if (!is_error(ret) && arg3) {
7847                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7848                     return -TARGET_EFAULT;
7849                 old_act->_sa_handler = oact._sa_handler;
7850                 old_act->sa_flags = oact.sa_flags;
7851                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7852                 old_act->sa_mask.sig[1] = 0;
7853                 old_act->sa_mask.sig[2] = 0;
7854                 old_act->sa_mask.sig[3] = 0;
7855                 unlock_user_struct(old_act, arg3, 1);
7856             }
7857 #else
7858             struct target_old_sigaction *old_act;
7859             struct target_sigaction act, oact, *pact;
7860             if (arg2) {
7861                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7862                     return -TARGET_EFAULT;
7863                 act._sa_handler = old_act->_sa_handler;
7864                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7865                 act.sa_flags = old_act->sa_flags;
7866                 act.sa_restorer = old_act->sa_restorer;
7867 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7868                 act.ka_restorer = 0;
7869 #endif
7870                 unlock_user_struct(old_act, arg2, 0);
7871                 pact = &act;
7872             } else {
7873                 pact = NULL;
7874             }
7875             ret = get_errno(do_sigaction(arg1, pact, &oact));
7876             if (!is_error(ret) && arg3) {
7877                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7878                     return -TARGET_EFAULT;
7879                 old_act->_sa_handler = oact._sa_handler;
7880                 old_act->sa_mask = oact.sa_mask.sig[0];
7881                 old_act->sa_flags = oact.sa_flags;
7882                 old_act->sa_restorer = oact.sa_restorer;
7883                 unlock_user_struct(old_act, arg3, 1);
7884             }
7885 #endif
7886         }
7887         return ret;
7888 #endif
7889     case TARGET_NR_rt_sigaction:
7890         {
7891 #if defined(TARGET_ALPHA)
7892             /* For Alpha and SPARC this is a 5 argument syscall, with
7893              * a 'restorer' parameter which must be copied into the
7894              * sa_restorer field of the sigaction struct.
7895              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7896              * and arg5 is the sigsetsize.
7897              * Alpha also has a separate rt_sigaction struct that it uses
7898              * here; SPARC uses the usual sigaction struct.
7899              */
7900             struct target_rt_sigaction *rt_act;
7901             struct target_sigaction act, oact, *pact = 0;
7902 
7903             if (arg4 != sizeof(target_sigset_t)) {
7904                 return -TARGET_EINVAL;
7905             }
7906             if (arg2) {
7907                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7908                     return -TARGET_EFAULT;
7909                 act._sa_handler = rt_act->_sa_handler;
7910                 act.sa_mask = rt_act->sa_mask;
7911                 act.sa_flags = rt_act->sa_flags;
7912                 act.sa_restorer = arg5;
7913                 unlock_user_struct(rt_act, arg2, 0);
7914                 pact = &act;
7915             }
7916             ret = get_errno(do_sigaction(arg1, pact, &oact));
7917             if (!is_error(ret) && arg3) {
7918                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7919                     return -TARGET_EFAULT;
7920                 rt_act->_sa_handler = oact._sa_handler;
7921                 rt_act->sa_mask = oact.sa_mask;
7922                 rt_act->sa_flags = oact.sa_flags;
7923                 unlock_user_struct(rt_act, arg3, 1);
7924             }
7925 #else
7926 #ifdef TARGET_SPARC
7927             target_ulong restorer = arg4;
7928             target_ulong sigsetsize = arg5;
7929 #else
7930             target_ulong sigsetsize = arg4;
7931 #endif
7932             struct target_sigaction *act;
7933             struct target_sigaction *oact;
7934 
7935             if (sigsetsize != sizeof(target_sigset_t)) {
7936                 return -TARGET_EINVAL;
7937             }
7938             if (arg2) {
7939                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
7940                     return -TARGET_EFAULT;
7941                 }
7942 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7943                 act->ka_restorer = restorer;
7944 #endif
7945             } else {
7946                 act = NULL;
7947             }
7948             if (arg3) {
7949                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7950                     ret = -TARGET_EFAULT;
7951                     goto rt_sigaction_fail;
7952                 }
7953             } else
7954                 oact = NULL;
7955             ret = get_errno(do_sigaction(arg1, act, oact));
7956         rt_sigaction_fail:
7957             if (act)
7958                 unlock_user_struct(act, arg2, 0);
7959             if (oact)
7960                 unlock_user_struct(oact, arg3, 1);
7961 #endif
7962         }
7963         return ret;
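    /*
     * sgetmask/ssetmask operate on the old-style signal mask, which only
     * covers the first word of signals, hence the old_sigset conversion
     * helpers.
     */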
7964 #ifdef TARGET_NR_sgetmask /* not on alpha */
7965     case TARGET_NR_sgetmask:
7966         {
7967             sigset_t cur_set;
7968             abi_ulong target_set;
7969             ret = do_sigprocmask(0, NULL, &cur_set);
7970             if (!ret) {
7971                 host_to_target_old_sigset(&target_set, &cur_set);
7972                 ret = target_set;
7973             }
7974         }
7975         return ret;
7976 #endif
7977 #ifdef TARGET_NR_ssetmask /* not on alpha */
7978     case TARGET_NR_ssetmask:
7979         {
7980             sigset_t set, oset;
7981             abi_ulong target_set = arg1;
7982             target_to_host_old_sigset(&set, &target_set);
7983             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7984             if (!ret) {
7985                 host_to_target_old_sigset(&target_set, &oset);
7986                 ret = target_set;
7987             }
7988         }
7989         return ret;
7990 #endif
7991 #ifdef TARGET_NR_sigprocmask
7992     case TARGET_NR_sigprocmask:
7993         {
7994 #if defined(TARGET_ALPHA)
7995             sigset_t set, oldset;
7996             abi_ulong mask;
7997             int how;
7998 
7999             switch (arg1) {
8000             case TARGET_SIG_BLOCK:
8001                 how = SIG_BLOCK;
8002                 break;
8003             case TARGET_SIG_UNBLOCK:
8004                 how = SIG_UNBLOCK;
8005                 break;
8006             case TARGET_SIG_SETMASK:
8007                 how = SIG_SETMASK;
8008                 break;
8009             default:
8010                 return -TARGET_EINVAL;
8011             }
8012             mask = arg2;
8013             target_to_host_old_sigset(&set, &mask);
8014 
8015             ret = do_sigprocmask(how, &set, &oldset);
8016             if (!is_error(ret)) {
8017                 host_to_target_old_sigset(&mask, &oldset);
8018                 ret = mask;
8019                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8020             }
8021 #else
8022             sigset_t set, oldset, *set_ptr;
8023             int how;
8024 
8025             if (arg2) {
8026                 switch (arg1) {
8027                 case TARGET_SIG_BLOCK:
8028                     how = SIG_BLOCK;
8029                     break;
8030                 case TARGET_SIG_UNBLOCK:
8031                     how = SIG_UNBLOCK;
8032                     break;
8033                 case TARGET_SIG_SETMASK:
8034                     how = SIG_SETMASK;
8035                     break;
8036                 default:
8037                     return -TARGET_EINVAL;
8038                 }
8039                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8040                     return -TARGET_EFAULT;
8041                 target_to_host_old_sigset(&set, p);
8042                 unlock_user(p, arg2, 0);
8043                 set_ptr = &set;
8044             } else {
8045                 how = 0;
8046                 set_ptr = NULL;
8047             }
8048             ret = do_sigprocmask(how, set_ptr, &oldset);
8049             if (!is_error(ret) && arg3) {
8050                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8051                     return -TARGET_EFAULT;
8052                 host_to_target_old_sigset(p, &oldset);
8053                 unlock_user(p, arg3, sizeof(target_sigset_t));
8054             }
8055 #endif
8056         }
8057         return ret;
8058 #endif
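    /*
     * As for sigprocmask above, 'how' only matters when a new set is
     * supplied; with a NULL set the call simply queries the current mask.
     */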
8059     case TARGET_NR_rt_sigprocmask:
8060         {
8061             int how = arg1;
8062             sigset_t set, oldset, *set_ptr;
8063 
8064             if (arg4 != sizeof(target_sigset_t)) {
8065                 return -TARGET_EINVAL;
8066             }
8067 
8068             if (arg2) {
8069                 switch(how) {
8070                 case TARGET_SIG_BLOCK:
8071                     how = SIG_BLOCK;
8072                     break;
8073                 case TARGET_SIG_UNBLOCK:
8074                     how = SIG_UNBLOCK;
8075                     break;
8076                 case TARGET_SIG_SETMASK:
8077                     how = SIG_SETMASK;
8078                     break;
8079                 default:
8080                     return -TARGET_EINVAL;
8081                 }
8082                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8083                     return -TARGET_EFAULT;
8084                 target_to_host_sigset(&set, p);
8085                 unlock_user(p, arg2, 0);
8086                 set_ptr = &set;
8087             } else {
8088                 how = 0;
8089                 set_ptr = NULL;
8090             }
8091             ret = do_sigprocmask(how, set_ptr, &oldset);
8092             if (!is_error(ret) && arg3) {
8093                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8094                     return -TARGET_EFAULT;
8095                 host_to_target_sigset(p, &oldset);
8096                 unlock_user(p, arg3, sizeof(target_sigset_t));
8097             }
8098         }
8099         return ret;
8100 #ifdef TARGET_NR_sigpending
8101     case TARGET_NR_sigpending:
8102         {
8103             sigset_t set;
8104             ret = get_errno(sigpending(&set));
8105             if (!is_error(ret)) {
8106                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8107                     return -TARGET_EFAULT;
8108                 host_to_target_old_sigset(p, &set);
8109                 unlock_user(p, arg1, sizeof(target_sigset_t));
8110             }
8111         }
8112         return ret;
8113 #endif
8114     case TARGET_NR_rt_sigpending:
8115         {
8116             sigset_t set;
8117 
8118             /* Yes, this check is >, not != like most. We follow the kernel's
8119              * logic and it does it like this because it implements
8120              * NR_sigpending through the same code path, and in that case
8121              * the old_sigset_t is smaller in size.
8122              */
8123             if (arg2 > sizeof(target_sigset_t)) {
8124                 return -TARGET_EINVAL;
8125             }
8126 
8127             ret = get_errno(sigpending(&set));
8128             if (!is_error(ret)) {
8129                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8130                     return -TARGET_EFAULT;
8131                 host_to_target_sigset(p, &set);
8132                 unlock_user(p, arg1, sizeof(target_sigset_t));
8133             }
8134         }
8135         return ret;
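    /*
     * sigsuspend/rt_sigsuspend stash the temporary mask in the TaskState;
     * in_sigsuspend tells the signal delivery code that the original mask
     * still needs restoring once a signal has been taken.
     */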
8136 #ifdef TARGET_NR_sigsuspend
8137     case TARGET_NR_sigsuspend:
8138         {
8139             TaskState *ts = cpu->opaque;
8140 #if defined(TARGET_ALPHA)
8141             abi_ulong mask = arg1;
8142             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8143 #else
8144             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8145                 return -TARGET_EFAULT;
8146             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8147             unlock_user(p, arg1, 0);
8148 #endif
8149             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8150                                                SIGSET_T_SIZE));
8151             if (ret != -TARGET_ERESTARTSYS) {
8152                 ts->in_sigsuspend = 1;
8153             }
8154         }
8155         return ret;
8156 #endif
8157     case TARGET_NR_rt_sigsuspend:
8158         {
8159             TaskState *ts = cpu->opaque;
8160 
8161             if (arg2 != sizeof(target_sigset_t)) {
8162                 return -TARGET_EINVAL;
8163             }
8164             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8165                 return -TARGET_EFAULT;
8166             target_to_host_sigset(&ts->sigsuspend_mask, p);
8167             unlock_user(p, arg1, 0);
8168             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8169                                                SIGSET_T_SIZE));
8170             if (ret != -TARGET_ERESTARTSYS) {
8171                 ts->in_sigsuspend = 1;
8172             }
8173         }
8174         return ret;
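    /*
     * On success rt_sigtimedwait returns a host signal number, which has
     * to be mapped back to the target's signal numbering.
     */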
8175     case TARGET_NR_rt_sigtimedwait:
8176         {
8177             sigset_t set;
8178             struct timespec uts, *puts;
8179             siginfo_t uinfo;
8180 
8181             if (arg4 != sizeof(target_sigset_t)) {
8182                 return -TARGET_EINVAL;
8183             }
8184 
8185             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8186                 return -TARGET_EFAULT;
8187             target_to_host_sigset(&set, p);
8188             unlock_user(p, arg1, 0);
8189             if (arg3) {
8190                 puts = &uts;
8191                 target_to_host_timespec(puts, arg3);
8192             } else {
8193                 puts = NULL;
8194             }
8195             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8196                                                  SIGSET_T_SIZE));
8197             if (!is_error(ret)) {
8198                 if (arg2) {
8199                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8200                                   0);
8201                     if (!p) {
8202                         return -TARGET_EFAULT;
8203                     }
8204                     host_to_target_siginfo(p, &uinfo);
8205                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8206                 }
8207                 ret = host_to_target_signal(ret);
8208             }
8209         }
8210         return ret;
8211     case TARGET_NR_rt_sigqueueinfo:
8212         {
8213             siginfo_t uinfo;
8214 
8215             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8216             if (!p) {
8217                 return -TARGET_EFAULT;
8218             }
8219             target_to_host_siginfo(&uinfo, p);
8220             unlock_user(p, arg3, 0);
8221             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8222         }
8223         return ret;
8224     case TARGET_NR_rt_tgsigqueueinfo:
8225         {
8226             siginfo_t uinfo;
8227 
8228             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8229             if (!p) {
8230                 return -TARGET_EFAULT;
8231             }
8232             target_to_host_siginfo(&uinfo, p);
8233             unlock_user(p, arg4, 0);
8234             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8235         }
8236         return ret;
8237 #ifdef TARGET_NR_sigreturn
8238     case TARGET_NR_sigreturn:
8239         if (block_signals()) {
8240             return -TARGET_ERESTARTSYS;
8241         }
8242         return do_sigreturn(cpu_env);
8243 #endif
8244     case TARGET_NR_rt_sigreturn:
8245         if (block_signals()) {
8246             return -TARGET_ERESTARTSYS;
8247         }
8248         return do_rt_sigreturn(cpu_env);
8249     case TARGET_NR_sethostname:
8250         if (!(p = lock_user_string(arg1)))
8251             return -TARGET_EFAULT;
8252         ret = get_errno(sethostname(p, arg2));
8253         unlock_user(p, arg1, 0);
8254         return ret;
8255 #ifdef TARGET_NR_setrlimit
8256     case TARGET_NR_setrlimit:
8257         {
8258             int resource = target_to_host_resource(arg1);
8259             struct target_rlimit *target_rlim;
8260             struct rlimit rlim;
8261             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8262                 return -TARGET_EFAULT;
8263             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8264             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8265             unlock_user_struct(target_rlim, arg2, 0);
8266             /*
8267              * If we just passed through resource limit settings for memory then
8268              * they would also apply to QEMU's own allocations, and QEMU will
8269              * crash or hang or die if its allocations fail. Ideally we would
8270              * track the guest allocations in QEMU and apply the limits ourselves.
8271              * For now, just tell the guest the call succeeded but don't actually
8272              * limit anything.
8273              */
8274             if (resource != RLIMIT_AS &&
8275                 resource != RLIMIT_DATA &&
8276                 resource != RLIMIT_STACK) {
8277                 return get_errno(setrlimit(resource, &rlim));
8278             } else {
8279                 return 0;
8280             }
8281         }
8282 #endif
8283 #ifdef TARGET_NR_getrlimit
8284     case TARGET_NR_getrlimit:
8285         {
8286             int resource = target_to_host_resource(arg1);
8287             struct target_rlimit *target_rlim;
8288             struct rlimit rlim;
8289 
8290             ret = get_errno(getrlimit(resource, &rlim));
8291             if (!is_error(ret)) {
8292                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8293                     return -TARGET_EFAULT;
8294                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8295                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8296                 unlock_user_struct(target_rlim, arg2, 1);
8297             }
8298         }
8299         return ret;
8300 #endif
8301     case TARGET_NR_getrusage:
8302         {
8303             struct rusage rusage;
8304             ret = get_errno(getrusage(arg1, &rusage));
8305             if (!is_error(ret)) {
8306                 ret = host_to_target_rusage(arg2, &rusage);
8307             }
8308         }
8309         return ret;
8310     case TARGET_NR_gettimeofday:
8311         {
8312             struct timeval tv;
8313             ret = get_errno(gettimeofday(&tv, NULL));
8314             if (!is_error(ret)) {
8315                 if (copy_to_user_timeval(arg1, &tv))
8316                     return -TARGET_EFAULT;
8317             }
8318         }
8319         return ret;
8320     case TARGET_NR_settimeofday:
8321         {
8322             struct timeval tv, *ptv = NULL;
8323             struct timezone tz, *ptz = NULL;
8324 
8325             if (arg1) {
8326                 if (copy_from_user_timeval(&tv, arg1)) {
8327                     return -TARGET_EFAULT;
8328                 }
8329                 ptv = &tv;
8330             }
8331 
8332             if (arg2) {
8333                 if (copy_from_user_timezone(&tz, arg2)) {
8334                     return -TARGET_EFAULT;
8335                 }
8336                 ptz = &tz;
8337             }
8338 
8339             return get_errno(settimeofday(ptv, ptz));
8340         }
8341 #if defined(TARGET_NR_select)
8342     case TARGET_NR_select:
8343 #if defined(TARGET_WANT_NI_OLD_SELECT)
8344         /* some architectures used to have old_select here
8345          * but now return -ENOSYS for it.
8346          */
8347         ret = -TARGET_ENOSYS;
8348 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8349         ret = do_old_select(arg1);
8350 #else
8351         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8352 #endif
8353         return ret;
8354 #endif
8355 #ifdef TARGET_NR_pselect6
8356     case TARGET_NR_pselect6:
8357         {
8358             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8359             fd_set rfds, wfds, efds;
8360             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8361             struct timespec ts, *ts_ptr;
8362 
8363             /*
8364              * The 6th arg is actually two args smashed together,
8365              * so we cannot use the C library.
8366              */
8367             sigset_t set;
8368             struct {
8369                 sigset_t *set;
8370                 size_t size;
8371             } sig, *sig_ptr;
8372 
8373             abi_ulong arg_sigset, arg_sigsize, *arg7;
8374             target_sigset_t *target_sigset;
8375 
8376             n = arg1;
8377             rfd_addr = arg2;
8378             wfd_addr = arg3;
8379             efd_addr = arg4;
8380             ts_addr = arg5;
8381 
8382             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8383             if (ret) {
8384                 return ret;
8385             }
8386             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8387             if (ret) {
8388                 return ret;
8389             }
8390             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8391             if (ret) {
8392                 return ret;
8393             }
8394 
8395             /*
8396              * This takes a timespec, and not a timeval, so we cannot
8397              * use the do_select() helper ...
8398              */
8399             if (ts_addr) {
8400                 if (target_to_host_timespec(&ts, ts_addr)) {
8401                     return -TARGET_EFAULT;
8402                 }
8403                 ts_ptr = &ts;
8404             } else {
8405                 ts_ptr = NULL;
8406             }
8407 
8408             /* Extract the two packed args for the sigset */
8409             if (arg6) {
8410                 sig_ptr = &sig;
8411                 sig.size = SIGSET_T_SIZE;
8412 
8413                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8414                 if (!arg7) {
8415                     return -TARGET_EFAULT;
8416                 }
8417                 arg_sigset = tswapal(arg7[0]);
8418                 arg_sigsize = tswapal(arg7[1]);
8419                 unlock_user(arg7, arg6, 0);
8420 
8421                 if (arg_sigset) {
8422                     sig.set = &set;
8423                     if (arg_sigsize != sizeof(*target_sigset)) {
8424                         /* Like the kernel, we enforce correct size sigsets */
8425                         return -TARGET_EINVAL;
8426                     }
8427                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8428                                               sizeof(*target_sigset), 1);
8429                     if (!target_sigset) {
8430                         return -TARGET_EFAULT;
8431                     }
8432                     target_to_host_sigset(&set, target_sigset);
8433                     unlock_user(target_sigset, arg_sigset, 0);
8434                 } else {
8435                     sig.set = NULL;
8436                 }
8437             } else {
8438                 sig_ptr = NULL;
8439             }
8440 
8441             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8442                                           ts_ptr, sig_ptr));
8443 
8444             if (!is_error(ret)) {
8445                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8446                     return -TARGET_EFAULT;
8447                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8448                     return -TARGET_EFAULT;
8449                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8450                     return -TARGET_EFAULT;
8451 
8452                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8453                     return -TARGET_EFAULT;
8454             }
8455         }
8456         return ret;
8457 #endif
8458 #ifdef TARGET_NR_symlink
8459     case TARGET_NR_symlink:
8460         {
8461             void *p2;
8462             p = lock_user_string(arg1);
8463             p2 = lock_user_string(arg2);
8464             if (!p || !p2)
8465                 ret = -TARGET_EFAULT;
8466             else
8467                 ret = get_errno(symlink(p, p2));
8468             unlock_user(p2, arg2, 0);
8469             unlock_user(p, arg1, 0);
8470         }
8471         return ret;
8472 #endif
8473 #if defined(TARGET_NR_symlinkat)
8474     case TARGET_NR_symlinkat:
8475         {
8476             void *p2;
8477             p  = lock_user_string(arg1);
8478             p2 = lock_user_string(arg3);
8479             if (!p || !p2)
8480                 ret = -TARGET_EFAULT;
8481             else
8482                 ret = get_errno(symlinkat(p, arg2, p2));
8483             unlock_user(p2, arg3, 0);
8484             unlock_user(p, arg1, 0);
8485         }
8486         return ret;
8487 #endif
8488 #ifdef TARGET_NR_readlink
8489     case TARGET_NR_readlink:
8490         {
8491             void *p2;
8492             p = lock_user_string(arg1);
8493             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8494             if (!p || !p2) {
8495                 ret = -TARGET_EFAULT;
8496             } else if (!arg3) {
8497                 /* Short circuit this for the magic exe check. */
8498                 ret = -TARGET_EINVAL;
8499             } else if (is_proc_myself((const char *)p, "exe")) {
8500                 char real[PATH_MAX], *temp;
8501                 temp = realpath(exec_path, real);
8502                 /* Return value is # of bytes that we wrote to the buffer. */
8503                 if (temp == NULL) {
8504                     ret = get_errno(-1);
8505                 } else {
8506                     /* Don't worry about sign mismatch as earlier mapping
8507                      * logic would have thrown a bad address error. */
8508                     ret = MIN(strlen(real), arg3);
8509                     /* We cannot NUL terminate the string. */
8510                     memcpy(p2, real, ret);
8511                 }
8512             } else {
8513                 ret = get_errno(readlink(path(p), p2, arg3));
8514             }
8515             unlock_user(p2, arg2, ret);
8516             unlock_user(p, arg1, 0);
8517         }
8518         return ret;
8519 #endif
8520 #if defined(TARGET_NR_readlinkat)
8521     case TARGET_NR_readlinkat:
8522         {
8523             void *p2;
8524             p  = lock_user_string(arg2);
8525             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8526             if (!p || !p2) {
8527                 ret = -TARGET_EFAULT;
8528             } else if (is_proc_myself((const char *)p, "exe")) {
8529                 char real[PATH_MAX], *temp;
8530                 temp = realpath(exec_path, real);
8531                 ret = temp == NULL ? get_errno(-1) : strlen(real);
8532                 snprintf((char *)p2, arg4, "%s", real);
8533             } else {
8534                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8535             }
8536             unlock_user(p2, arg3, ret);
8537             unlock_user(p, arg2, 0);
8538         }
8539         return ret;
8540 #endif
8541 #ifdef TARGET_NR_swapon
8542     case TARGET_NR_swapon:
8543         if (!(p = lock_user_string(arg1)))
8544             return -TARGET_EFAULT;
8545         ret = get_errno(swapon(p, arg2));
8546         unlock_user(p, arg1, 0);
8547         return ret;
8548 #endif
8549     case TARGET_NR_reboot:
8550         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8551            /* arg4 is only used for RESTART2; it must be ignored in all other cases */
8552            p = lock_user_string(arg4);
8553            if (!p) {
8554                return -TARGET_EFAULT;
8555            }
8556            ret = get_errno(reboot(arg1, arg2, arg3, p));
8557            unlock_user(p, arg4, 0);
8558         } else {
8559            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8560         }
8561         return ret;
8562 #ifdef TARGET_NR_mmap
8563     case TARGET_NR_mmap:
8564 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8565     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8566     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8567     || defined(TARGET_S390X)
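        /*
         * These targets use the old mmap() calling convention: arg1 points
         * at a block of six arguments in guest memory instead of passing
         * them in registers.
         */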
8568         {
8569             abi_ulong *v;
8570             abi_ulong v1, v2, v3, v4, v5, v6;
8571             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8572                 return -TARGET_EFAULT;
8573             v1 = tswapal(v[0]);
8574             v2 = tswapal(v[1]);
8575             v3 = tswapal(v[2]);
8576             v4 = tswapal(v[3]);
8577             v5 = tswapal(v[4]);
8578             v6 = tswapal(v[5]);
8579             unlock_user(v, arg1, 0);
8580             ret = get_errno(target_mmap(v1, v2, v3,
8581                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8582                                         v5, v6));
8583         }
8584 #else
8585         ret = get_errno(target_mmap(arg1, arg2, arg3,
8586                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8587                                     arg5,
8588                                     arg6));
8589 #endif
8590         return ret;
8591 #endif
8592 #ifdef TARGET_NR_mmap2
8593     case TARGET_NR_mmap2:
8594 #ifndef MMAP_SHIFT
8595 #define MMAP_SHIFT 12
8596 #endif
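        /*
         * The mmap2 offset (arg6) is expressed in units of 1 << MMAP_SHIFT
         * bytes (4096 by default), so shift it back into a byte offset.
         */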
8597         ret = target_mmap(arg1, arg2, arg3,
8598                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8599                           arg5, arg6 << MMAP_SHIFT);
8600         return get_errno(ret);
8601 #endif
8602     case TARGET_NR_munmap:
8603         return get_errno(target_munmap(arg1, arg2));
8604     case TARGET_NR_mprotect:
8605         {
8606             TaskState *ts = cpu->opaque;
8607             /* Special hack to detect libc making the stack executable.  */
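            /* PROT_GROWSDOWN is not handled by target_mprotect(), so extend
             * the affected range down to the guest stack limit by hand and
             * drop the flag before passing the request on.
             */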
8608             if ((arg3 & PROT_GROWSDOWN)
8609                 && arg1 >= ts->info->stack_limit
8610                 && arg1 <= ts->info->start_stack) {
8611                 arg3 &= ~PROT_GROWSDOWN;
8612                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8613                 arg1 = ts->info->stack_limit;
8614             }
8615         }
8616         return get_errno(target_mprotect(arg1, arg2, arg3));
8617 #ifdef TARGET_NR_mremap
8618     case TARGET_NR_mremap:
8619         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8620 #endif
8621         /* ??? msync/mlock/munlock are broken for softmmu.  */
8622 #ifdef TARGET_NR_msync
8623     case TARGET_NR_msync:
8624         return get_errno(msync(g2h(arg1), arg2, arg3));
8625 #endif
8626 #ifdef TARGET_NR_mlock
8627     case TARGET_NR_mlock:
8628         return get_errno(mlock(g2h(arg1), arg2));
8629 #endif
8630 #ifdef TARGET_NR_munlock
8631     case TARGET_NR_munlock:
8632         return get_errno(munlock(g2h(arg1), arg2));
8633 #endif
8634 #ifdef TARGET_NR_mlockall
8635     case TARGET_NR_mlockall:
8636         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8637 #endif
8638 #ifdef TARGET_NR_munlockall
8639     case TARGET_NR_munlockall:
8640         return get_errno(munlockall());
8641 #endif
8642 #ifdef TARGET_NR_truncate
8643     case TARGET_NR_truncate:
8644         if (!(p = lock_user_string(arg1)))
8645             return -TARGET_EFAULT;
8646         ret = get_errno(truncate(p, arg2));
8647         unlock_user(p, arg1, 0);
8648         return ret;
8649 #endif
8650 #ifdef TARGET_NR_ftruncate
8651     case TARGET_NR_ftruncate:
8652         return get_errno(ftruncate(arg1, arg2));
8653 #endif
8654     case TARGET_NR_fchmod:
8655         return get_errno(fchmod(arg1, arg2));
8656 #if defined(TARGET_NR_fchmodat)
8657     case TARGET_NR_fchmodat:
8658         if (!(p = lock_user_string(arg2)))
8659             return -TARGET_EFAULT;
8660         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8661         unlock_user(p, arg2, 0);
8662         return ret;
8663 #endif
8664     case TARGET_NR_getpriority:
8665         /* Note that negative values are valid for getpriority, so we must
8666            differentiate based on errno settings.  */
8667         errno = 0;
8668         ret = getpriority(arg1, arg2);
8669         if (ret == -1 && errno != 0) {
8670             return -host_to_target_errno(errno);
8671         }
8672 #ifdef TARGET_ALPHA
8673         /* Return value is the unbiased priority.  Signal no error.  */
8674         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8675 #else
8676         /* Return value is a biased priority to avoid negative numbers.  */
8677         ret = 20 - ret;
8678 #endif
8679         return ret;
8680     case TARGET_NR_setpriority:
8681         return get_errno(setpriority(arg1, arg2, arg3));
8682 #ifdef TARGET_NR_statfs
8683     case TARGET_NR_statfs:
8684         if (!(p = lock_user_string(arg1))) {
8685             return -TARGET_EFAULT;
8686         }
8687         ret = get_errno(statfs(path(p), &stfs));
8688         unlock_user(p, arg1, 0);
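    /* fstatfs below shares this host-to-target conversion via a goto */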
8689     convert_statfs:
8690         if (!is_error(ret)) {
8691             struct target_statfs *target_stfs;
8692 
8693             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8694                 return -TARGET_EFAULT;
8695             __put_user(stfs.f_type, &target_stfs->f_type);
8696             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8697             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8698             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8699             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8700             __put_user(stfs.f_files, &target_stfs->f_files);
8701             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8702             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8703             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8704             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8705             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8706 #ifdef _STATFS_F_FLAGS
8707             __put_user(stfs.f_flags, &target_stfs->f_flags);
8708 #else
8709             __put_user(0, &target_stfs->f_flags);
8710 #endif
8711             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8712             unlock_user_struct(target_stfs, arg2, 1);
8713         }
8714         return ret;
8715 #endif
8716 #ifdef TARGET_NR_fstatfs
8717     case TARGET_NR_fstatfs:
8718         ret = get_errno(fstatfs(arg1, &stfs));
8719         goto convert_statfs;
8720 #endif
8721 #ifdef TARGET_NR_statfs64
8722     case TARGET_NR_statfs64:
8723         if (!(p = lock_user_string(arg1))) {
8724             return -TARGET_EFAULT;
8725         }
8726         ret = get_errno(statfs(path(p), &stfs));
8727         unlock_user(p, arg1, 0);
8728     convert_statfs64:
8729         if (!is_error(ret)) {
8730             struct target_statfs64 *target_stfs;
8731 
8732             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8733                 return -TARGET_EFAULT;
8734             __put_user(stfs.f_type, &target_stfs->f_type);
8735             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8736             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8737             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8738             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8739             __put_user(stfs.f_files, &target_stfs->f_files);
8740             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8741             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8742             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8743             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8744             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8745             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8746             unlock_user_struct(target_stfs, arg3, 1);
8747         }
8748         return ret;
8749     case TARGET_NR_fstatfs64:
8750         ret = get_errno(fstatfs(arg1, &stfs));
8751         goto convert_statfs64;
8752 #endif
8753 #ifdef TARGET_NR_socketcall
8754     case TARGET_NR_socketcall:
8755         return do_socketcall(arg1, arg2);
8756 #endif
8757 #ifdef TARGET_NR_accept
8758     case TARGET_NR_accept:
8759         return do_accept4(arg1, arg2, arg3, 0);
8760 #endif
8761 #ifdef TARGET_NR_accept4
8762     case TARGET_NR_accept4:
8763         return do_accept4(arg1, arg2, arg3, arg4);
8764 #endif
8765 #ifdef TARGET_NR_bind
8766     case TARGET_NR_bind:
8767         return do_bind(arg1, arg2, arg3);
8768 #endif
8769 #ifdef TARGET_NR_connect
8770     case TARGET_NR_connect:
8771         return do_connect(arg1, arg2, arg3);
8772 #endif
8773 #ifdef TARGET_NR_getpeername
8774     case TARGET_NR_getpeername:
8775         return do_getpeername(arg1, arg2, arg3);
8776 #endif
8777 #ifdef TARGET_NR_getsockname
8778     case TARGET_NR_getsockname:
8779         return do_getsockname(arg1, arg2, arg3);
8780 #endif
8781 #ifdef TARGET_NR_getsockopt
8782     case TARGET_NR_getsockopt:
8783         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8784 #endif
8785 #ifdef TARGET_NR_listen
8786     case TARGET_NR_listen:
8787         return get_errno(listen(arg1, arg2));
8788 #endif
8789 #ifdef TARGET_NR_recv
8790     case TARGET_NR_recv:
8791         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8792 #endif
8793 #ifdef TARGET_NR_recvfrom
8794     case TARGET_NR_recvfrom:
8795         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8796 #endif
8797 #ifdef TARGET_NR_recvmsg
8798     case TARGET_NR_recvmsg:
8799         return do_sendrecvmsg(arg1, arg2, arg3, 0);
8800 #endif
8801 #ifdef TARGET_NR_send
8802     case TARGET_NR_send:
8803         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8804 #endif
8805 #ifdef TARGET_NR_sendmsg
8806     case TARGET_NR_sendmsg:
8807         return do_sendrecvmsg(arg1, arg2, arg3, 1);
8808 #endif
8809 #ifdef TARGET_NR_sendmmsg
8810     case TARGET_NR_sendmmsg:
8811         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8812     case TARGET_NR_recvmmsg:
8813         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8814 #endif
8815 #ifdef TARGET_NR_sendto
8816     case TARGET_NR_sendto:
8817         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8818 #endif
8819 #ifdef TARGET_NR_shutdown
8820     case TARGET_NR_shutdown:
8821         return get_errno(shutdown(arg1, arg2));
8822 #endif
8823 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8824     case TARGET_NR_getrandom:
8825         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8826         if (!p) {
8827             return -TARGET_EFAULT;
8828         }
8829         ret = get_errno(getrandom(p, arg2, arg3));
8830         unlock_user(p, arg1, ret);
8831         return ret;
8832 #endif
8833 #ifdef TARGET_NR_socket
8834     case TARGET_NR_socket:
8835         return do_socket(arg1, arg2, arg3);
8836 #endif
8837 #ifdef TARGET_NR_socketpair
8838     case TARGET_NR_socketpair:
8839         return do_socketpair(arg1, arg2, arg3, arg4);
8840 #endif
8841 #ifdef TARGET_NR_setsockopt
8842     case TARGET_NR_setsockopt:
8843         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8844 #endif
8845 #if defined(TARGET_NR_syslog)
8846     case TARGET_NR_syslog:
8847         {
8848             int len = arg2;
8849 
8850             switch (arg1) {
8851             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
8852             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
8853             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
8854             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
8855             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
8856             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8857             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
8858             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
8859                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8860             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
8861             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
8862             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
8863                 {
8864                     if (len < 0) {
8865                         return -TARGET_EINVAL;
8866                     }
8867                     if (len == 0) {
8868                         return 0;
8869                     }
8870                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8871                     if (!p) {
8872                         return -TARGET_EFAULT;
8873                     }
8874                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8875                     unlock_user(p, arg2, arg3);
8876                 }
8877                 return ret;
8878             default:
8879                 return -TARGET_EINVAL;
8880             }
8881         }
8882         break;
8883 #endif
8884     case TARGET_NR_setitimer:
8885         {
8886             struct itimerval value, ovalue, *pvalue;
8887 
8888             if (arg2) {
8889                 pvalue = &value;
8890                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8891                     || copy_from_user_timeval(&pvalue->it_value,
8892                                               arg2 + sizeof(struct target_timeval)))
8893                     return -TARGET_EFAULT;
8894             } else {
8895                 pvalue = NULL;
8896             }
8897             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8898             if (!is_error(ret) && arg3) {
8899                 if (copy_to_user_timeval(arg3,
8900                                          &ovalue.it_interval)
8901                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8902                                             &ovalue.it_value))
8903                     return -TARGET_EFAULT;
8904             }
8905         }
8906         return ret;
8907     case TARGET_NR_getitimer:
8908         {
8909             struct itimerval value;
8910 
8911             ret = get_errno(getitimer(arg1, &value));
8912             if (!is_error(ret) && arg2) {
8913                 if (copy_to_user_timeval(arg2,
8914                                          &value.it_interval)
8915                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8916                                             &value.it_value))
8917                     return -TARGET_EFAULT;
8918             }
8919         }
8920         return ret;
8921 #ifdef TARGET_NR_stat
8922     case TARGET_NR_stat:
8923         if (!(p = lock_user_string(arg1))) {
8924             return -TARGET_EFAULT;
8925         }
8926         ret = get_errno(stat(path(p), &st));
8927         unlock_user(p, arg1, 0);
8928         goto do_stat;
8929 #endif
8930 #ifdef TARGET_NR_lstat
8931     case TARGET_NR_lstat:
8932         if (!(p = lock_user_string(arg1))) {
8933             return -TARGET_EFAULT;
8934         }
8935         ret = get_errno(lstat(path(p), &st));
8936         unlock_user(p, arg1, 0);
8937         goto do_stat;
8938 #endif
8939 #ifdef TARGET_NR_fstat
8940     case TARGET_NR_fstat:
8941         {
8942             ret = get_errno(fstat(arg1, &st));
8943 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8944         do_stat:
8945 #endif
8946             if (!is_error(ret)) {
8947                 struct target_stat *target_st;
8948 
8949                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8950                     return -TARGET_EFAULT;
8951                 memset(target_st, 0, sizeof(*target_st));
8952                 __put_user(st.st_dev, &target_st->st_dev);
8953                 __put_user(st.st_ino, &target_st->st_ino);
8954                 __put_user(st.st_mode, &target_st->st_mode);
8955                 __put_user(st.st_uid, &target_st->st_uid);
8956                 __put_user(st.st_gid, &target_st->st_gid);
8957                 __put_user(st.st_nlink, &target_st->st_nlink);
8958                 __put_user(st.st_rdev, &target_st->st_rdev);
8959                 __put_user(st.st_size, &target_st->st_size);
8960                 __put_user(st.st_blksize, &target_st->st_blksize);
8961                 __put_user(st.st_blocks, &target_st->st_blocks);
8962                 __put_user(st.st_atime, &target_st->target_st_atime);
8963                 __put_user(st.st_mtime, &target_st->target_st_mtime);
8964                 __put_user(st.st_ctime, &target_st->target_st_ctime);
8965 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
8966     defined(TARGET_STAT_HAVE_NSEC)
8967                 __put_user(st.st_atim.tv_nsec,
8968                            &target_st->target_st_atime_nsec);
8969                 __put_user(st.st_mtim.tv_nsec,
8970                            &target_st->target_st_mtime_nsec);
8971                 __put_user(st.st_ctim.tv_nsec,
8972                            &target_st->target_st_ctime_nsec);
8973 #endif
8974                 unlock_user_struct(target_st, arg2, 1);
8975             }
8976         }
8977         return ret;
8978 #endif
8979     case TARGET_NR_vhangup:
8980         return get_errno(vhangup());
8981 #ifdef TARGET_NR_syscall
8982     case TARGET_NR_syscall:
8983         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8984                           arg6, arg7, arg8, 0);
8985 #endif
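    /*
     * wait4: both the exit status and the rusage data are converted to the
     * target layout before being copied back to guest memory.
     */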
8986     case TARGET_NR_wait4:
8987         {
8988             int status;
8989             abi_long status_ptr = arg2;
8990             struct rusage rusage, *rusage_ptr;
8991             abi_ulong target_rusage = arg4;
8992             abi_long rusage_err;
8993             if (target_rusage)
8994                 rusage_ptr = &rusage;
8995             else
8996                 rusage_ptr = NULL;
8997             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8998             if (!is_error(ret)) {
8999                 if (status_ptr && ret) {
9000                     status = host_to_target_waitstatus(status);
9001                     if (put_user_s32(status, status_ptr))
9002                         return -TARGET_EFAULT;
9003                 }
9004                 if (target_rusage) {
9005                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9006                     if (rusage_err) {
9007                         ret = rusage_err;
9008                     }
9009                 }
9010             }
9011         }
9012         return ret;
9013 #ifdef TARGET_NR_swapoff
9014     case TARGET_NR_swapoff:
9015         if (!(p = lock_user_string(arg1)))
9016             return -TARGET_EFAULT;
9017         ret = get_errno(swapoff(p));
9018         unlock_user(p, arg1, 0);
9019         return ret;
9020 #endif
9021     case TARGET_NR_sysinfo:
9022         {
9023             struct target_sysinfo *target_value;
9024             struct sysinfo value;
9025             ret = get_errno(sysinfo(&value));
9026             if (!is_error(ret) && arg1)
9027             {
9028                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9029                     return -TARGET_EFAULT;
9030                 __put_user(value.uptime, &target_value->uptime);
9031                 __put_user(value.loads[0], &target_value->loads[0]);
9032                 __put_user(value.loads[1], &target_value->loads[1]);
9033                 __put_user(value.loads[2], &target_value->loads[2]);
9034                 __put_user(value.totalram, &target_value->totalram);
9035                 __put_user(value.freeram, &target_value->freeram);
9036                 __put_user(value.sharedram, &target_value->sharedram);
9037                 __put_user(value.bufferram, &target_value->bufferram);
9038                 __put_user(value.totalswap, &target_value->totalswap);
9039                 __put_user(value.freeswap, &target_value->freeswap);
9040                 __put_user(value.procs, &target_value->procs);
9041                 __put_user(value.totalhigh, &target_value->totalhigh);
9042                 __put_user(value.freehigh, &target_value->freehigh);
9043                 __put_user(value.mem_unit, &target_value->mem_unit);
9044                 unlock_user_struct(target_value, arg1, 1);
9045             }
9046         }
9047         return ret;
9048 #ifdef TARGET_NR_ipc
9049     case TARGET_NR_ipc:
9050         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9051 #endif
9052 #ifdef TARGET_NR_semget
9053     case TARGET_NR_semget:
9054         return get_errno(semget(arg1, arg2, arg3));
9055 #endif
9056 #ifdef TARGET_NR_semop
9057     case TARGET_NR_semop:
9058         return do_semop(arg1, arg2, arg3);
9059 #endif
9060 #ifdef TARGET_NR_semctl
9061     case TARGET_NR_semctl:
9062         return do_semctl(arg1, arg2, arg3, arg4);
9063 #endif
9064 #ifdef TARGET_NR_msgctl
9065     case TARGET_NR_msgctl:
9066         return do_msgctl(arg1, arg2, arg3);
9067 #endif
9068 #ifdef TARGET_NR_msgget
9069     case TARGET_NR_msgget:
9070         return get_errno(msgget(arg1, arg2));
9071 #endif
9072 #ifdef TARGET_NR_msgrcv
9073     case TARGET_NR_msgrcv:
9074         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9075 #endif
9076 #ifdef TARGET_NR_msgsnd
9077     case TARGET_NR_msgsnd:
9078         return do_msgsnd(arg1, arg2, arg3, arg4);
9079 #endif
9080 #ifdef TARGET_NR_shmget
9081     case TARGET_NR_shmget:
9082         return get_errno(shmget(arg1, arg2, arg3));
9083 #endif
9084 #ifdef TARGET_NR_shmctl
9085     case TARGET_NR_shmctl:
9086         return do_shmctl(arg1, arg2, arg3);
9087 #endif
9088 #ifdef TARGET_NR_shmat
9089     case TARGET_NR_shmat:
9090         return do_shmat(cpu_env, arg1, arg2, arg3);
9091 #endif
9092 #ifdef TARGET_NR_shmdt
9093     case TARGET_NR_shmdt:
9094         return do_shmdt(arg1);
9095 #endif
9096     case TARGET_NR_fsync:
9097         return get_errno(fsync(arg1));
9098     case TARGET_NR_clone:
9099         /* Linux manages to have three different orderings for its
9100          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9101          * match the kernel's CONFIG_CLONE_* settings.
9102          * Microblaze is further special in that it uses a sixth
9103          * implicit argument to clone for the TLS pointer.
9104          */
9105 #if defined(TARGET_MICROBLAZE)
9106         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9107 #elif defined(TARGET_CLONE_BACKWARDS)
9108         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9109 #elif defined(TARGET_CLONE_BACKWARDS2)
9110         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9111 #else
9112         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9113 #endif
9114         return ret;
9115 #ifdef __NR_exit_group
9116         /* new thread calls */
9117     case TARGET_NR_exit_group:
9118         preexit_cleanup(cpu_env, arg1);
9119         return get_errno(exit_group(arg1));
9120 #endif
9121     case TARGET_NR_setdomainname:
9122         if (!(p = lock_user_string(arg1)))
9123             return -TARGET_EFAULT;
9124         ret = get_errno(setdomainname(p, arg2));
9125         unlock_user(p, arg1, 0);
9126         return ret;
9127     case TARGET_NR_uname:
9128         /* no need to transcode because we use the linux syscall */
9129         {
9130             struct new_utsname * buf;
9131 
9132             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9133                 return -TARGET_EFAULT;
9134             ret = get_errno(sys_uname(buf));
9135             if (!is_error(ret)) {
9136                 /* Overwrite the native machine name with whatever is being
9137                    emulated. */
9138                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9139                           sizeof(buf->machine));
9140                 /* Allow the user to override the reported release.  */
9141                 if (qemu_uname_release && *qemu_uname_release) {
9142                     g_strlcpy(buf->release, qemu_uname_release,
9143                               sizeof(buf->release));
9144                 }
9145             }
9146             unlock_user_struct(buf, arg1, 1);
9147         }
9148         return ret;
9149 #ifdef TARGET_I386
9150     case TARGET_NR_modify_ldt:
9151         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9152 #if !defined(TARGET_X86_64)
9153     case TARGET_NR_vm86:
9154         return do_vm86(cpu_env, arg1, arg2);
9155 #endif
9156 #endif
9157     case TARGET_NR_adjtimex:
9158         {
9159             struct timex host_buf;
9160 
9161             if (target_to_host_timex(&host_buf, arg1) != 0) {
9162                 return -TARGET_EFAULT;
9163             }
9164             ret = get_errno(adjtimex(&host_buf));
9165             if (!is_error(ret)) {
9166                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9167                     return -TARGET_EFAULT;
9168                 }
9169             }
9170         }
9171         return ret;
9172 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9173     case TARGET_NR_clock_adjtime:
9174         {
9175             struct timex htx, *phtx = &htx;
9176 
9177             if (target_to_host_timex(phtx, arg2) != 0) {
9178                 return -TARGET_EFAULT;
9179             }
9180             ret = get_errno(clock_adjtime(arg1, phtx));
9181             if (!is_error(ret) && phtx) {
9182                 if (host_to_target_timex(arg2, phtx) != 0) {
9183                     return -TARGET_EFAULT;
9184                 }
9185             }
9186         }
9187         return ret;
9188 #endif
9189     case TARGET_NR_getpgid:
9190         return get_errno(getpgid(arg1));
9191     case TARGET_NR_fchdir:
9192         return get_errno(fchdir(arg1));
9193     case TARGET_NR_personality:
9194         return get_errno(personality(arg1));
9195 #ifdef TARGET_NR__llseek /* Not on alpha */
9196     case TARGET_NR__llseek:
9197         {
9198             int64_t res;
9199 #if !defined(__NR_llseek)
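            /* No host _llseek (e.g. on 64-bit hosts): compose the 64-bit
             * offset from the two halves and use lseek() directly.
             */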
9200             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9201             if (res == -1) {
9202                 ret = get_errno(res);
9203             } else {
9204                 ret = 0;
9205             }
9206 #else
9207             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9208 #endif
9209             if ((ret == 0) && put_user_s64(res, arg4)) {
9210                 return -TARGET_EFAULT;
9211             }
9212         }
9213         return ret;
9214 #endif
9215 #ifdef TARGET_NR_getdents
9216     case TARGET_NR_getdents:
9217 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9218 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9219         {
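            /* The host linux_dirent (64-bit d_ino/d_off) is wider than the
             * 32-bit target_dirent, so read into a scratch buffer and repack
             * each record into the guest's buffer.
             */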
9220             struct target_dirent *target_dirp;
9221             struct linux_dirent *dirp;
9222             abi_long count = arg3;
9223 
9224             dirp = g_try_malloc(count);
9225             if (!dirp) {
9226                 return -TARGET_ENOMEM;
9227             }
9228 
9229             ret = get_errno(sys_getdents(arg1, dirp, count));
9230             if (!is_error(ret)) {
9231                 struct linux_dirent *de;
9232                 struct target_dirent *tde;
9233                 int len = ret;
9234                 int reclen, treclen;
9235                 int count1, tnamelen;
9236 
9237                 count1 = 0;
9238                 de = dirp;
9239                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) {
                    g_free(dirp);
9240                     return -TARGET_EFAULT;
                }
9241                 tde = target_dirp;
9242                 while (len > 0) {
9243                     reclen = de->d_reclen;
9244                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9245                     assert(tnamelen >= 0);
9246                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9247                     assert(count1 + treclen <= count);
9248                     tde->d_reclen = tswap16(treclen);
9249                     tde->d_ino = tswapal(de->d_ino);
9250                     tde->d_off = tswapal(de->d_off);
9251                     memcpy(tde->d_name, de->d_name, tnamelen);
9252                     de = (struct linux_dirent *)((char *)de + reclen);
9253                     len -= reclen;
9254                     tde = (struct target_dirent *)((char *)tde + treclen);
9255                     count1 += treclen;
9256                 }
9257                 ret = count1;
9258                 unlock_user(target_dirp, arg2, ret);
9259             }
9260             g_free(dirp);
9261         }
9262 #else
9263         {
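            /* Host and target dirent layouts match here, so it is enough to
             * byteswap the fields in place in the guest buffer.
             */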
9264             struct linux_dirent *dirp;
9265             abi_long count = arg3;
9266 
9267             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9268                 return -TARGET_EFAULT;
9269             ret = get_errno(sys_getdents(arg1, dirp, count));
9270             if (!is_error(ret)) {
9271                 struct linux_dirent *de;
9272                 int len = ret;
9273                 int reclen;
9274                 de = dirp;
9275                 while (len > 0) {
9276                     reclen = de->d_reclen;
9277                     if (reclen > len)
9278                         break;
9279                     de->d_reclen = tswap16(reclen);
9280                     tswapls(&de->d_ino);
9281                     tswapls(&de->d_off);
9282                     de = (struct linux_dirent *)((char *)de + reclen);
9283                     len -= reclen;
9284                 }
9285             }
9286             unlock_user(dirp, arg2, ret);
9287         }
9288 #endif
9289 #else
9290         /* Implement getdents in terms of getdents64 */
9291         {
9292             struct linux_dirent64 *dirp;
9293             abi_long count = arg3;
9294 
9295             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9296             if (!dirp) {
9297                 return -TARGET_EFAULT;
9298             }
9299             ret = get_errno(sys_getdents64(arg1, dirp, count));
9300             if (!is_error(ret)) {
9301                 /* Convert the dirent64 structs to target dirent.  We do this
9302                  * in-place, since we can guarantee that a target_dirent is no
9303                  * larger than a dirent64; however this means we have to be
9304                  * careful to read everything before writing in the new format.
9305                  */
9306                 struct linux_dirent64 *de;
9307                 struct target_dirent *tde;
9308                 int len = ret;
9309                 int tlen = 0;
9310 
9311                 de = dirp;
9312                 tde = (struct target_dirent *)dirp;
9313                 while (len > 0) {
9314                     int namelen, treclen;
9315                     int reclen = de->d_reclen;
9316                     uint64_t ino = de->d_ino;
9317                     int64_t off = de->d_off;
9318                     uint8_t type = de->d_type;
9319 
9320                     namelen = strlen(de->d_name);
9321                     treclen = offsetof(struct target_dirent, d_name)
9322                         + namelen + 2;
9323                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9324 
9325                     memmove(tde->d_name, de->d_name, namelen + 1);
9326                     tde->d_ino = tswapal(ino);
9327                     tde->d_off = tswapal(off);
9328                     tde->d_reclen = tswap16(treclen);
9329                     /* The target_dirent type is in what was formerly a padding
9330                      * byte at the end of the structure:
9331                      */
9332                     *(((char *)tde) + treclen - 1) = type;
9333 
9334                     de = (struct linux_dirent64 *)((char *)de + reclen);
9335                     tde = (struct target_dirent *)((char *)tde + treclen);
9336                     len -= reclen;
9337                     tlen += treclen;
9338                 }
9339                 ret = tlen;
9340             }
9341             unlock_user(dirp, arg2, ret);
9342         }
9343 #endif
9344         return ret;
9345 #endif /* TARGET_NR_getdents */
9346 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9347     case TARGET_NR_getdents64:
9348         {
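            /* linux_dirent64 has the same layout on host and target, so only
             * the d_ino, d_off and d_reclen fields need byteswapping in place.
             */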
9349             struct linux_dirent64 *dirp;
9350             abi_long count = arg3;
9351             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9352                 return -TARGET_EFAULT;
9353             ret = get_errno(sys_getdents64(arg1, dirp, count));
9354             if (!is_error(ret)) {
9355                 struct linux_dirent64 *de;
9356                 int len = ret;
9357                 int reclen;
9358                 de = dirp;
9359                 while (len > 0) {
9360                     reclen = de->d_reclen;
9361                     if (reclen > len)
9362                         break;
9363                     de->d_reclen = tswap16(reclen);
9364                     tswap64s((uint64_t *)&de->d_ino);
9365                     tswap64s((uint64_t *)&de->d_off);
9366                     de = (struct linux_dirent64 *)((char *)de + reclen);
9367                     len -= reclen;
9368                 }
9369             }
9370             unlock_user(dirp, arg2, ret);
9371         }
9372         return ret;
9373 #endif /* TARGET_NR_getdents64 */
9374 #if defined(TARGET_NR__newselect)
9375     case TARGET_NR__newselect:
9376         return do_select(arg1, arg2, arg3, arg4, arg5);
9377 #endif
9378 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9379 # ifdef TARGET_NR_poll
9380     case TARGET_NR_poll:
9381 # endif
9382 # ifdef TARGET_NR_ppoll
9383     case TARGET_NR_ppoll:
9384 # endif
9385         {
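            /* poll and ppoll share this code: convert the guest pollfd array,
             * call safe_ppoll(), then copy the returned revents back.
             */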
9386             struct target_pollfd *target_pfd;
9387             unsigned int nfds = arg2;
9388             struct pollfd *pfd;
9389             unsigned int i;
9390 
9391             pfd = NULL;
9392             target_pfd = NULL;
9393             if (nfds) {
9394                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9395                     return -TARGET_EINVAL;
9396                 }
9397 
9398                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9399                                        sizeof(struct target_pollfd) * nfds, 1);
9400                 if (!target_pfd) {
9401                     return -TARGET_EFAULT;
9402                 }
9403 
9404                 pfd = alloca(sizeof(struct pollfd) * nfds);
9405                 for (i = 0; i < nfds; i++) {
9406                     pfd[i].fd = tswap32(target_pfd[i].fd);
9407                     pfd[i].events = tswap16(target_pfd[i].events);
9408                 }
9409             }
9410 
9411             switch (num) {
9412 # ifdef TARGET_NR_ppoll
9413             case TARGET_NR_ppoll:
9414             {
9415                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9416                 target_sigset_t *target_set;
9417                 sigset_t _set, *set = &_set;
9418 
9419                 if (arg3) {
9420                     if (target_to_host_timespec(timeout_ts, arg3)) {
9421                         unlock_user(target_pfd, arg1, 0);
9422                         return -TARGET_EFAULT;
9423                     }
9424                 } else {
9425                     timeout_ts = NULL;
9426                 }
9427 
9428                 if (arg4) {
9429                     if (arg5 != sizeof(target_sigset_t)) {
9430                         unlock_user(target_pfd, arg1, 0);
9431                         return -TARGET_EINVAL;
9432                     }
9433 
9434                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9435                     if (!target_set) {
9436                         unlock_user(target_pfd, arg1, 0);
9437                         return -TARGET_EFAULT;
9438                     }
9439                     target_to_host_sigset(set, target_set);
9440                 } else {
9441                     set = NULL;
9442                 }
9443 
9444                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9445                                            set, SIGSET_T_SIZE));
9446 
9447                 if (!is_error(ret) && arg3) {
9448                     host_to_target_timespec(arg3, timeout_ts);
9449                 }
9450                 if (arg4) {
9451                     unlock_user(target_set, arg4, 0);
9452                 }
9453                 break;
9454             }
9455 # endif
9456 # ifdef TARGET_NR_poll
9457             case TARGET_NR_poll:
9458             {
9459                 struct timespec ts, *pts;
9460 
9461                 if (arg3 >= 0) {
9462                     /* Convert ms to secs, ns */
9463                     ts.tv_sec = arg3 / 1000;
9464                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9465                     pts = &ts;
9466                 } else {
9467                     /* A negative poll() timeout means "infinite". */
9468                     pts = NULL;
9469                 }
9470                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9471                 break;
9472             }
9473 # endif
9474             default:
9475                 g_assert_not_reached();
9476             }
9477 
9478             if (!is_error(ret)) {
9479                 for (i = 0; i < nfds; i++) {
9480                     target_pfd[i].revents = tswap16(pfd[i].revents);
9481                 }
9482             }
9483             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9484         }
9485         return ret;
9486 #endif
9487     case TARGET_NR_flock:
9488         /* NOTE: the flock constant seems to be the same for every
9489            Linux platform */
9490         return get_errno(safe_flock(arg1, arg2));
9491     case TARGET_NR_readv:
9492         {
9493             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9494             if (vec != NULL) {
9495                 ret = get_errno(safe_readv(arg1, vec, arg3));
9496                 unlock_iovec(vec, arg2, arg3, 1);
9497             } else {
9498                 ret = -host_to_target_errno(errno);
9499             }
9500         }
9501         return ret;
9502     case TARGET_NR_writev:
9503         {
9504             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9505             if (vec != NULL) {
9506                 ret = get_errno(safe_writev(arg1, vec, arg3));
9507                 unlock_iovec(vec, arg2, arg3, 0);
9508             } else {
9509                 ret = -host_to_target_errno(errno);
9510             }
9511         }
9512         return ret;
9513 #if defined(TARGET_NR_preadv)
9514     case TARGET_NR_preadv:
9515         {
9516             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9517             if (vec != NULL) {
9518                 unsigned long low, high;
9519 
9520                 target_to_host_low_high(arg4, arg5, &low, &high);
9521                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9522                 unlock_iovec(vec, arg2, arg3, 1);
9523             } else {
9524                 ret = -host_to_target_errno(errno);
9525             }
9526         }
9527         return ret;
9528 #endif
9529 #if defined(TARGET_NR_pwritev)
9530     case TARGET_NR_pwritev:
9531         {
9532             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9533             if (vec != NULL) {
9534                 unsigned long low, high;
9535 
9536                 target_to_host_low_high(arg4, arg5, &low, &high);
9537                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9538                 unlock_iovec(vec, arg2, arg3, 0);
9539             } else {
9540                 ret = -host_to_target_errno(errno);
9541             }
9542         }
9543         return ret;
9544 #endif
9545     case TARGET_NR_getsid:
9546         return get_errno(getsid(arg1));
9547 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9548     case TARGET_NR_fdatasync:
9549         return get_errno(fdatasync(arg1));
9550 #endif
9551 #ifdef TARGET_NR__sysctl
9552     case TARGET_NR__sysctl:
9553         /* We don't implement this, but ENOTDIR is always a safe
9554            return value. */
9555         return -TARGET_ENOTDIR;
9556 #endif
9557     case TARGET_NR_sched_getaffinity:
9558         {
9559             unsigned int mask_size;
9560             unsigned long *mask;
9561 
9562             /*
9563              * sched_getaffinity needs multiples of ulong, so we need to take
9564              * care of mismatches between target ulong and host ulong sizes.
9565              */
9566             if (arg2 & (sizeof(abi_ulong) - 1)) {
9567                 return -TARGET_EINVAL;
9568             }
9569             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9570 
9571             mask = alloca(mask_size);
9572             memset(mask, 0, mask_size);
9573             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9574 
9575             if (!is_error(ret)) {
9576                 if (ret > arg2) {
9577                     /* More data was returned than the caller's buffer can hold.
9578                      * This only happens if sizeof(abi_long) < sizeof(long)
9579                      * and the caller passed us a buffer holding an odd number
9580                      * of abi_longs. If the host kernel is actually using the
9581                      * extra 4 bytes then fail EINVAL; otherwise we can just
9582                      * ignore them and only copy the interesting part.
9583                      */
9584                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9585                     if (numcpus > arg2 * 8) {
9586                         return -TARGET_EINVAL;
9587                     }
9588                     ret = arg2;
9589                 }
9590 
9591                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9592                     return -TARGET_EFAULT;
9593                 }
9594             }
9595         }
9596         return ret;
9597     case TARGET_NR_sched_setaffinity:
9598         {
9599             unsigned int mask_size;
9600             unsigned long *mask;
9601 
9602             /*
9603              * sched_setaffinity needs multiples of ulong, so we need to take
9604              * care of mismatches between target ulong and host ulong sizes.
9605              */
9606             if (arg2 & (sizeof(abi_ulong) - 1)) {
9607                 return -TARGET_EINVAL;
9608             }
9609             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9610             mask = alloca(mask_size);
9611 
9612             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9613             if (ret) {
9614                 return ret;
9615             }
9616 
9617             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9618         }
9619     case TARGET_NR_getcpu:
9620         {
9621             unsigned cpu, node;
9622             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9623                                        arg2 ? &node : NULL,
9624                                        NULL));
9625             if (is_error(ret)) {
9626                 return ret;
9627             }
9628             if (arg1 && put_user_u32(cpu, arg1)) {
9629                 return -TARGET_EFAULT;
9630             }
9631             if (arg2 && put_user_u32(node, arg2)) {
9632                 return -TARGET_EFAULT;
9633             }
9634         }
9635         return ret;
9636     case TARGET_NR_sched_setparam:
9637         {
9638             struct sched_param *target_schp;
9639             struct sched_param schp;
9640 
9641             if (arg2 == 0) {
9642                 return -TARGET_EINVAL;
9643             }
9644             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9645                 return -TARGET_EFAULT;
9646             schp.sched_priority = tswap32(target_schp->sched_priority);
9647             unlock_user_struct(target_schp, arg2, 0);
9648             return get_errno(sched_setparam(arg1, &schp));
9649         }
9650     case TARGET_NR_sched_getparam:
9651         {
9652             struct sched_param *target_schp;
9653             struct sched_param schp;
9654 
9655             if (arg2 == 0) {
9656                 return -TARGET_EINVAL;
9657             }
9658             ret = get_errno(sched_getparam(arg1, &schp));
9659             if (!is_error(ret)) {
9660                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9661                     return -TARGET_EFAULT;
9662                 target_schp->sched_priority = tswap32(schp.sched_priority);
9663                 unlock_user_struct(target_schp, arg2, 1);
9664             }
9665         }
9666         return ret;
9667     case TARGET_NR_sched_setscheduler:
9668         {
9669             struct sched_param *target_schp;
9670             struct sched_param schp;
9671             if (arg3 == 0) {
9672                 return -TARGET_EINVAL;
9673             }
9674             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9675                 return -TARGET_EFAULT;
9676             schp.sched_priority = tswap32(target_schp->sched_priority);
9677             unlock_user_struct(target_schp, arg3, 0);
9678             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9679         }
9680     case TARGET_NR_sched_getscheduler:
9681         return get_errno(sched_getscheduler(arg1));
9682     case TARGET_NR_sched_yield:
9683         return get_errno(sched_yield());
9684     case TARGET_NR_sched_get_priority_max:
9685         return get_errno(sched_get_priority_max(arg1));
9686     case TARGET_NR_sched_get_priority_min:
9687         return get_errno(sched_get_priority_min(arg1));
9688     case TARGET_NR_sched_rr_get_interval:
9689         {
9690             struct timespec ts;
9691             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9692             if (!is_error(ret)) {
9693                 ret = host_to_target_timespec(arg2, &ts);
9694             }
9695         }
9696         return ret;
9697     case TARGET_NR_nanosleep:
9698         {
9699             struct timespec req, rem;
9700             if (target_to_host_timespec(&req, arg1)) {
                return -TARGET_EFAULT;
            }
9701             ret = get_errno(safe_nanosleep(&req, &rem));
9702             if (is_error(ret) && arg2) {
9703                 host_to_target_timespec(arg2, &rem);
9704             }
9705         }
9706         return ret;
9707     case TARGET_NR_prctl:
9708         switch (arg1) {
9709         case PR_GET_PDEATHSIG:
9710         {
9711             int deathsig;
9712             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9713             if (!is_error(ret) && arg2
9714                 && put_user_ual(deathsig, arg2)) {
9715                 return -TARGET_EFAULT;
9716             }
9717             return ret;
9718         }
9719 #ifdef PR_GET_NAME
9720         case PR_GET_NAME:
9721         {
9722             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9723             if (!name) {
9724                 return -TARGET_EFAULT;
9725             }
9726             ret = get_errno(prctl(arg1, (unsigned long)name,
9727                                   arg3, arg4, arg5));
9728             unlock_user(name, arg2, 16);
9729             return ret;
9730         }
9731         case PR_SET_NAME:
9732         {
9733             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9734             if (!name) {
9735                 return -TARGET_EFAULT;
9736             }
9737             ret = get_errno(prctl(arg1, (unsigned long)name,
9738                                   arg3, arg4, arg5));
9739             unlock_user(name, arg2, 0);
9740             return ret;
9741         }
9742 #endif
9743 #ifdef TARGET_MIPS
9744         case TARGET_PR_GET_FP_MODE:
9745         {
9746             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9747             ret = 0;
9748             if (env->CP0_Status & (1 << CP0St_FR)) {
9749                 ret |= TARGET_PR_FP_MODE_FR;
9750             }
9751             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9752                 ret |= TARGET_PR_FP_MODE_FRE;
9753             }
9754             return ret;
9755         }
9756         case TARGET_PR_SET_FP_MODE:
9757         {
9758             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9759             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9760             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
9761             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9762             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9763 
9764             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
9765                                             TARGET_PR_FP_MODE_FRE;
9766 
9767             /* If nothing to change, return right away, successfully.  */
9768             if (old_fr == new_fr && old_fre == new_fre) {
9769                 return 0;
9770             }
9771             /* Check the value is valid */
9772             if (arg2 & ~known_bits) {
9773                 return -TARGET_EOPNOTSUPP;
9774             }
9775             /* Setting FRE without FR is not supported.  */
9776             if (new_fre && !new_fr) {
9777                 return -TARGET_EOPNOTSUPP;
9778             }
9779             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9780                 /* FR1 is not supported */
9781                 return -TARGET_EOPNOTSUPP;
9782             }
9783             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9784                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9785                 /* cannot set FR=0 */
9786                 return -TARGET_EOPNOTSUPP;
9787             }
9788             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9789                 /* Cannot set FRE=1 */
9790                 return -TARGET_EOPNOTSUPP;
9791             }
9792 
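            /* Changing FR repacks the FPU registers: with FR=0 the odd
             * register holds the upper word of an even/odd double pair, so
             * move that word into (FR 0->1) or out of (FR 1->0) the high
             * half of the even register.
             */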
9793             int i;
9794             fpr_t *fpr = env->active_fpu.fpr;
9795             for (i = 0; i < 32 ; i += 2) {
9796                 if (!old_fr && new_fr) {
9797                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9798                 } else if (old_fr && !new_fr) {
9799                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
9800                 }
9801             }
9802 
9803             if (new_fr) {
9804                 env->CP0_Status |= (1 << CP0St_FR);
9805                 env->hflags |= MIPS_HFLAG_F64;
9806             } else {
9807                 env->CP0_Status &= ~(1 << CP0St_FR);
9808                 env->hflags &= ~MIPS_HFLAG_F64;
9809             }
9810             if (new_fre) {
9811                 env->CP0_Config5 |= (1 << CP0C5_FRE);
9812                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
9813                     env->hflags |= MIPS_HFLAG_FRE;
9814                 }
9815             } else {
9816                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
9817                 env->hflags &= ~MIPS_HFLAG_FRE;
9818             }
9819 
9820             return 0;
9821         }
9822 #endif /* MIPS */
9823 #ifdef TARGET_AARCH64
9824         case TARGET_PR_SVE_SET_VL:
9825             /*
9826              * We cannot support either PR_SVE_SET_VL_ONEXEC or
9827              * PR_SVE_VL_INHERIT.  Note the kernel definition
9828              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9829              * even though the current architectural maximum is VQ=16.
9830              */
9831             ret = -TARGET_EINVAL;
9832             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
9833                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9834                 CPUARMState *env = cpu_env;
9835                 ARMCPU *cpu = env_archcpu(env);
9836                 uint32_t vq, old_vq;
9837 
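                /* arg2 is the requested vector length in bytes; convert it to
                 * VQ (units of 16 bytes), clamp it to the CPU's maximum, and
                 * narrow the live SVE state if the length shrinks.
                 */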
9838                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9839                 vq = MAX(arg2 / 16, 1);
9840                 vq = MIN(vq, cpu->sve_max_vq);
9841 
9842                 if (vq < old_vq) {
9843                     aarch64_sve_narrow_vq(env, vq);
9844                 }
9845                 env->vfp.zcr_el[1] = vq - 1;
9846                 ret = vq * 16;
9847             }
9848             return ret;
9849         case TARGET_PR_SVE_GET_VL:
9850             ret = -TARGET_EINVAL;
9851             {
9852                 ARMCPU *cpu = env_archcpu(cpu_env);
9853                 if (cpu_isar_feature(aa64_sve, cpu)) {
9854                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9855                 }
9856             }
9857             return ret;
9858         case TARGET_PR_PAC_RESET_KEYS:
9859             {
9860                 CPUARMState *env = cpu_env;
9861                 ARMCPU *cpu = env_archcpu(env);
9862 
9863                 if (arg3 || arg4 || arg5) {
9864                     return -TARGET_EINVAL;
9865                 }
9866                 if (cpu_isar_feature(aa64_pauth, cpu)) {
9867                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
9868                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
9869                                TARGET_PR_PAC_APGAKEY);
9870                     int ret = 0;
9871                     Error *err = NULL;
9872 
9873                     if (arg2 == 0) {
9874                         arg2 = all;
9875                     } else if (arg2 & ~all) {
9876                         return -TARGET_EINVAL;
9877                     }
9878                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
9879                         ret |= qemu_guest_getrandom(&env->keys.apia,
9880                                                     sizeof(ARMPACKey), &err);
9881                     }
9882                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
9883                         ret |= qemu_guest_getrandom(&env->keys.apib,
9884                                                     sizeof(ARMPACKey), &err);
9885                     }
9886                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
9887                         ret |= qemu_guest_getrandom(&env->keys.apda,
9888                                                     sizeof(ARMPACKey), &err);
9889                     }
9890                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
9891                         ret |= qemu_guest_getrandom(&env->keys.apdb,
9892                                                     sizeof(ARMPACKey), &err);
9893                     }
9894                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
9895                         ret |= qemu_guest_getrandom(&env->keys.apga,
9896                                                     sizeof(ARMPACKey), &err);
9897                     }
9898                     if (ret != 0) {
9899                         /*
9900                          * Some unknown failure in the crypto.  The best
9901                          * we can do is log it and fail the syscall.
9902                          * The real syscall cannot fail this way.
9903                          */
9904                         qemu_log_mask(LOG_UNIMP,
9905                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
9906                                       error_get_pretty(err));
9907                         error_free(err);
9908                         return -TARGET_EIO;
9909                     }
9910                     return 0;
9911                 }
9912             }
9913             return -TARGET_EINVAL;
9914 #endif /* AARCH64 */
9915         case PR_GET_SECCOMP:
9916         case PR_SET_SECCOMP:
9917             /* Disable seccomp to prevent the target from disabling syscalls we
9918              * need. */
9919             return -TARGET_EINVAL;
9920         default:
9921             /* Most prctl options have no pointer arguments */
9922             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9923         }
9924         break;
9925 #ifdef TARGET_NR_arch_prctl
9926     case TARGET_NR_arch_prctl:
9927 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9928         return do_arch_prctl(cpu_env, arg1, arg2);
9929 #else
9930 #error unreachable
9931 #endif
9932 #endif
9933 #ifdef TARGET_NR_pread64
9934     case TARGET_NR_pread64:
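        /* On ABIs that pass 64-bit values in aligned register pairs, arg4 is
         * padding and the offset halves arrive in arg5/arg6; shift them down.
         */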
9935         if (regpairs_aligned(cpu_env, num)) {
9936             arg4 = arg5;
9937             arg5 = arg6;
9938         }
9939         if (arg2 == 0 && arg3 == 0) {
9940             /* Special-case NULL buffer and zero length, which should succeed */
9941             p = 0;
9942         } else {
9943             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9944             if (!p) {
9945                 return -TARGET_EFAULT;
9946             }
9947         }
9948         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9949         unlock_user(p, arg2, ret);
9950         return ret;
9951     case TARGET_NR_pwrite64:
9952         if (regpairs_aligned(cpu_env, num)) {
9953             arg4 = arg5;
9954             arg5 = arg6;
9955         }
9956         if (arg2 == 0 && arg3 == 0) {
9957             /* Special-case NULL buffer and zero length, which should succeed */
9958             p = 0;
9959         } else {
9960             p = lock_user(VERIFY_READ, arg2, arg3, 1);
9961             if (!p) {
9962                 return -TARGET_EFAULT;
9963             }
9964         }
9965         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9966         unlock_user(p, arg2, 0);
9967         return ret;
9968 #endif
9969     case TARGET_NR_getcwd:
9970         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9971             return -TARGET_EFAULT;
9972         ret = get_errno(sys_getcwd1(p, arg2));
9973         unlock_user(p, arg1, ret);
9974         return ret;
9975     case TARGET_NR_capget:
9976     case TARGET_NR_capset:
9977     {
9978         struct target_user_cap_header *target_header;
9979         struct target_user_cap_data *target_data = NULL;
9980         struct __user_cap_header_struct header;
9981         struct __user_cap_data_struct data[2];
9982         struct __user_cap_data_struct *dataptr = NULL;
9983         int i, target_datalen;
9984         int data_items = 1;
9985 
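        /* Convert the guest capability header and data, issue the host
         * capget/capset, then write the updated header (and data, for capget)
         * back to the guest.
         */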
9986         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9987             return -TARGET_EFAULT;
9988         }
9989         header.version = tswap32(target_header->version);
9990         header.pid = tswap32(target_header->pid);
9991 
9992         if (header.version != _LINUX_CAPABILITY_VERSION) {
9993             /* Version 2 and up takes pointer to two user_data structs */
9994             data_items = 2;
9995         }
9996 
9997         target_datalen = sizeof(*target_data) * data_items;
9998 
9999         if (arg2) {
10000             if (num == TARGET_NR_capget) {
10001                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10002             } else {
10003                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10004             }
10005             if (!target_data) {
10006                 unlock_user_struct(target_header, arg1, 0);
10007                 return -TARGET_EFAULT;
10008             }
10009 
10010             if (num == TARGET_NR_capset) {
10011                 for (i = 0; i < data_items; i++) {
10012                     data[i].effective = tswap32(target_data[i].effective);
10013                     data[i].permitted = tswap32(target_data[i].permitted);
10014                     data[i].inheritable = tswap32(target_data[i].inheritable);
10015                 }
10016             }
10017 
10018             dataptr = data;
10019         }
10020 
10021         if (num == TARGET_NR_capget) {
10022             ret = get_errno(capget(&header, dataptr));
10023         } else {
10024             ret = get_errno(capset(&header, dataptr));
10025         }
10026 
10027         /* The kernel always updates version for both capget and capset */
10028         target_header->version = tswap32(header.version);
10029         unlock_user_struct(target_header, arg1, 1);
10030 
10031         if (arg2) {
10032             if (num == TARGET_NR_capget) {
10033                 for (i = 0; i < data_items; i++) {
10034                     target_data[i].effective = tswap32(data[i].effective);
10035                     target_data[i].permitted = tswap32(data[i].permitted);
10036                     target_data[i].inheritable = tswap32(data[i].inheritable);
10037                 }
10038                 unlock_user(target_data, arg2, target_datalen);
10039             } else {
10040                 unlock_user(target_data, arg2, 0);
10041             }
10042         }
10043         return ret;
10044     }
10045     case TARGET_NR_sigaltstack:
10046         return do_sigaltstack(arg1, arg2,
10047                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10048 
10049 #ifdef CONFIG_SENDFILE
10050 #ifdef TARGET_NR_sendfile
10051     case TARGET_NR_sendfile:
10052     {
10053         off_t *offp = NULL;
10054         off_t off;
10055         if (arg3) {
10056             ret = get_user_sal(off, arg3);
10057             if (is_error(ret)) {
10058                 return ret;
10059             }
10060             offp = &off;
10061         }
10062         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10063         if (!is_error(ret) && arg3) {
10064             abi_long ret2 = put_user_sal(off, arg3);
10065             if (is_error(ret2)) {
10066                 ret = ret2;
10067             }
10068         }
10069         return ret;
10070     }
10071 #endif
10072 #ifdef TARGET_NR_sendfile64
10073     case TARGET_NR_sendfile64:
10074     {
10075         off_t *offp = NULL;
10076         off_t off;
10077         if (arg3) {
10078             ret = get_user_s64(off, arg3);
10079             if (is_error(ret)) {
10080                 return ret;
10081             }
10082             offp = &off;
10083         }
10084         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10085         if (!is_error(ret) && arg3) {
10086             abi_long ret2 = put_user_s64(off, arg3);
10087             if (is_error(ret2)) {
10088                 ret = ret2;
10089             }
10090         }
10091         return ret;
10092     }
10093 #endif
10094 #endif
10095 #ifdef TARGET_NR_vfork
10096     case TARGET_NR_vfork:
10097         return get_errno(do_fork(cpu_env,
10098                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10099                          0, 0, 0, 0));
10100 #endif
10101 #ifdef TARGET_NR_ugetrlimit
10102     case TARGET_NR_ugetrlimit:
10103     {
10104         struct rlimit rlim;
10105         int resource = target_to_host_resource(arg1);
10106         ret = get_errno(getrlimit(resource, &rlim));
10107         if (!is_error(ret)) {
10108             struct target_rlimit *target_rlim;
10109             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10110                 return -TARGET_EFAULT;
10111             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10112             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10113             unlock_user_struct(target_rlim, arg2, 1);
10114         }
10115         return ret;
10116     }
10117 #endif
10118 #ifdef TARGET_NR_truncate64
10119     case TARGET_NR_truncate64:
10120         if (!(p = lock_user_string(arg1)))
10121             return -TARGET_EFAULT;
10122         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10123         unlock_user(p, arg1, 0);
10124         return ret;
10125 #endif
10126 #ifdef TARGET_NR_ftruncate64
10127     case TARGET_NR_ftruncate64:
10128         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10129 #endif
10130 #ifdef TARGET_NR_stat64
10131     case TARGET_NR_stat64:
10132         if (!(p = lock_user_string(arg1))) {
10133             return -TARGET_EFAULT;
10134         }
10135         ret = get_errno(stat(path(p), &st));
10136         unlock_user(p, arg1, 0);
10137         if (!is_error(ret))
10138             ret = host_to_target_stat64(cpu_env, arg2, &st);
10139         return ret;
10140 #endif
10141 #ifdef TARGET_NR_lstat64
10142     case TARGET_NR_lstat64:
10143         if (!(p = lock_user_string(arg1))) {
10144             return -TARGET_EFAULT;
10145         }
10146         ret = get_errno(lstat(path(p), &st));
10147         unlock_user(p, arg1, 0);
10148         if (!is_error(ret))
10149             ret = host_to_target_stat64(cpu_env, arg2, &st);
10150         return ret;
10151 #endif
10152 #ifdef TARGET_NR_fstat64
10153     case TARGET_NR_fstat64:
10154         ret = get_errno(fstat(arg1, &st));
10155         if (!is_error(ret))
10156             ret = host_to_target_stat64(cpu_env, arg2, &st);
10157         return ret;
10158 #endif
10159 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10160 #ifdef TARGET_NR_fstatat64
10161     case TARGET_NR_fstatat64:
10162 #endif
10163 #ifdef TARGET_NR_newfstatat
10164     case TARGET_NR_newfstatat:
10165 #endif
10166         if (!(p = lock_user_string(arg2))) {
10167             return -TARGET_EFAULT;
10168         }
10169         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10170         unlock_user(p, arg2, 0);
10171         if (!is_error(ret))
10172             ret = host_to_target_stat64(cpu_env, arg3, &st);
10173         return ret;
10174 #endif
10175 #ifdef TARGET_NR_lchown
10176     case TARGET_NR_lchown:
10177         if (!(p = lock_user_string(arg1)))
10178             return -TARGET_EFAULT;
10179         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10180         unlock_user(p, arg1, 0);
10181         return ret;
10182 #endif
10183 #ifdef TARGET_NR_getuid
10184     case TARGET_NR_getuid:
10185         return get_errno(high2lowuid(getuid()));
10186 #endif
10187 #ifdef TARGET_NR_getgid
10188     case TARGET_NR_getgid:
10189         return get_errno(high2lowgid(getgid()));
10190 #endif
10191 #ifdef TARGET_NR_geteuid
10192     case TARGET_NR_geteuid:
10193         return get_errno(high2lowuid(geteuid()));
10194 #endif
10195 #ifdef TARGET_NR_getegid
10196     case TARGET_NR_getegid:
10197         return get_errno(high2lowgid(getegid()));
10198 #endif
10199     case TARGET_NR_setreuid:
10200         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10201     case TARGET_NR_setregid:
10202         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10203     case TARGET_NR_getgroups:
10204         {
10205             int gidsetsize = arg1;
10206             target_id *target_grouplist;
10207             gid_t *grouplist;
10208             int i;
10209 
10210             grouplist = alloca(gidsetsize * sizeof(gid_t));
10211             ret = get_errno(getgroups(gidsetsize, grouplist));
10212             if (gidsetsize == 0)
10213                 return ret;
10214             if (!is_error(ret)) {
10215                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10216                 if (!target_grouplist)
10217                     return -TARGET_EFAULT;
10218                 for (i = 0; i < ret; i++)
10219                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10220                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10221             }
10222         }
10223         return ret;
10224     case TARGET_NR_setgroups:
10225         {
10226             int gidsetsize = arg1;
10227             target_id *target_grouplist;
10228             gid_t *grouplist = NULL;
10229             int i;
10230             if (gidsetsize) {
10231                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10232                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10233                 if (!target_grouplist) {
10234                     return -TARGET_EFAULT;
10235                 }
10236                 for (i = 0; i < gidsetsize; i++) {
10237                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10238                 }
10239                 unlock_user(target_grouplist, arg2, 0);
10240             }
10241             return get_errno(setgroups(gidsetsize, grouplist));
10242         }
10243     case TARGET_NR_fchown:
10244         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10245 #if defined(TARGET_NR_fchownat)
10246     case TARGET_NR_fchownat:
10247         if (!(p = lock_user_string(arg2)))
10248             return -TARGET_EFAULT;
10249         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10250                                  low2highgid(arg4), arg5));
10251         unlock_user(p, arg2, 0);
10252         return ret;
10253 #endif
10254 #ifdef TARGET_NR_setresuid
10255     case TARGET_NR_setresuid:
10256         return get_errno(sys_setresuid(low2highuid(arg1),
10257                                        low2highuid(arg2),
10258                                        low2highuid(arg3)));
10259 #endif
10260 #ifdef TARGET_NR_getresuid
10261     case TARGET_NR_getresuid:
10262         {
10263             uid_t ruid, euid, suid;
10264             ret = get_errno(getresuid(&ruid, &euid, &suid));
10265             if (!is_error(ret)) {
10266                 if (put_user_id(high2lowuid(ruid), arg1)
10267                     || put_user_id(high2lowuid(euid), arg2)
10268                     || put_user_id(high2lowuid(suid), arg3))
10269                     return -TARGET_EFAULT;
10270             }
10271         }
10272         return ret;
10273 #endif
10274 #ifdef TARGET_NR_getresgid
10275     case TARGET_NR_setresgid:
10276         return get_errno(sys_setresgid(low2highgid(arg1),
10277                                        low2highgid(arg2),
10278                                        low2highgid(arg3)));
10279 #endif
10280 #ifdef TARGET_NR_getresgid
10281     case TARGET_NR_getresgid:
10282         {
10283             gid_t rgid, egid, sgid;
10284             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10285             if (!is_error(ret)) {
10286                 if (put_user_id(high2lowgid(rgid), arg1)
10287                     || put_user_id(high2lowgid(egid), arg2)
10288                     || put_user_id(high2lowgid(sgid), arg3))
10289                     return -TARGET_EFAULT;
10290             }
10291         }
10292         return ret;
10293 #endif
10294 #ifdef TARGET_NR_chown
10295     case TARGET_NR_chown:
10296         if (!(p = lock_user_string(arg1)))
10297             return -TARGET_EFAULT;
10298         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10299         unlock_user(p, arg1, 0);
10300         return ret;
10301 #endif
10302     case TARGET_NR_setuid:
10303         return get_errno(sys_setuid(low2highuid(arg1)));
10304     case TARGET_NR_setgid:
10305         return get_errno(sys_setgid(low2highgid(arg1)));
10306     case TARGET_NR_setfsuid:
10307         return get_errno(setfsuid(arg1));
10308     case TARGET_NR_setfsgid:
10309         return get_errno(setfsgid(arg1));
10310 
10311 #ifdef TARGET_NR_lchown32
10312     case TARGET_NR_lchown32:
10313         if (!(p = lock_user_string(arg1)))
10314             return -TARGET_EFAULT;
10315         ret = get_errno(lchown(p, arg2, arg3));
10316         unlock_user(p, arg1, 0);
10317         return ret;
10318 #endif
10319 #ifdef TARGET_NR_getuid32
10320     case TARGET_NR_getuid32:
10321         return get_errno(getuid());
10322 #endif
10323 
10324 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10325     /* Alpha specific: returns the real uid and passes the effective uid back in a4.  */
10326     case TARGET_NR_getxuid:
10327         {
10328             uid_t euid;
10329             euid = geteuid();
10330             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10331         }
10332         return get_errno(getuid());
10333 #endif
10334 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10335     /* Alpha specific: returns the real gid and passes the effective gid back in a4.  */
10336     case TARGET_NR_getxgid:
10337         {
10338             gid_t egid;
10339             egid = getegid();
10340             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10341         }
10342         return get_errno(getgid());
10343 #endif
10344 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10345     /* Alpha specific */
10346     case TARGET_NR_osf_getsysinfo:
10347         ret = -TARGET_EOPNOTSUPP;
10348         switch (arg1) {
10349           case TARGET_GSI_IEEE_FP_CONTROL:
10350             {
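                /* Report the software completion control word, taking the
                 * live exception status bits from the hardware FPCR.
                 */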
10351                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10352                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10353 
10354                 swcr &= ~SWCR_STATUS_MASK;
10355                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10356 
10357                 if (put_user_u64(swcr, arg2))
10358                     return -TARGET_EFAULT;
10359                 ret = 0;
10360             }
10361             break;
10362 
10363           /* case GSI_IEEE_STATE_AT_SIGNAL:
10364              -- Not implemented in linux kernel.
10365              case GSI_UACPROC:
10366              -- Retrieves current unaligned access state; not much used.
10367              case GSI_PROC_TYPE:
10368              -- Retrieves implver information; surely not used.
10369              case GSI_GET_HWRPB:
10370              -- Grabs a copy of the HWRPB; surely not used.
10371           */
10372         }
10373         return ret;
10374 #endif
10375 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10376     /* Alpha specific */
10377     case TARGET_NR_osf_setsysinfo:
10378         ret = -TARGET_EOPNOTSUPP;
10379         switch (arg1) {
10380           case TARGET_SSI_IEEE_FP_CONTROL:
10381             {
10382                 uint64_t swcr, fpcr;
10383 
10384                 if (get_user_u64(swcr, arg2)) {
10385                     return -TARGET_EFAULT;
10386                 }
10387 
10388                 /*
10389                  * The kernel calls swcr_update_status to update the
10390                  * status bits from the fpcr at every point that it
10391                  * could be queried.  Therefore, we store the status
10392                  * bits only in FPCR.
10393                  */
10394                 ((CPUAlphaState *)cpu_env)->swcr
10395                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10396 
10397                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10398                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10399                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10400                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10401                 ret = 0;
10402             }
10403             break;
10404 
10405           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10406             {
10407                 uint64_t exc, fpcr, fex;
10408 
10409                 if (get_user_u64(exc, arg2)) {
10410                     return -TARGET_EFAULT;
10411                 }
10412                 exc &= SWCR_STATUS_MASK;
10413                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10414 
10415                 /* Old exceptions are not signaled.  */
10416                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10417                 fex = exc & ~fex;
10418                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10419                 fex &= ((CPUArchState *)cpu_env)->swcr;
10420 
10421                 /* Update the hardware fpcr.  */
10422                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10423                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10424 
10425                 if (fex) {
10426                     int si_code = TARGET_FPE_FLTUNK;
10427                     target_siginfo_t info;
10428 
10429                     if (fex & SWCR_TRAP_ENABLE_DNO) {
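                    /* Pick an si_code; if several enabled exception bits are
                     * set, the last matching test below wins.
                     */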
10430                         si_code = TARGET_FPE_FLTUND;
10431                     }
10432                     if (fex & SWCR_TRAP_ENABLE_INE) {
10433                         si_code = TARGET_FPE_FLTRES;
10434                     }
10435                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10436                         si_code = TARGET_FPE_FLTUND;
10437                     }
10438                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10439                         si_code = TARGET_FPE_FLTOVF;
10440                     }
10441                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10442                         si_code = TARGET_FPE_FLTDIV;
10443                     }
10444                     if (fex & SWCR_TRAP_ENABLE_INV) {
10445                         si_code = TARGET_FPE_FLTINV;
10446                     }
10447 
10448                     info.si_signo = SIGFPE;
10449                     info.si_errno = 0;
10450                     info.si_code = si_code;
10451                     info._sifields._sigfault._addr
10452                         = ((CPUArchState *)cpu_env)->pc;
10453                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10454                                  QEMU_SI_FAULT, &info);
10455                 }
10456                 ret = 0;
10457             }
10458             break;
10459 
10460           /* case SSI_NVPAIRS:
10461              -- Used with SSIN_UACPROC to enable unaligned accesses.
10462              case SSI_IEEE_STATE_AT_SIGNAL:
10463              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10464              -- Not implemented in linux kernel
10465           */
10466         }
10467         return ret;
10468 #endif
10469 #ifdef TARGET_NR_osf_sigprocmask
10470     /* Alpha specific.  */
10471     case TARGET_NR_osf_sigprocmask:
10472         {
10473             abi_ulong mask;
10474             int how;
10475             sigset_t set, oldset;
10476 
10477             switch (arg1) {
10478             case TARGET_SIG_BLOCK:
10479                 how = SIG_BLOCK;
10480                 break;
10481             case TARGET_SIG_UNBLOCK:
10482                 how = SIG_UNBLOCK;
10483                 break;
10484             case TARGET_SIG_SETMASK:
10485                 how = SIG_SETMASK;
10486                 break;
10487             default:
10488                 return -TARGET_EINVAL;
10489             }
10490             mask = arg2;
10491             target_to_host_old_sigset(&set, &mask);
10492             ret = do_sigprocmask(how, &set, &oldset);
10493             if (!ret) {
10494                 host_to_target_old_sigset(&mask, &oldset);
10495                 ret = mask;
10496             }
10497         }
10498         return ret;
10499 #endif
10500 
10501 #ifdef TARGET_NR_getgid32
10502     case TARGET_NR_getgid32:
10503         return get_errno(getgid());
10504 #endif
10505 #ifdef TARGET_NR_geteuid32
10506     case TARGET_NR_geteuid32:
10507         return get_errno(geteuid());
10508 #endif
10509 #ifdef TARGET_NR_getegid32
10510     case TARGET_NR_getegid32:
10511         return get_errno(getegid());
10512 #endif
10513 #ifdef TARGET_NR_setreuid32
10514     case TARGET_NR_setreuid32:
10515         return get_errno(setreuid(arg1, arg2));
10516 #endif
10517 #ifdef TARGET_NR_setregid32
10518     case TARGET_NR_setregid32:
10519         return get_errno(setregid(arg1, arg2));
10520 #endif
10521 #ifdef TARGET_NR_getgroups32
10522     case TARGET_NR_getgroups32:
10523         {
10524             int gidsetsize = arg1;
10525             uint32_t *target_grouplist;
10526             gid_t *grouplist;
10527             int i;
10528 
10529             grouplist = alloca(gidsetsize * sizeof(gid_t));
10530             ret = get_errno(getgroups(gidsetsize, grouplist));
10531             if (gidsetsize == 0)
10532                 return ret;
10533             if (!is_error(ret)) {
10534                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10535                 if (!target_grouplist) {
10536                     return -TARGET_EFAULT;
10537                 }
10538                 for (i = 0; i < ret; i++)
10539                     target_grouplist[i] = tswap32(grouplist[i]);
10540                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10541             }
10542         }
10543         return ret;
10544 #endif
10545 #ifdef TARGET_NR_setgroups32
10546     case TARGET_NR_setgroups32:
10547         {
10548             int gidsetsize = arg1;
10549             uint32_t *target_grouplist;
10550             gid_t *grouplist;
10551             int i;
10552 
10553             grouplist = alloca(gidsetsize * sizeof(gid_t));
10554             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10555             if (!target_grouplist) {
10556                 return -TARGET_EFAULT;
10557             }
10558             for (i = 0; i < gidsetsize; i++)
10559                 grouplist[i] = tswap32(target_grouplist[i]);
10560             unlock_user(target_grouplist, arg2, 0);
10561             return get_errno(setgroups(gidsetsize, grouplist));
10562         }
10563 #endif
10564 #ifdef TARGET_NR_fchown32
10565     case TARGET_NR_fchown32:
10566         return get_errno(fchown(arg1, arg2, arg3));
10567 #endif
10568 #ifdef TARGET_NR_setresuid32
10569     case TARGET_NR_setresuid32:
10570         return get_errno(sys_setresuid(arg1, arg2, arg3));
10571 #endif
10572 #ifdef TARGET_NR_getresuid32
10573     case TARGET_NR_getresuid32:
10574         {
10575             uid_t ruid, euid, suid;
10576             ret = get_errno(getresuid(&ruid, &euid, &suid));
10577             if (!is_error(ret)) {
10578                 if (put_user_u32(ruid, arg1)
10579                     || put_user_u32(euid, arg2)
10580                     || put_user_u32(suid, arg3))
10581                     return -TARGET_EFAULT;
10582             }
10583         }
10584         return ret;
10585 #endif
10586 #ifdef TARGET_NR_setresgid32
10587     case TARGET_NR_setresgid32:
10588         return get_errno(sys_setresgid(arg1, arg2, arg3));
10589 #endif
10590 #ifdef TARGET_NR_getresgid32
10591     case TARGET_NR_getresgid32:
10592         {
10593             gid_t rgid, egid, sgid;
10594             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10595             if (!is_error(ret)) {
10596                 if (put_user_u32(rgid, arg1)
10597                     || put_user_u32(egid, arg2)
10598                     || put_user_u32(sgid, arg3))
10599                     return -TARGET_EFAULT;
10600             }
10601         }
10602         return ret;
10603 #endif
10604 #ifdef TARGET_NR_chown32
10605     case TARGET_NR_chown32:
10606         if (!(p = lock_user_string(arg1)))
10607             return -TARGET_EFAULT;
10608         ret = get_errno(chown(p, arg2, arg3));
10609         unlock_user(p, arg1, 0);
10610         return ret;
10611 #endif
10612 #ifdef TARGET_NR_setuid32
10613     case TARGET_NR_setuid32:
10614         return get_errno(sys_setuid(arg1));
10615 #endif
10616 #ifdef TARGET_NR_setgid32
10617     case TARGET_NR_setgid32:
10618         return get_errno(sys_setgid(arg1));
10619 #endif
10620 #ifdef TARGET_NR_setfsuid32
10621     case TARGET_NR_setfsuid32:
10622         return get_errno(setfsuid(arg1));
10623 #endif
10624 #ifdef TARGET_NR_setfsgid32
10625     case TARGET_NR_setfsgid32:
10626         return get_errno(setfsgid(arg1));
10627 #endif
10628 #ifdef TARGET_NR_mincore
10629     case TARGET_NR_mincore:
10630         {
10631             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10632             if (!a) {
10633                 return -TARGET_ENOMEM;
10634             }
10635             p = lock_user_string(arg3);
10636             if (!p) {
10637                 ret = -TARGET_EFAULT;
10638             } else {
10639                 ret = get_errno(mincore(a, arg2, p));
10640                 unlock_user(p, arg3, ret);
10641             }
10642             unlock_user(a, arg1, 0);
10643         }
10644         return ret;
10645 #endif
10646 #ifdef TARGET_NR_arm_fadvise64_64
10647     case TARGET_NR_arm_fadvise64_64:
10648         /* arm_fadvise64_64 looks like fadvise64_64 but
10649          * with different argument order: fd, advice, offset, len
10650          * rather than the usual fd, offset, len, advice.
10651          * Note that offset and len are both 64-bit so appear as
10652          * pairs of 32-bit registers.
10653          */
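        /* For example (hypothetical values): a 4 GiB offset does not fit in
         * one 32-bit register, so the guest passes it as two halves in
         * arg3/arg4 and target_offset64() reassembles them into a single
         * 64-bit value; which register holds the high half depends on the
         * guest's endianness.
         */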
10654         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10655                             target_offset64(arg5, arg6), arg2);
10656         return -host_to_target_errno(ret);
10657 #endif
10658 
10659 #if TARGET_ABI_BITS == 32
10660 
10661 #ifdef TARGET_NR_fadvise64_64
10662     case TARGET_NR_fadvise64_64:
10663 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10664         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10665         ret = arg2;
10666         arg2 = arg3;
10667         arg3 = arg4;
10668         arg4 = arg5;
10669         arg5 = arg6;
10670         arg6 = ret;
10671 #else
10672         /* 6 args: fd, offset (high, low), len (high, low), advice */
10673         if (regpairs_aligned(cpu_env, num)) {
10674             /* offset is in (3,4), len in (5,6) and advice in 7 */
10675             arg2 = arg3;
10676             arg3 = arg4;
10677             arg4 = arg5;
10678             arg5 = arg6;
10679             arg6 = arg7;
10680         }
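        /* regpairs_aligned() reports ABIs that require 64-bit arguments to
         * start on an even register pair; on those, an unused pad slot
         * follows fd, so offset/len arrive one register later and the shift
         * above realigns them for the common call below.
         */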
10681 #endif
10682         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10683                             target_offset64(arg4, arg5), arg6);
10684         return -host_to_target_errno(ret);
10685 #endif
10686 
10687 #ifdef TARGET_NR_fadvise64
10688     case TARGET_NR_fadvise64:
10689         /* 5 args: fd, offset (high, low), len, advice */
10690         if (regpairs_aligned(cpu_env, num)) {
10691             /* offset is in (3,4), len in 5 and advice in 6 */
10692             arg2 = arg3;
10693             arg3 = arg4;
10694             arg4 = arg5;
10695             arg5 = arg6;
10696         }
10697         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10698         return -host_to_target_errno(ret);
10699 #endif
10700 
10701 #else /* not a 32-bit ABI */
10702 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10703 #ifdef TARGET_NR_fadvise64_64
10704     case TARGET_NR_fadvise64_64:
10705 #endif
10706 #ifdef TARGET_NR_fadvise64
10707     case TARGET_NR_fadvise64:
10708 #endif
10709 #ifdef TARGET_S390X
10710         switch (arg4) {
10711         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10712         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10713         case 6: arg4 = POSIX_FADV_DONTNEED; break;
10714         case 7: arg4 = POSIX_FADV_NOREUSE; break;
10715         default: break;
10716         }
10717 #endif
10718         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10719 #endif
10720 #endif /* end of 64-bit ABI fadvise handling */
10721 
10722 #ifdef TARGET_NR_madvise
10723     case TARGET_NR_madvise:
10724         /* A straight passthrough may not be safe because qemu sometimes
10725            turns private file-backed mappings into anonymous mappings.
10726            This will break MADV_DONTNEED.
10727            This is a hint, so ignoring and returning success is ok.  */
10728         return 0;
10729 #endif
10730 #if TARGET_ABI_BITS == 32
10731     case TARGET_NR_fcntl64:
10732     {
10733         int cmd;
10734         struct flock64 fl;
10735         from_flock64_fn *copyfrom = copy_from_user_flock64;
10736         to_flock64_fn *copyto = copy_to_user_flock64;
10737 
10738 #ifdef TARGET_ARM
10739         if (!((CPUARMState *)cpu_env)->eabi) {
10740             copyfrom = copy_from_user_oabi_flock64;
10741             copyto = copy_to_user_oabi_flock64;
10742         }
10743 #endif
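        /* ARM OABI guests lay out struct flock64 with different alignment
         * and padding than EABI guests, which is why separate OABI copy
         * helpers are selected above; either way the flock64 contents are
         * converted field by field rather than copied verbatim.
         */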
10744 
10745         cmd = target_to_host_fcntl_cmd(arg2);
10746         if (cmd == -TARGET_EINVAL) {
10747             return cmd;
10748         }
10749 
10750         switch (arg2) {
10751         case TARGET_F_GETLK64:
10752             ret = copyfrom(&fl, arg3);
10753             if (ret) {
10754                 break;
10755             }
10756             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10757             if (ret == 0) {
10758                 ret = copyto(arg3, &fl);
10759             }
10760             break;
10761 
10762         case TARGET_F_SETLK64:
10763         case TARGET_F_SETLKW64:
10764             ret = copyfrom(&fl, arg3);
10765             if (ret) {
10766                 break;
10767             }
10768             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10769             break;
10770         default:
10771             ret = do_fcntl(arg1, arg2, arg3);
10772             break;
10773         }
10774         return ret;
10775     }
10776 #endif
10777 #ifdef TARGET_NR_cacheflush
10778     case TARGET_NR_cacheflush:
10779         /* self-modifying code is handled automatically, so nothing needed */
10780         return 0;
10781 #endif
10782 #ifdef TARGET_NR_getpagesize
10783     case TARGET_NR_getpagesize:
10784         return TARGET_PAGE_SIZE;
10785 #endif
10786     case TARGET_NR_gettid:
10787         return get_errno(sys_gettid());
10788 #ifdef TARGET_NR_readahead
10789     case TARGET_NR_readahead:
10790 #if TARGET_ABI_BITS == 32
10791         if (regpairs_aligned(cpu_env, num)) {
10792             arg2 = arg3;
10793             arg3 = arg4;
10794             arg4 = arg5;
10795         }
10796         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
10797 #else
10798         ret = get_errno(readahead(arg1, arg2, arg3));
10799 #endif
10800         return ret;
10801 #endif
10802 #ifdef CONFIG_ATTR
10803 #ifdef TARGET_NR_setxattr
10804     case TARGET_NR_listxattr:
10805     case TARGET_NR_llistxattr:
10806     {
10807         void *p, *b = 0;
10808         if (arg2) {
10809             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10810             if (!b) {
10811                 return -TARGET_EFAULT;
10812             }
10813         }
10814         p = lock_user_string(arg1);
10815         if (p) {
10816             if (num == TARGET_NR_listxattr) {
10817                 ret = get_errno(listxattr(p, b, arg3));
10818             } else {
10819                 ret = get_errno(llistxattr(p, b, arg3));
10820             }
10821         } else {
10822             ret = -TARGET_EFAULT;
10823         }
10824         unlock_user(p, arg1, 0);
10825         unlock_user(b, arg2, arg3);
10826         return ret;
10827     }
10828     case TARGET_NR_flistxattr:
10829     {
10830         void *b = 0;
10831         if (arg2) {
10832             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10833             if (!b) {
10834                 return -TARGET_EFAULT;
10835             }
10836         }
10837         ret = get_errno(flistxattr(arg1, b, arg3));
10838         unlock_user(b, arg2, arg3);
10839         return ret;
10840     }
10841     case TARGET_NR_setxattr:
10842     case TARGET_NR_lsetxattr:
10843         {
10844             void *p, *n, *v = 0;
10845             if (arg3) {
10846                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10847                 if (!v) {
10848                     return -TARGET_EFAULT;
10849                 }
10850             }
10851             p = lock_user_string(arg1);
10852             n = lock_user_string(arg2);
10853             if (p && n) {
10854                 if (num == TARGET_NR_setxattr) {
10855                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
10856                 } else {
10857                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10858                 }
10859             } else {
10860                 ret = -TARGET_EFAULT;
10861             }
10862             unlock_user(p, arg1, 0);
10863             unlock_user(n, arg2, 0);
10864             unlock_user(v, arg3, 0);
10865         }
10866         return ret;
10867     case TARGET_NR_fsetxattr:
10868         {
10869             void *n, *v = 0;
10870             if (arg3) {
10871                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10872                 if (!v) {
10873                     return -TARGET_EFAULT;
10874                 }
10875             }
10876             n = lock_user_string(arg2);
10877             if (n) {
10878                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10879             } else {
10880                 ret = -TARGET_EFAULT;
10881             }
10882             unlock_user(n, arg2, 0);
10883             unlock_user(v, arg3, 0);
10884         }
10885         return ret;
10886     case TARGET_NR_getxattr:
10887     case TARGET_NR_lgetxattr:
10888         {
10889             void *p, *n, *v = 0;
10890             if (arg3) {
10891                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10892                 if (!v) {
10893                     return -TARGET_EFAULT;
10894                 }
10895             }
10896             p = lock_user_string(arg1);
10897             n = lock_user_string(arg2);
10898             if (p && n) {
10899                 if (num == TARGET_NR_getxattr) {
10900                     ret = get_errno(getxattr(p, n, v, arg4));
10901                 } else {
10902                     ret = get_errno(lgetxattr(p, n, v, arg4));
10903                 }
10904             } else {
10905                 ret = -TARGET_EFAULT;
10906             }
10907             unlock_user(p, arg1, 0);
10908             unlock_user(n, arg2, 0);
10909             unlock_user(v, arg3, arg4);
10910         }
10911         return ret;
10912     case TARGET_NR_fgetxattr:
10913         {
10914             void *n, *v = 0;
10915             if (arg3) {
10916                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10917                 if (!v) {
10918                     return -TARGET_EFAULT;
10919                 }
10920             }
10921             n = lock_user_string(arg2);
10922             if (n) {
10923                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10924             } else {
10925                 ret = -TARGET_EFAULT;
10926             }
10927             unlock_user(n, arg2, 0);
10928             unlock_user(v, arg3, arg4);
10929         }
10930         return ret;
10931     case TARGET_NR_removexattr:
10932     case TARGET_NR_lremovexattr:
10933         {
10934             void *p, *n;
10935             p = lock_user_string(arg1);
10936             n = lock_user_string(arg2);
10937             if (p && n) {
10938                 if (num == TARGET_NR_removexattr) {
10939                     ret = get_errno(removexattr(p, n));
10940                 } else {
10941                     ret = get_errno(lremovexattr(p, n));
10942                 }
10943             } else {
10944                 ret = -TARGET_EFAULT;
10945             }
10946             unlock_user(p, arg1, 0);
10947             unlock_user(n, arg2, 0);
10948         }
10949         return ret;
10950     case TARGET_NR_fremovexattr:
10951         {
10952             void *n;
10953             n = lock_user_string(arg2);
10954             if (n) {
10955                 ret = get_errno(fremovexattr(arg1, n));
10956             } else {
10957                 ret = -TARGET_EFAULT;
10958             }
10959             unlock_user(n, arg2, 0);
10960         }
10961         return ret;
10962 #endif
10963 #endif /* CONFIG_ATTR */
10964 #ifdef TARGET_NR_set_thread_area
10965     case TARGET_NR_set_thread_area:
10966 #if defined(TARGET_MIPS)
10967       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10968       return 0;
10969 #elif defined(TARGET_CRIS)
10970       if (arg1 & 0xff) {
10971           ret = -TARGET_EINVAL;
10972       } else {
10973           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10974           ret = 0;
10975       }
10976       return ret;
10977 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10978       return do_set_thread_area(cpu_env, arg1);
10979 #elif defined(TARGET_M68K)
10980       {
10981           TaskState *ts = cpu->opaque;
10982           ts->tp_value = arg1;
10983           return 0;
10984       }
10985 #else
10986       return -TARGET_ENOSYS;
10987 #endif
10988 #endif
10989 #ifdef TARGET_NR_get_thread_area
10990     case TARGET_NR_get_thread_area:
10991 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10992         return do_get_thread_area(cpu_env, arg1);
10993 #elif defined(TARGET_M68K)
10994         {
10995             TaskState *ts = cpu->opaque;
10996             return ts->tp_value;
10997         }
10998 #else
10999         return -TARGET_ENOSYS;
11000 #endif
11001 #endif
11002 #ifdef TARGET_NR_getdomainname
11003     case TARGET_NR_getdomainname:
11004         return -TARGET_ENOSYS;
11005 #endif
11006 
11007 #ifdef TARGET_NR_clock_settime
11008     case TARGET_NR_clock_settime:
11009     {
11010         struct timespec ts;
11011 
11012         ret = target_to_host_timespec(&ts, arg2);
11013         if (!is_error(ret)) {
11014             ret = get_errno(clock_settime(arg1, &ts));
11015         }
11016         return ret;
11017     }
11018 #endif
11019 #ifdef TARGET_NR_clock_gettime
11020     case TARGET_NR_clock_gettime:
11021     {
11022         struct timespec ts;
11023         ret = get_errno(clock_gettime(arg1, &ts));
11024         if (!is_error(ret)) {
11025             ret = host_to_target_timespec(arg2, &ts);
11026         }
11027         return ret;
11028     }
11029 #endif
11030 #ifdef TARGET_NR_clock_getres
11031     case TARGET_NR_clock_getres:
11032     {
11033         struct timespec ts;
11034         ret = get_errno(clock_getres(arg1, &ts));
11035         if (!is_error(ret)) {
11036             host_to_target_timespec(arg2, &ts);
11037         }
11038         return ret;
11039     }
11040 #endif
11041 #ifdef TARGET_NR_clock_nanosleep
11042     case TARGET_NR_clock_nanosleep:
11043     {
11044         struct timespec ts;
11045         target_to_host_timespec(&ts, arg3);
11046         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11047                                              &ts, arg4 ? &ts : NULL));
11048         if (arg4)
11049             host_to_target_timespec(arg4, &ts);
11050 
11051 #if defined(TARGET_PPC)
11052         /* clock_nanosleep is odd in that it returns positive errno values.
11053          * On PPC, CR0 bit 3 should be set in such a situation. */
11054         if (ret && ret != -TARGET_ERESTARTSYS) {
11055             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11056         }
11057 #endif
11058         return ret;
11059     }
11060 #endif
11061 
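    /* set_tid_address() only records a pointer that the kernel will clear and
     * futex-wake when the thread exits; since guest memory is mapped directly
     * into the host address space, g2h() can hand the kernel a usable host
     * pointer to the guest's location.
     */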
11062 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11063     case TARGET_NR_set_tid_address:
11064         return get_errno(set_tid_address((int *)g2h(arg1)));
11065 #endif
11066 
11067     case TARGET_NR_tkill:
11068         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11069 
11070     case TARGET_NR_tgkill:
11071         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11072                          target_to_host_signal(arg3)));
11073 
11074 #ifdef TARGET_NR_set_robust_list
11075     case TARGET_NR_set_robust_list:
11076     case TARGET_NR_get_robust_list:
11077         /* The ABI for supporting robust futexes has userspace pass
11078          * the kernel a pointer to a linked list which is updated by
11079          * userspace after the syscall; the list is walked by the kernel
11080          * when the thread exits. Since the linked list in QEMU guest
11081          * memory isn't a valid linked list for the host and we have
11082          * no way to reliably intercept the thread-death event, we can't
11083          * support these. Silently return ENOSYS so that guest userspace
11084          * falls back to a non-robust futex implementation (which should
11085          * be OK except in the corner case of the guest crashing while
11086          * holding a mutex that is shared with another process via
11087          * shared memory).
11088          */
11089         return -TARGET_ENOSYS;
11090 #endif
11091 
11092 #if defined(TARGET_NR_utimensat)
11093     case TARGET_NR_utimensat:
11094         {
11095             struct timespec *tsp, ts[2];
11096             if (!arg3) {
11097                 tsp = NULL;
11098             } else {
11099                 target_to_host_timespec(ts, arg3);
11100                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11101                 tsp = ts;
11102             }
11103             if (!arg2) {
11104                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11105             } else {
11106                 if (!(p = lock_user_string(arg2))) {
11107                     return -TARGET_EFAULT;
11108                 }
11109                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11110                 unlock_user(p, arg2, 0);
11111             }
11112         }
11113         return ret;
11114 #endif
11115     case TARGET_NR_futex:
11116         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11117 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11118     case TARGET_NR_inotify_init:
11119         ret = get_errno(sys_inotify_init());
11120         if (ret >= 0) {
11121             fd_trans_register(ret, &target_inotify_trans);
11122         }
11123         return ret;
11124 #endif
11125 #ifdef CONFIG_INOTIFY1
11126 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11127     case TARGET_NR_inotify_init1:
11128         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11129                                           fcntl_flags_tbl)));
11130         if (ret >= 0) {
11131             fd_trans_register(ret, &target_inotify_trans);
11132         }
11133         return ret;
11134 #endif
11135 #endif
11136 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11137     case TARGET_NR_inotify_add_watch:
11138         p = lock_user_string(arg2);
11139         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11140         unlock_user(p, arg2, 0);
11141         return ret;
11142 #endif
11143 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11144     case TARGET_NR_inotify_rm_watch:
11145         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11146 #endif
11147 
11148 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11149     case TARGET_NR_mq_open:
11150         {
11151             struct mq_attr posix_mq_attr;
11152             struct mq_attr *pposix_mq_attr;
11153             int host_flags;
11154 
11155             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11156             pposix_mq_attr = NULL;
11157             if (arg4) {
11158                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11159                     return -TARGET_EFAULT;
11160                 }
11161                 pposix_mq_attr = &posix_mq_attr;
11162             }
11163             p = lock_user_string(arg1 - 1);
11164             if (!p) {
11165                 return -TARGET_EFAULT;
11166             }
11167             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11168             unlock_user(p, arg1, 0);
11169         }
11170         return ret;
11171 
11172     case TARGET_NR_mq_unlink:
11173         p = lock_user_string(arg1 - 1);
11174         if (!p) {
11175             return -TARGET_EFAULT;
11176         }
11177         ret = get_errno(mq_unlink(p));
11178         unlock_user(p, arg1, 0);
11179         return ret;
11180 
11181     case TARGET_NR_mq_timedsend:
11182         {
11183             struct timespec ts;
11184 
11185             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11186             if (arg5 != 0) {
11187                 target_to_host_timespec(&ts, arg5);
11188                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11189                 host_to_target_timespec(arg5, &ts);
11190             } else {
11191                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11192             }
11193             unlock_user(p, arg2, arg3);
11194         }
11195         return ret;
11196 
11197     case TARGET_NR_mq_timedreceive:
11198         {
11199             struct timespec ts;
11200             unsigned int prio;
11201 
11202             p = lock_user(VERIFY_WRITE, arg2, arg3, 1);
11203             if (arg5 != 0) {
11204                 target_to_host_timespec(&ts, arg5);
11205                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11206                                                      &prio, &ts));
11207                 host_to_target_timespec(arg5, &ts);
11208             } else {
11209                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11210                                                      &prio, NULL));
11211             }
11212             unlock_user(p, arg2, arg3);
11213             if (arg4 != 0)
11214                 put_user_u32(prio, arg4);
11215         }
11216         return ret;
11217 
11218     /* Not implemented for now... */
11219 /*     case TARGET_NR_mq_notify: */
11220 /*         break; */
11221 
11222     case TARGET_NR_mq_getsetattr:
11223         {
11224             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11225             ret = 0;
11226             if (arg2 != 0) {
11227                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11228                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11229                                            &posix_mq_attr_out));
11230             } else if (arg3 != 0) {
11231                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11232             }
11233             if (ret == 0 && arg3 != 0) {
11234                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11235             }
11236         }
11237         return ret;
11238 #endif
11239 
11240 #ifdef CONFIG_SPLICE
11241 #ifdef TARGET_NR_tee
11242     case TARGET_NR_tee:
11243         {
11244             ret = get_errno(tee(arg1,arg2,arg3,arg4));
11245         }
11246         return ret;
11247 #endif
11248 #ifdef TARGET_NR_splice
11249     case TARGET_NR_splice:
11250         {
11251             loff_t loff_in, loff_out;
11252             loff_t *ploff_in = NULL, *ploff_out = NULL;
11253             if (arg2) {
11254                 if (get_user_u64(loff_in, arg2)) {
11255                     return -TARGET_EFAULT;
11256                 }
11257                 ploff_in = &loff_in;
11258             }
11259             if (arg4) {
11260                 if (get_user_u64(loff_out, arg4)) {
11261                     return -TARGET_EFAULT;
11262                 }
11263                 ploff_out = &loff_out;
11264             }
11265             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11266             if (arg2) {
11267                 if (put_user_u64(loff_in, arg2)) {
11268                     return -TARGET_EFAULT;
11269                 }
11270             }
11271             if (arg4) {
11272                 if (put_user_u64(loff_out, arg4)) {
11273                     return -TARGET_EFAULT;
11274                 }
11275             }
11276         }
11277         return ret;
11278 #endif
11279 #ifdef TARGET_NR_vmsplice
11280     case TARGET_NR_vmsplice:
11281         {
11282             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11283             if (vec != NULL) {
11284                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11285                 unlock_iovec(vec, arg2, arg3, 0);
11286             } else {
11287                 ret = -host_to_target_errno(errno);
11288             }
11289         }
11290         return ret;
11291 #endif
11292 #endif /* CONFIG_SPLICE */
11293 #ifdef CONFIG_EVENTFD
11294 #if defined(TARGET_NR_eventfd)
11295     case TARGET_NR_eventfd:
11296         ret = get_errno(eventfd(arg1, 0));
11297         if (ret >= 0) {
11298             fd_trans_register(ret, &target_eventfd_trans);
11299         }
11300         return ret;
11301 #endif
11302 #if defined(TARGET_NR_eventfd2)
11303     case TARGET_NR_eventfd2:
11304     {
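        /* TARGET_O_NONBLOCK and TARGET_O_CLOEXEC may have different numeric
         * values on the guest than O_NONBLOCK and O_CLOEXEC do on the host,
         * so those two bits are translated explicitly; any remaining flag
         * bits are passed through unchanged.
         */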
11305         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11306         if (arg2 & TARGET_O_NONBLOCK) {
11307             host_flags |= O_NONBLOCK;
11308         }
11309         if (arg2 & TARGET_O_CLOEXEC) {
11310             host_flags |= O_CLOEXEC;
11311         }
11312         ret = get_errno(eventfd(arg1, host_flags));
11313         if (ret >= 0) {
11314             fd_trans_register(ret, &target_eventfd_trans);
11315         }
11316         return ret;
11317     }
11318 #endif
11319 #endif /* CONFIG_EVENTFD  */
11320 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11321     case TARGET_NR_fallocate:
11322 #if TARGET_ABI_BITS == 32
11323         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11324                                   target_offset64(arg5, arg6)));
11325 #else
11326         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11327 #endif
11328         return ret;
11329 #endif
11330 #if defined(CONFIG_SYNC_FILE_RANGE)
11331 #if defined(TARGET_NR_sync_file_range)
11332     case TARGET_NR_sync_file_range:
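        /* On 32-bit ABIs the 64-bit offset and nbytes arrive as register
         * pairs; MIPS o32 additionally inserts a pad word after fd, which
         * shifts the pairs to (arg3,arg4)/(arg5,arg6) and the flags to arg7.
         */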
11333 #if TARGET_ABI_BITS == 32
11334 #if defined(TARGET_MIPS)
11335         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11336                                         target_offset64(arg5, arg6), arg7));
11337 #else
11338         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11339                                         target_offset64(arg4, arg5), arg6));
11340 #endif /* !TARGET_MIPS */
11341 #else
11342         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11343 #endif
11344         return ret;
11345 #endif
11346 #if defined(TARGET_NR_sync_file_range2)
11347     case TARGET_NR_sync_file_range2:
11348         /* This is like sync_file_range but the arguments are reordered */
11349 #if TARGET_ABI_BITS == 32
11350         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11351                                         target_offset64(arg5, arg6), arg2));
11352 #else
11353         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11354 #endif
11355         return ret;
11356 #endif
11357 #endif
11358 #if defined(TARGET_NR_signalfd4)
11359     case TARGET_NR_signalfd4:
11360         return do_signalfd4(arg1, arg2, arg4);
11361 #endif
11362 #if defined(TARGET_NR_signalfd)
11363     case TARGET_NR_signalfd:
11364         return do_signalfd4(arg1, arg2, 0);
11365 #endif
11366 #if defined(CONFIG_EPOLL)
11367 #if defined(TARGET_NR_epoll_create)
11368     case TARGET_NR_epoll_create:
11369         return get_errno(epoll_create(arg1));
11370 #endif
11371 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11372     case TARGET_NR_epoll_create1:
11373         return get_errno(epoll_create1(arg1));
11374 #endif
11375 #if defined(TARGET_NR_epoll_ctl)
11376     case TARGET_NR_epoll_ctl:
11377     {
11378         struct epoll_event ep;
11379         struct epoll_event *epp = 0;
11380         if (arg4) {
11381             struct target_epoll_event *target_ep;
11382             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11383                 return -TARGET_EFAULT;
11384             }
11385             ep.events = tswap32(target_ep->events);
11386             /* The epoll_data_t union is just opaque data to the kernel,
11387              * so we transfer all 64 bits across and need not worry what
11388              * actual data type it is.
11389              */
11390             ep.data.u64 = tswap64(target_ep->data.u64);
11391             unlock_user_struct(target_ep, arg4, 0);
11392             epp = &ep;
11393         }
11394         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11395     }
11396 #endif
11397 
11398 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11399 #if defined(TARGET_NR_epoll_wait)
11400     case TARGET_NR_epoll_wait:
11401 #endif
11402 #if defined(TARGET_NR_epoll_pwait)
11403     case TARGET_NR_epoll_pwait:
11404 #endif
11405     {
11406         struct target_epoll_event *target_ep;
11407         struct epoll_event *ep;
11408         int epfd = arg1;
11409         int maxevents = arg3;
11410         int timeout = arg4;
11411 
11412         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11413             return -TARGET_EINVAL;
11414         }
11415 
11416         target_ep = lock_user(VERIFY_WRITE, arg2,
11417                               maxevents * sizeof(struct target_epoll_event), 1);
11418         if (!target_ep) {
11419             return -TARGET_EFAULT;
11420         }
11421 
11422         ep = g_try_new(struct epoll_event, maxevents);
11423         if (!ep) {
11424             unlock_user(target_ep, arg2, 0);
11425             return -TARGET_ENOMEM;
11426         }
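        /* The guest and host struct epoll_event layouts (and byte order) can
         * differ, so the wait is performed into a temporary host array and
         * the results are byte-swapped back into the guest buffer afterwards.
         */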
11427 
11428         switch (num) {
11429 #if defined(TARGET_NR_epoll_pwait)
11430         case TARGET_NR_epoll_pwait:
11431         {
11432             target_sigset_t *target_set;
11433             sigset_t _set, *set = &_set;
11434 
11435             if (arg5) {
11436                 if (arg6 != sizeof(target_sigset_t)) {
11437                     ret = -TARGET_EINVAL;
11438                     break;
11439                 }
11440 
11441                 target_set = lock_user(VERIFY_READ, arg5,
11442                                        sizeof(target_sigset_t), 1);
11443                 if (!target_set) {
11444                     ret = -TARGET_EFAULT;
11445                     break;
11446                 }
11447                 target_to_host_sigset(set, target_set);
11448                 unlock_user(target_set, arg5, 0);
11449             } else {
11450                 set = NULL;
11451             }
11452 
11453             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11454                                              set, SIGSET_T_SIZE));
11455             break;
11456         }
11457 #endif
11458 #if defined(TARGET_NR_epoll_wait)
11459         case TARGET_NR_epoll_wait:
11460             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11461                                              NULL, 0));
11462             break;
11463 #endif
11464         default:
11465             ret = -TARGET_ENOSYS;
11466         }
11467         if (!is_error(ret)) {
11468             int i;
11469             for (i = 0; i < ret; i++) {
11470                 target_ep[i].events = tswap32(ep[i].events);
11471                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11472             }
11473             unlock_user(target_ep, arg2,
11474                         ret * sizeof(struct target_epoll_event));
11475         } else {
11476             unlock_user(target_ep, arg2, 0);
11477         }
11478         g_free(ep);
11479         return ret;
11480     }
11481 #endif
11482 #endif
11483 #ifdef TARGET_NR_prlimit64
11484     case TARGET_NR_prlimit64:
11485     {
11486         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11487         struct target_rlimit64 *target_rnew, *target_rold;
11488         struct host_rlimit64 rnew, rold, *rnewp = 0;
11489         int resource = target_to_host_resource(arg2);
11490         if (arg3) {
11491             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11492                 return -TARGET_EFAULT;
11493             }
11494             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11495             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11496             unlock_user_struct(target_rnew, arg3, 0);
11497             rnewp = &rnew;
11498         }
11499 
11500         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11501         if (!is_error(ret) && arg4) {
11502             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11503                 return -TARGET_EFAULT;
11504             }
11505             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11506             target_rold->rlim_max = tswap64(rold.rlim_max);
11507             unlock_user_struct(target_rold, arg4, 1);
11508         }
11509         return ret;
11510     }
11511 #endif
11512 #ifdef TARGET_NR_gethostname
11513     case TARGET_NR_gethostname:
11514     {
11515         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11516         if (name) {
11517             ret = get_errno(gethostname(name, arg2));
11518             unlock_user(name, arg1, arg2);
11519         } else {
11520             ret = -TARGET_EFAULT;
11521         }
11522         return ret;
11523     }
11524 #endif
11525 #ifdef TARGET_NR_atomic_cmpxchg_32
11526     case TARGET_NR_atomic_cmpxchg_32:
11527     {
11528         /* should use start_exclusive from main.c */
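        /* m68k kernel helper emulation: compare the 32-bit word at guest
         * address arg6 with arg2 and, if they match, store arg1 there; the
         * old value is returned either way.  As noted above this is not
         * actually atomic with respect to other guest threads.
         */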
11529         abi_ulong mem_value;
11530         if (get_user_u32(mem_value, arg6)) {
11531             target_siginfo_t info;
11532             info.si_signo = SIGSEGV;
11533             info.si_errno = 0;
11534             info.si_code = TARGET_SEGV_MAPERR;
11535             info._sifields._sigfault._addr = arg6;
11536             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11537                          QEMU_SI_FAULT, &info);
11538             ret = 0xdeadbeef;
11539 
11540         }
11541         if (mem_value == arg2)
11542             put_user_u32(arg1, arg6);
11543         return mem_value;
11544     }
11545 #endif
11546 #ifdef TARGET_NR_atomic_barrier
11547     case TARGET_NR_atomic_barrier:
11548         /* Like the kernel implementation and the
11549            qemu arm barrier, no-op this? */
11550         return 0;
11551 #endif
11552 
11553 #ifdef TARGET_NR_timer_create
11554     case TARGET_NR_timer_create:
11555     {
11556         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11557 
11558         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11559 
11560         int clkid = arg1;
11561         int timer_index = next_free_host_timer();
11562 
11563         if (timer_index < 0) {
11564             ret = -TARGET_EAGAIN;
11565         } else {
11566             timer_t *phtimer = g_posix_timers + timer_index;
11567 
11568             if (arg2) {
11569                 phost_sevp = &host_sevp;
11570                 ret = target_to_host_sigevent(phost_sevp, arg2);
11571                 if (ret != 0) {
11572                     return ret;
11573                 }
11574             }
11575 
11576             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11577             if (ret) {
11578                 phtimer = NULL;
11579             } else {
11580                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11581                     return -TARGET_EFAULT;
11582                 }
11583             }
11584         }
11585         return ret;
11586     }
11587 #endif
11588 
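    /* The timer_* cases below recover the host timer from the guest handle
     * created above: get_timer_id() is expected to check the TIMER_MAGIC tag
     * and yield either an index into g_posix_timers or a negative target
     * errno, which is why a negative timerid is returned directly.
     */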
11589 #ifdef TARGET_NR_timer_settime
11590     case TARGET_NR_timer_settime:
11591     {
11592         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11593          * struct itimerspec * old_value */
11594         target_timer_t timerid = get_timer_id(arg1);
11595 
11596         if (timerid < 0) {
11597             ret = timerid;
11598         } else if (arg3 == 0) {
11599             ret = -TARGET_EINVAL;
11600         } else {
11601             timer_t htimer = g_posix_timers[timerid];
11602             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11603 
11604             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11605                 return -TARGET_EFAULT;
11606             }
11607             ret = get_errno(
11608                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11609             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11610                 return -TARGET_EFAULT;
11611             }
11612         }
11613         return ret;
11614     }
11615 #endif
11616 
11617 #ifdef TARGET_NR_timer_gettime
11618     case TARGET_NR_timer_gettime:
11619     {
11620         /* args: timer_t timerid, struct itimerspec *curr_value */
11621         target_timer_t timerid = get_timer_id(arg1);
11622 
11623         if (timerid < 0) {
11624             ret = timerid;
11625         } else if (!arg2) {
11626             ret = -TARGET_EFAULT;
11627         } else {
11628             timer_t htimer = g_posix_timers[timerid];
11629             struct itimerspec hspec;
11630             ret = get_errno(timer_gettime(htimer, &hspec));
11631 
11632             if (host_to_target_itimerspec(arg2, &hspec)) {
11633                 ret = -TARGET_EFAULT;
11634             }
11635         }
11636         return ret;
11637     }
11638 #endif
11639 
11640 #ifdef TARGET_NR_timer_getoverrun
11641     case TARGET_NR_timer_getoverrun:
11642     {
11643         /* args: timer_t timerid */
11644         target_timer_t timerid = get_timer_id(arg1);
11645 
11646         if (timerid < 0) {
11647             ret = timerid;
11648         } else {
11649             timer_t htimer = g_posix_timers[timerid];
11650             ret = get_errno(timer_getoverrun(htimer));
11651         }
11652         fd_trans_unregister(ret);
11653         return ret;
11654     }
11655 #endif
11656 
11657 #ifdef TARGET_NR_timer_delete
11658     case TARGET_NR_timer_delete:
11659     {
11660         /* args: timer_t timerid */
11661         target_timer_t timerid = get_timer_id(arg1);
11662 
11663         if (timerid < 0) {
11664             ret = timerid;
11665         } else {
11666             timer_t htimer = g_posix_timers[timerid];
11667             ret = get_errno(timer_delete(htimer));
11668             g_posix_timers[timerid] = 0;
11669         }
11670         return ret;
11671     }
11672 #endif
11673 
11674 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11675     case TARGET_NR_timerfd_create:
11676         return get_errno(timerfd_create(arg1,
11677                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11678 #endif
11679 
11680 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11681     case TARGET_NR_timerfd_gettime:
11682         {
11683             struct itimerspec its_curr;
11684 
11685             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11686 
11687             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11688                 return -TARGET_EFAULT;
11689             }
11690         }
11691         return ret;
11692 #endif
11693 
11694 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11695     case TARGET_NR_timerfd_settime:
11696         {
11697             struct itimerspec its_new, its_old, *p_new;
11698 
11699             if (arg3) {
11700                 if (target_to_host_itimerspec(&its_new, arg3)) {
11701                     return -TARGET_EFAULT;
11702                 }
11703                 p_new = &its_new;
11704             } else {
11705                 p_new = NULL;
11706             }
11707 
11708             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11709 
11710             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11711                 return -TARGET_EFAULT;
11712             }
11713         }
11714         return ret;
11715 #endif
11716 
11717 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11718     case TARGET_NR_ioprio_get:
11719         return get_errno(ioprio_get(arg1, arg2));
11720 #endif
11721 
11722 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11723     case TARGET_NR_ioprio_set:
11724         return get_errno(ioprio_set(arg1, arg2, arg3));
11725 #endif
11726 
11727 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11728     case TARGET_NR_setns:
11729         return get_errno(setns(arg1, arg2));
11730 #endif
11731 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11732     case TARGET_NR_unshare:
11733         return get_errno(unshare(arg1));
11734 #endif
11735 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11736     case TARGET_NR_kcmp:
11737         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11738 #endif
11739 #ifdef TARGET_NR_swapcontext
11740     case TARGET_NR_swapcontext:
11741         /* PowerPC specific.  */
11742         return do_swapcontext(cpu_env, arg1, arg2, arg3);
11743 #endif
11744 
11745     default:
11746         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11747         return -TARGET_ENOSYS;
11748     }
11749     return ret;
11750 }
11751 
11752 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11753                     abi_long arg2, abi_long arg3, abi_long arg4,
11754                     abi_long arg5, abi_long arg6, abi_long arg7,
11755                     abi_long arg8)
11756 {
11757     CPUState *cpu = env_cpu(cpu_env);
11758     abi_long ret;
11759 
11760 #ifdef DEBUG_ERESTARTSYS
11761     /* Debug-only code for exercising the syscall-restart code paths
11762      * in the per-architecture cpu main loops: restart every syscall
11763      * the guest makes once before letting it through.
11764      */
11765     {
11766         static bool flag;
11767         flag = !flag;
11768         if (flag) {
11769             return -TARGET_ERESTARTSYS;
11770         }
11771     }
11772 #endif
11773 
11774     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11775                              arg5, arg6, arg7, arg8);
11776 
11777     if (unlikely(do_strace)) {
11778         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11779         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11780                           arg5, arg6, arg7, arg8);
11781         print_syscall_ret(num, ret);
11782     } else {
11783         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11784                           arg5, arg6, arg7, arg8);
11785     }
11786 
11787     trace_guest_user_syscall_ret(cpu, num, ret);
11788     return ret;
11789 }
11790