xref: /openbmc/qemu/linux-user/syscall.c (revision 5ee5c14c)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef CONFIG_EVENTFD
63 #include <sys/eventfd.h>
64 #endif
65 #ifdef CONFIG_EPOLL
66 #include <sys/epoll.h>
67 #endif
68 #ifdef CONFIG_ATTR
69 #include "qemu/xattr.h"
70 #endif
71 #ifdef CONFIG_SENDFILE
72 #include <sys/sendfile.h>
73 #endif
74 
75 #define termios host_termios
76 #define winsize host_winsize
77 #define termio host_termio
78 #define sgttyb host_sgttyb /* same as target */
79 #define tchars host_tchars /* same as target */
80 #define ltchars host_ltchars /* same as target */
81 
82 #include <linux/termios.h>
83 #include <linux/unistd.h>
84 #include <linux/cdrom.h>
85 #include <linux/hdreg.h>
86 #include <linux/soundcard.h>
87 #include <linux/kd.h>
88 #include <linux/mtio.h>
89 #include <linux/fs.h>
90 #if defined(CONFIG_FIEMAP)
91 #include <linux/fiemap.h>
92 #endif
93 #include <linux/fb.h>
94 #if defined(CONFIG_USBFS)
95 #include <linux/usbdevice_fs.h>
96 #include <linux/usb/ch9.h>
97 #endif
98 #include <linux/vt.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <netpacket/packet.h>
105 #include <linux/netlink.h>
106 #include "linux_loop.h"
107 #include "uname.h"
108 
109 #include "qemu.h"
110 #include "fd-trans.h"
111 
112 #ifndef CLONE_IO
113 #define CLONE_IO                0x80000000      /* Clone io context */
114 #endif
115 
116 /* We can't directly call the host clone syscall, because this will
117  * badly confuse libc (breaking mutexes, for example). So we must
118  * divide clone flags into:
119  *  * flag combinations that look like pthread_create()
120  *  * flag combinations that look like fork()
121  *  * flags we can implement within QEMU itself
122  *  * flags we can't support and will return an error for
123  */
124 /* For thread creation, all these flags must be present; for
125  * fork, none must be present.
126  */
127 #define CLONE_THREAD_FLAGS                              \
128     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
129      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
130 
131 /* These flags are ignored:
132  * CLONE_DETACHED is now ignored by the kernel;
133  * CLONE_IO is just an optimisation hint to the I/O scheduler
134  */
135 #define CLONE_IGNORED_FLAGS                     \
136     (CLONE_DETACHED | CLONE_IO)
137 
138 /* Flags for fork which we can implement within QEMU itself */
139 #define CLONE_OPTIONAL_FORK_FLAGS               \
140     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
141      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
142 
143 /* Flags for thread creation which we can implement within QEMU itself */
144 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
145     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
146      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
147 
148 #define CLONE_INVALID_FORK_FLAGS                                        \
149     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
150 
151 #define CLONE_INVALID_THREAD_FLAGS                                      \
152     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
153        CLONE_IGNORED_FLAGS))
154 
155 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
156  * have almost all been allocated. We cannot support any of
157  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
158  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
159  * The checks against the invalid thread masks above will catch these.
160  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
161  */
162 
163 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
164  * once. This exercises the codepaths for restart.
165  */
166 //#define DEBUG_ERESTARTSYS
167 
168 //#include <linux/msdos_fs.h>
169 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
170 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
171 
172 #undef _syscall0
173 #undef _syscall1
174 #undef _syscall2
175 #undef _syscall3
176 #undef _syscall4
177 #undef _syscall5
178 #undef _syscall6
179 
180 #define _syscall0(type,name)		\
181 static type name (void)			\
182 {					\
183 	return syscall(__NR_##name);	\
184 }
185 
186 #define _syscall1(type,name,type1,arg1)		\
187 static type name (type1 arg1)			\
188 {						\
189 	return syscall(__NR_##name, arg1);	\
190 }
191 
192 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
193 static type name (type1 arg1,type2 arg2)		\
194 {							\
195 	return syscall(__NR_##name, arg1, arg2);	\
196 }
197 
198 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
199 static type name (type1 arg1,type2 arg2,type3 arg3)		\
200 {								\
201 	return syscall(__NR_##name, arg1, arg2, arg3);		\
202 }
203 
204 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
205 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
206 {										\
207 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
208 }
209 
210 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
211 		  type5,arg5)							\
212 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
213 {										\
214 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
215 }
216 
217 
218 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
219 		  type5,arg5,type6,arg6)					\
220 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
221                   type6 arg6)							\
222 {										\
223 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
224 }
225 
226 
227 #define __NR_sys_uname __NR_uname
228 #define __NR_sys_getcwd1 __NR_getcwd
229 #define __NR_sys_getdents __NR_getdents
230 #define __NR_sys_getdents64 __NR_getdents64
231 #define __NR_sys_getpriority __NR_getpriority
232 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
233 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
234 #define __NR_sys_syslog __NR_syslog
235 #define __NR_sys_futex __NR_futex
236 #define __NR_sys_inotify_init __NR_inotify_init
237 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
238 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
239 
240 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
241 #define __NR__llseek __NR_lseek
242 #endif
243 
244 /* Newer kernel ports have llseek() instead of _llseek() */
245 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
246 #define TARGET_NR__llseek TARGET_NR_llseek
247 #endif
248 
249 #define __NR_sys_gettid __NR_gettid
250 _syscall0(int, sys_gettid)
251 
252 /* For the 64-bit guest on 32-bit host case we must emulate
253  * getdents using getdents64, because otherwise the host
254  * might hand us back more dirent records than we can fit
255  * into the guest buffer after structure format conversion.
256  * Otherwise we emulate getdents with getdents if the host has it.
257  */
258 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
259 #define EMULATE_GETDENTS_WITH_GETDENTS
260 #endif
261 
262 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
263 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
264 #endif
265 #if (defined(TARGET_NR_getdents) && \
266       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
267     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
268 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
269 #endif
270 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
271 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
272           loff_t *, res, uint, wh);
273 #endif
274 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
275 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
276           siginfo_t *, uinfo)
277 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
278 #ifdef __NR_exit_group
279 _syscall1(int,exit_group,int,error_code)
280 #endif
281 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
282 _syscall1(int,set_tid_address,int *,tidptr)
283 #endif
284 #if defined(TARGET_NR_futex) && defined(__NR_futex)
285 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
286           const struct timespec *,timeout,int *,uaddr2,int,val3)
287 #endif
288 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
289 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
290           unsigned long *, user_mask_ptr);
291 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
292 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
293           unsigned long *, user_mask_ptr);
294 #define __NR_sys_getcpu __NR_getcpu
295 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
296 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
297           void *, arg);
298 _syscall2(int, capget, struct __user_cap_header_struct *, header,
299           struct __user_cap_data_struct *, data);
300 _syscall2(int, capset, struct __user_cap_header_struct *, header,
301           struct __user_cap_data_struct *, data);
302 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
303 _syscall2(int, ioprio_get, int, which, int, who)
304 #endif
305 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
306 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
307 #endif
308 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
309 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
310 #endif
311 
312 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
313 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
314           unsigned long, idx1, unsigned long, idx2)
315 #endif
316 
317 static bitmask_transtbl fcntl_flags_tbl[] = {
318   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
319   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
320   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
321   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
322   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
323   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
324   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
325   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
326   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
327   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
328   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
329   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
330   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
331 #if defined(O_DIRECT)
332   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
333 #endif
334 #if defined(O_NOATIME)
335   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
336 #endif
337 #if defined(O_CLOEXEC)
338   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
339 #endif
340 #if defined(O_PATH)
341   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
342 #endif
343 #if defined(O_TMPFILE)
344   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
345 #endif
346   /* Don't terminate the list prematurely on 64-bit host+guest.  */
347 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
348   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
349 #endif
350   { 0, 0, 0, 0 }
351 };
352 
353 static int sys_getcwd1(char *buf, size_t size)
354 {
355   if (getcwd(buf, size) == NULL) {
356       /* getcwd() sets errno */
357       return (-1);
358   }
359   return strlen(buf)+1;
360 }
361 
362 #ifdef TARGET_NR_utimensat
363 #if defined(__NR_utimensat)
364 #define __NR_sys_utimensat __NR_utimensat
365 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
366           const struct timespec *,tsp,int,flags)
367 #else
368 static int sys_utimensat(int dirfd, const char *pathname,
369                          const struct timespec times[2], int flags)
370 {
371     errno = ENOSYS;
372     return -1;
373 }
374 #endif
375 #endif /* TARGET_NR_utimensat */
376 
377 #ifdef TARGET_NR_renameat2
378 #if defined(__NR_renameat2)
379 #define __NR_sys_renameat2 __NR_renameat2
380 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
381           const char *, new, unsigned int, flags)
382 #else
383 static int sys_renameat2(int oldfd, const char *old,
384                          int newfd, const char *new, int flags)
385 {
386     if (flags == 0) {
387         return renameat(oldfd, old, newfd, new);
388     }
389     errno = ENOSYS;
390     return -1;
391 }
392 #endif
393 #endif /* TARGET_NR_renameat2 */
394 
395 #ifdef CONFIG_INOTIFY
396 #include <sys/inotify.h>
397 
398 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
399 static int sys_inotify_init(void)
400 {
401   return (inotify_init());
402 }
403 #endif
404 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
405 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
406 {
407   return (inotify_add_watch(fd, pathname, mask));
408 }
409 #endif
410 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
411 static int sys_inotify_rm_watch(int fd, int32_t wd)
412 {
413   return (inotify_rm_watch(fd, wd));
414 }
415 #endif
416 #ifdef CONFIG_INOTIFY1
417 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
418 static int sys_inotify_init1(int flags)
419 {
420   return (inotify_init1(flags));
421 }
422 #endif
423 #endif
424 #else
425 /* Userspace can usually survive runtime without inotify */
426 #undef TARGET_NR_inotify_init
427 #undef TARGET_NR_inotify_init1
428 #undef TARGET_NR_inotify_add_watch
429 #undef TARGET_NR_inotify_rm_watch
430 #endif /* CONFIG_INOTIFY  */
431 
432 #if defined(TARGET_NR_prlimit64)
433 #ifndef __NR_prlimit64
434 # define __NR_prlimit64 -1
435 #endif
436 #define __NR_sys_prlimit64 __NR_prlimit64
437 /* The glibc rlimit structure may not be that used by the underlying syscall */
438 struct host_rlimit64 {
439     uint64_t rlim_cur;
440     uint64_t rlim_max;
441 };
442 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
443           const struct host_rlimit64 *, new_limit,
444           struct host_rlimit64 *, old_limit)
445 #endif
446 
447 
448 #if defined(TARGET_NR_timer_create)
449 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
450 static timer_t g_posix_timers[32] = { 0, } ;
451 
452 static inline int next_free_host_timer(void)
453 {
454     int k ;
455     /* FIXME: Does finding the next free slot require a lock? */
456     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
457         if (g_posix_timers[k] == 0) {
458             g_posix_timers[k] = (timer_t) 1;
459             return k;
460         }
461     }
462     return -1;
463 }
464 #endif
465 
466 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
467 #ifdef TARGET_ARM
468 static inline int regpairs_aligned(void *cpu_env, int num)
469 {
470     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
471 }
472 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
473 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
474 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
475 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
476  * of registers which translates to the same as ARM/MIPS, because we start with
477  * r3 as arg1 */
478 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
479 #elif defined(TARGET_SH4)
480 /* SH4 doesn't align register pairs, except for p{read,write}64 */
481 static inline int regpairs_aligned(void *cpu_env, int num)
482 {
483     switch (num) {
484     case TARGET_NR_pread64:
485     case TARGET_NR_pwrite64:
486         return 1;
487 
488     default:
489         return 0;
490     }
491 }
492 #elif defined(TARGET_XTENSA)
493 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
494 #else
495 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
496 #endif
497 
498 #define ERRNO_TABLE_SIZE 1200
499 
500 /* target_to_host_errno_table[] is initialized from
501  * host_to_target_errno_table[] in syscall_init(). */
502 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
503 };
504 
505 /*
506  * This list is the union of errno values overridden in asm-<arch>/errno.h
507  * minus the errnos that are not actually generic to all archs.
508  */
509 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
510     [EAGAIN]		= TARGET_EAGAIN,
511     [EIDRM]		= TARGET_EIDRM,
512     [ECHRNG]		= TARGET_ECHRNG,
513     [EL2NSYNC]		= TARGET_EL2NSYNC,
514     [EL3HLT]		= TARGET_EL3HLT,
515     [EL3RST]		= TARGET_EL3RST,
516     [ELNRNG]		= TARGET_ELNRNG,
517     [EUNATCH]		= TARGET_EUNATCH,
518     [ENOCSI]		= TARGET_ENOCSI,
519     [EL2HLT]		= TARGET_EL2HLT,
520     [EDEADLK]		= TARGET_EDEADLK,
521     [ENOLCK]		= TARGET_ENOLCK,
522     [EBADE]		= TARGET_EBADE,
523     [EBADR]		= TARGET_EBADR,
524     [EXFULL]		= TARGET_EXFULL,
525     [ENOANO]		= TARGET_ENOANO,
526     [EBADRQC]		= TARGET_EBADRQC,
527     [EBADSLT]		= TARGET_EBADSLT,
528     [EBFONT]		= TARGET_EBFONT,
529     [ENOSTR]		= TARGET_ENOSTR,
530     [ENODATA]		= TARGET_ENODATA,
531     [ETIME]		= TARGET_ETIME,
532     [ENOSR]		= TARGET_ENOSR,
533     [ENONET]		= TARGET_ENONET,
534     [ENOPKG]		= TARGET_ENOPKG,
535     [EREMOTE]		= TARGET_EREMOTE,
536     [ENOLINK]		= TARGET_ENOLINK,
537     [EADV]		= TARGET_EADV,
538     [ESRMNT]		= TARGET_ESRMNT,
539     [ECOMM]		= TARGET_ECOMM,
540     [EPROTO]		= TARGET_EPROTO,
541     [EDOTDOT]		= TARGET_EDOTDOT,
542     [EMULTIHOP]		= TARGET_EMULTIHOP,
543     [EBADMSG]		= TARGET_EBADMSG,
544     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
545     [EOVERFLOW]		= TARGET_EOVERFLOW,
546     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
547     [EBADFD]		= TARGET_EBADFD,
548     [EREMCHG]		= TARGET_EREMCHG,
549     [ELIBACC]		= TARGET_ELIBACC,
550     [ELIBBAD]		= TARGET_ELIBBAD,
551     [ELIBSCN]		= TARGET_ELIBSCN,
552     [ELIBMAX]		= TARGET_ELIBMAX,
553     [ELIBEXEC]		= TARGET_ELIBEXEC,
554     [EILSEQ]		= TARGET_EILSEQ,
555     [ENOSYS]		= TARGET_ENOSYS,
556     [ELOOP]		= TARGET_ELOOP,
557     [ERESTART]		= TARGET_ERESTART,
558     [ESTRPIPE]		= TARGET_ESTRPIPE,
559     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
560     [EUSERS]		= TARGET_EUSERS,
561     [ENOTSOCK]		= TARGET_ENOTSOCK,
562     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
563     [EMSGSIZE]		= TARGET_EMSGSIZE,
564     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
565     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
566     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
567     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
568     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
569     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
570     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
571     [EADDRINUSE]	= TARGET_EADDRINUSE,
572     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
573     [ENETDOWN]		= TARGET_ENETDOWN,
574     [ENETUNREACH]	= TARGET_ENETUNREACH,
575     [ENETRESET]		= TARGET_ENETRESET,
576     [ECONNABORTED]	= TARGET_ECONNABORTED,
577     [ECONNRESET]	= TARGET_ECONNRESET,
578     [ENOBUFS]		= TARGET_ENOBUFS,
579     [EISCONN]		= TARGET_EISCONN,
580     [ENOTCONN]		= TARGET_ENOTCONN,
581     [EUCLEAN]		= TARGET_EUCLEAN,
582     [ENOTNAM]		= TARGET_ENOTNAM,
583     [ENAVAIL]		= TARGET_ENAVAIL,
584     [EISNAM]		= TARGET_EISNAM,
585     [EREMOTEIO]		= TARGET_EREMOTEIO,
586     [EDQUOT]            = TARGET_EDQUOT,
587     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
588     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
589     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
590     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
591     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
592     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
593     [EALREADY]		= TARGET_EALREADY,
594     [EINPROGRESS]	= TARGET_EINPROGRESS,
595     [ESTALE]		= TARGET_ESTALE,
596     [ECANCELED]		= TARGET_ECANCELED,
597     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
598     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
599 #ifdef ENOKEY
600     [ENOKEY]		= TARGET_ENOKEY,
601 #endif
602 #ifdef EKEYEXPIRED
603     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
604 #endif
605 #ifdef EKEYREVOKED
606     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
607 #endif
608 #ifdef EKEYREJECTED
609     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
610 #endif
611 #ifdef EOWNERDEAD
612     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
613 #endif
614 #ifdef ENOTRECOVERABLE
615     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
616 #endif
617 #ifdef ENOMSG
618     [ENOMSG]            = TARGET_ENOMSG,
619 #endif
620 #ifdef ERKFILL
621     [ERFKILL]           = TARGET_ERFKILL,
622 #endif
623 #ifdef EHWPOISON
624     [EHWPOISON]         = TARGET_EHWPOISON,
625 #endif
626 };
627 
628 static inline int host_to_target_errno(int err)
629 {
630     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
631         host_to_target_errno_table[err]) {
632         return host_to_target_errno_table[err];
633     }
634     return err;
635 }
636 
637 static inline int target_to_host_errno(int err)
638 {
639     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
640         target_to_host_errno_table[err]) {
641         return target_to_host_errno_table[err];
642     }
643     return err;
644 }
645 
646 static inline abi_long get_errno(abi_long ret)
647 {
648     if (ret == -1)
649         return -host_to_target_errno(errno);
650     else
651         return ret;
652 }
653 
654 const char *target_strerror(int err)
655 {
656     if (err == TARGET_ERESTARTSYS) {
657         return "To be restarted";
658     }
659     if (err == TARGET_QEMU_ESIGRETURN) {
660         return "Successful exit from sigreturn";
661     }
662 
663     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
664         return NULL;
665     }
666     return strerror(target_to_host_errno(err));
667 }
668 
669 #define safe_syscall0(type, name) \
670 static type safe_##name(void) \
671 { \
672     return safe_syscall(__NR_##name); \
673 }
674 
675 #define safe_syscall1(type, name, type1, arg1) \
676 static type safe_##name(type1 arg1) \
677 { \
678     return safe_syscall(__NR_##name, arg1); \
679 }
680 
681 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
682 static type safe_##name(type1 arg1, type2 arg2) \
683 { \
684     return safe_syscall(__NR_##name, arg1, arg2); \
685 }
686 
687 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
688 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
689 { \
690     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
691 }
692 
693 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
694     type4, arg4) \
695 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
696 { \
697     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
698 }
699 
700 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
701     type4, arg4, type5, arg5) \
702 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
703     type5 arg5) \
704 { \
705     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
706 }
707 
708 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
709     type4, arg4, type5, arg5, type6, arg6) \
710 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
711     type5 arg5, type6 arg6) \
712 { \
713     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
714 }
715 
716 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
717 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
718 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
719               int, flags, mode_t, mode)
720 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
721               struct rusage *, rusage)
722 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
723               int, options, struct rusage *, rusage)
724 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
725 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
726               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
727 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
728               struct timespec *, tsp, const sigset_t *, sigmask,
729               size_t, sigsetsize)
730 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
731               int, maxevents, int, timeout, const sigset_t *, sigmask,
732               size_t, sigsetsize)
733 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
734               const struct timespec *,timeout,int *,uaddr2,int,val3)
735 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
736 safe_syscall2(int, kill, pid_t, pid, int, sig)
737 safe_syscall2(int, tkill, int, tid, int, sig)
738 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
739 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
740 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
741 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
742               unsigned long, pos_l, unsigned long, pos_h)
743 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
744               unsigned long, pos_l, unsigned long, pos_h)
745 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
746               socklen_t, addrlen)
747 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
748               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
749 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
750               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
751 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
752 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
753 safe_syscall2(int, flock, int, fd, int, operation)
754 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
755               const struct timespec *, uts, size_t, sigsetsize)
756 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
757               int, flags)
758 safe_syscall2(int, nanosleep, const struct timespec *, req,
759               struct timespec *, rem)
760 #ifdef TARGET_NR_clock_nanosleep
761 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
762               const struct timespec *, req, struct timespec *, rem)
763 #endif
764 #ifdef __NR_msgsnd
765 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
766               int, flags)
767 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
768               long, msgtype, int, flags)
769 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
770               unsigned, nsops, const struct timespec *, timeout)
771 #else
772 /* This host kernel architecture uses a single ipc syscall; fake up
773  * wrappers for the sub-operations to hide this implementation detail.
774  * Annoyingly we can't include linux/ipc.h to get the constant definitions
775  * for the call parameter because some structs in there conflict with the
776  * sys/ipc.h ones. So we just define them here, and rely on them being
777  * the same for all host architectures.
778  */
779 #define Q_SEMTIMEDOP 4
780 #define Q_MSGSND 11
781 #define Q_MSGRCV 12
782 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
783 
784 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
785               void *, ptr, long, fifth)
786 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
787 {
788     return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
789 }
790 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
791 {
792     return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
793 }
794 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
795                            const struct timespec *timeout)
796 {
797     return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
798                     (long)timeout);
799 }
800 #endif
801 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
802 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
803               size_t, len, unsigned, prio, const struct timespec *, timeout)
804 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
805               size_t, len, unsigned *, prio, const struct timespec *, timeout)
806 #endif
807 /* We do ioctl like this rather than via safe_syscall3 to preserve the
808  * "third argument might be integer or pointer or not present" behaviour of
809  * the libc function.
810  */
811 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
812 /* Similarly for fcntl. Note that callers must always:
813  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
814  *  use the flock64 struct rather than unsuffixed flock
815  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
816  */
817 #ifdef __NR_fcntl64
818 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
819 #else
820 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
821 #endif
822 
823 static inline int host_to_target_sock_type(int host_type)
824 {
825     int target_type;
826 
827     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
828     case SOCK_DGRAM:
829         target_type = TARGET_SOCK_DGRAM;
830         break;
831     case SOCK_STREAM:
832         target_type = TARGET_SOCK_STREAM;
833         break;
834     default:
835         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
836         break;
837     }
838 
839 #if defined(SOCK_CLOEXEC)
840     if (host_type & SOCK_CLOEXEC) {
841         target_type |= TARGET_SOCK_CLOEXEC;
842     }
843 #endif
844 
845 #if defined(SOCK_NONBLOCK)
846     if (host_type & SOCK_NONBLOCK) {
847         target_type |= TARGET_SOCK_NONBLOCK;
848     }
849 #endif
850 
851     return target_type;
852 }
853 
854 static abi_ulong target_brk;
855 static abi_ulong target_original_brk;
856 static abi_ulong brk_page;
857 
858 void target_set_brk(abi_ulong new_brk)
859 {
860     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
861     brk_page = HOST_PAGE_ALIGN(target_brk);
862 }
863 
864 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
865 #define DEBUGF_BRK(message, args...)
866 
867 /* do_brk() must return target values and target errnos. */
868 abi_long do_brk(abi_ulong new_brk)
869 {
870     abi_long mapped_addr;
871     abi_ulong new_alloc_size;
872 
873     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
874 
875     if (!new_brk) {
876         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
877         return target_brk;
878     }
879     if (new_brk < target_original_brk) {
880         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
881                    target_brk);
882         return target_brk;
883     }
884 
885     /* If the new brk is less than the highest page reserved to the
886      * target heap allocation, set it and we're almost done...  */
887     if (new_brk <= brk_page) {
888         /* Heap contents are initialized to zero, as for anonymous
889          * mapped pages.  */
890         if (new_brk > target_brk) {
891             memset(g2h(target_brk), 0, new_brk - target_brk);
892         }
893 	target_brk = new_brk;
894         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
895 	return target_brk;
896     }
897 
898     /* We need to allocate more memory after the brk... Note that
899      * we don't use MAP_FIXED because that will map over the top of
900      * any existing mapping (like the one with the host libc or qemu
901      * itself); instead we treat "mapped but at wrong address" as
902      * a failure and unmap again.
903      */
904     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
905     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
906                                         PROT_READ|PROT_WRITE,
907                                         MAP_ANON|MAP_PRIVATE, 0, 0));
908 
909     if (mapped_addr == brk_page) {
910         /* Heap contents are initialized to zero, as for anonymous
911          * mapped pages.  Technically the new pages are already
912          * initialized to zero since they *are* anonymous mapped
913          * pages, however we have to take care with the contents that
914          * come from the remaining part of the previous page: it may
915          * contains garbage data due to a previous heap usage (grown
916          * then shrunken).  */
917         memset(g2h(target_brk), 0, brk_page - target_brk);
918 
919         target_brk = new_brk;
920         brk_page = HOST_PAGE_ALIGN(target_brk);
921         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
922             target_brk);
923         return target_brk;
924     } else if (mapped_addr != -1) {
925         /* Mapped but at wrong address, meaning there wasn't actually
926          * enough space for this brk.
927          */
928         target_munmap(mapped_addr, new_alloc_size);
929         mapped_addr = -1;
930         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
931     }
932     else {
933         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
934     }
935 
936 #if defined(TARGET_ALPHA)
937     /* We (partially) emulate OSF/1 on Alpha, which requires we
938        return a proper errno, not an unchanged brk value.  */
939     return -TARGET_ENOMEM;
940 #endif
941     /* For everything else, return the previous break. */
942     return target_brk;
943 }
944 
945 static inline abi_long copy_from_user_fdset(fd_set *fds,
946                                             abi_ulong target_fds_addr,
947                                             int n)
948 {
949     int i, nw, j, k;
950     abi_ulong b, *target_fds;
951 
952     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
953     if (!(target_fds = lock_user(VERIFY_READ,
954                                  target_fds_addr,
955                                  sizeof(abi_ulong) * nw,
956                                  1)))
957         return -TARGET_EFAULT;
958 
959     FD_ZERO(fds);
960     k = 0;
961     for (i = 0; i < nw; i++) {
962         /* grab the abi_ulong */
963         __get_user(b, &target_fds[i]);
964         for (j = 0; j < TARGET_ABI_BITS; j++) {
965             /* check the bit inside the abi_ulong */
966             if ((b >> j) & 1)
967                 FD_SET(k, fds);
968             k++;
969         }
970     }
971 
972     unlock_user(target_fds, target_fds_addr, 0);
973 
974     return 0;
975 }
976 
977 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
978                                                  abi_ulong target_fds_addr,
979                                                  int n)
980 {
981     if (target_fds_addr) {
982         if (copy_from_user_fdset(fds, target_fds_addr, n))
983             return -TARGET_EFAULT;
984         *fds_ptr = fds;
985     } else {
986         *fds_ptr = NULL;
987     }
988     return 0;
989 }
990 
991 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
992                                           const fd_set *fds,
993                                           int n)
994 {
995     int i, nw, j, k;
996     abi_long v;
997     abi_ulong *target_fds;
998 
999     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1000     if (!(target_fds = lock_user(VERIFY_WRITE,
1001                                  target_fds_addr,
1002                                  sizeof(abi_ulong) * nw,
1003                                  0)))
1004         return -TARGET_EFAULT;
1005 
1006     k = 0;
1007     for (i = 0; i < nw; i++) {
1008         v = 0;
1009         for (j = 0; j < TARGET_ABI_BITS; j++) {
1010             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1011             k++;
1012         }
1013         __put_user(v, &target_fds[i]);
1014     }
1015 
1016     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1017 
1018     return 0;
1019 }
1020 
1021 #if defined(__alpha__)
1022 #define HOST_HZ 1024
1023 #else
1024 #define HOST_HZ 100
1025 #endif
1026 
1027 static inline abi_long host_to_target_clock_t(long ticks)
1028 {
1029 #if HOST_HZ == TARGET_HZ
1030     return ticks;
1031 #else
1032     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1033 #endif
1034 }
1035 
1036 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1037                                              const struct rusage *rusage)
1038 {
1039     struct target_rusage *target_rusage;
1040 
1041     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1042         return -TARGET_EFAULT;
1043     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1044     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1045     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1046     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1047     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1048     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1049     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1050     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1051     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1052     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1053     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1054     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1055     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1056     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1057     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1058     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1059     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1060     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1061     unlock_user_struct(target_rusage, target_addr, 1);
1062 
1063     return 0;
1064 }
1065 
1066 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1067 {
1068     abi_ulong target_rlim_swap;
1069     rlim_t result;
1070 
1071     target_rlim_swap = tswapal(target_rlim);
1072     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1073         return RLIM_INFINITY;
1074 
1075     result = target_rlim_swap;
1076     if (target_rlim_swap != (rlim_t)result)
1077         return RLIM_INFINITY;
1078 
1079     return result;
1080 }
1081 
1082 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1083 {
1084     abi_ulong target_rlim_swap;
1085     abi_ulong result;
1086 
1087     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1088         target_rlim_swap = TARGET_RLIM_INFINITY;
1089     else
1090         target_rlim_swap = rlim;
1091     result = tswapal(target_rlim_swap);
1092 
1093     return result;
1094 }
1095 
1096 static inline int target_to_host_resource(int code)
1097 {
1098     switch (code) {
1099     case TARGET_RLIMIT_AS:
1100         return RLIMIT_AS;
1101     case TARGET_RLIMIT_CORE:
1102         return RLIMIT_CORE;
1103     case TARGET_RLIMIT_CPU:
1104         return RLIMIT_CPU;
1105     case TARGET_RLIMIT_DATA:
1106         return RLIMIT_DATA;
1107     case TARGET_RLIMIT_FSIZE:
1108         return RLIMIT_FSIZE;
1109     case TARGET_RLIMIT_LOCKS:
1110         return RLIMIT_LOCKS;
1111     case TARGET_RLIMIT_MEMLOCK:
1112         return RLIMIT_MEMLOCK;
1113     case TARGET_RLIMIT_MSGQUEUE:
1114         return RLIMIT_MSGQUEUE;
1115     case TARGET_RLIMIT_NICE:
1116         return RLIMIT_NICE;
1117     case TARGET_RLIMIT_NOFILE:
1118         return RLIMIT_NOFILE;
1119     case TARGET_RLIMIT_NPROC:
1120         return RLIMIT_NPROC;
1121     case TARGET_RLIMIT_RSS:
1122         return RLIMIT_RSS;
1123     case TARGET_RLIMIT_RTPRIO:
1124         return RLIMIT_RTPRIO;
1125     case TARGET_RLIMIT_SIGPENDING:
1126         return RLIMIT_SIGPENDING;
1127     case TARGET_RLIMIT_STACK:
1128         return RLIMIT_STACK;
1129     default:
1130         return code;
1131     }
1132 }
1133 
1134 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1135                                               abi_ulong target_tv_addr)
1136 {
1137     struct target_timeval *target_tv;
1138 
1139     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1140         return -TARGET_EFAULT;
1141 
1142     __get_user(tv->tv_sec, &target_tv->tv_sec);
1143     __get_user(tv->tv_usec, &target_tv->tv_usec);
1144 
1145     unlock_user_struct(target_tv, target_tv_addr, 0);
1146 
1147     return 0;
1148 }
1149 
1150 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1151                                             const struct timeval *tv)
1152 {
1153     struct target_timeval *target_tv;
1154 
1155     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1156         return -TARGET_EFAULT;
1157 
1158     __put_user(tv->tv_sec, &target_tv->tv_sec);
1159     __put_user(tv->tv_usec, &target_tv->tv_usec);
1160 
1161     unlock_user_struct(target_tv, target_tv_addr, 1);
1162 
1163     return 0;
1164 }
1165 
1166 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1167                                                abi_ulong target_tz_addr)
1168 {
1169     struct target_timezone *target_tz;
1170 
1171     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1172         return -TARGET_EFAULT;
1173     }
1174 
1175     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1176     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1177 
1178     unlock_user_struct(target_tz, target_tz_addr, 0);
1179 
1180     return 0;
1181 }
1182 
1183 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1184 #include <mqueue.h>
1185 
1186 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1187                                               abi_ulong target_mq_attr_addr)
1188 {
1189     struct target_mq_attr *target_mq_attr;
1190 
1191     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1192                           target_mq_attr_addr, 1))
1193         return -TARGET_EFAULT;
1194 
1195     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1196     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1197     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1198     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1199 
1200     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1201 
1202     return 0;
1203 }
1204 
1205 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1206                                             const struct mq_attr *attr)
1207 {
1208     struct target_mq_attr *target_mq_attr;
1209 
1210     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1211                           target_mq_attr_addr, 0))
1212         return -TARGET_EFAULT;
1213 
1214     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1215     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1216     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1217     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1218 
1219     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1220 
1221     return 0;
1222 }
1223 #endif
1224 
1225 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1226 /* do_select() must return target values and target errnos. */
1227 static abi_long do_select(int n,
1228                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1229                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1230 {
1231     fd_set rfds, wfds, efds;
1232     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1233     struct timeval tv;
1234     struct timespec ts, *ts_ptr;
1235     abi_long ret;
1236 
1237     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1238     if (ret) {
1239         return ret;
1240     }
1241     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1242     if (ret) {
1243         return ret;
1244     }
1245     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1246     if (ret) {
1247         return ret;
1248     }
1249 
1250     if (target_tv_addr) {
1251         if (copy_from_user_timeval(&tv, target_tv_addr))
1252             return -TARGET_EFAULT;
1253         ts.tv_sec = tv.tv_sec;
1254         ts.tv_nsec = tv.tv_usec * 1000;
1255         ts_ptr = &ts;
1256     } else {
1257         ts_ptr = NULL;
1258     }
1259 
1260     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1261                                   ts_ptr, NULL));
1262 
1263     if (!is_error(ret)) {
1264         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1265             return -TARGET_EFAULT;
1266         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1267             return -TARGET_EFAULT;
1268         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1269             return -TARGET_EFAULT;
1270 
1271         if (target_tv_addr) {
1272             tv.tv_sec = ts.tv_sec;
1273             tv.tv_usec = ts.tv_nsec / 1000;
1274             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1275                 return -TARGET_EFAULT;
1276             }
1277         }
1278     }
1279 
1280     return ret;
1281 }
1282 
1283 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1284 static abi_long do_old_select(abi_ulong arg1)
1285 {
1286     struct target_sel_arg_struct *sel;
1287     abi_ulong inp, outp, exp, tvp;
1288     long nsel;
1289 
1290     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1291         return -TARGET_EFAULT;
1292     }
1293 
1294     nsel = tswapal(sel->n);
1295     inp = tswapal(sel->inp);
1296     outp = tswapal(sel->outp);
1297     exp = tswapal(sel->exp);
1298     tvp = tswapal(sel->tvp);
1299 
1300     unlock_user_struct(sel, arg1, 0);
1301 
1302     return do_select(nsel, inp, outp, exp, tvp);
1303 }
1304 #endif
1305 #endif
1306 
1307 static abi_long do_pipe2(int host_pipe[], int flags)
1308 {
1309 #ifdef CONFIG_PIPE2
1310     return pipe2(host_pipe, flags);
1311 #else
1312     return -ENOSYS;
1313 #endif
1314 }
1315 
1316 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1317                         int flags, int is_pipe2)
1318 {
1319     int host_pipe[2];
1320     abi_long ret;
1321     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1322 
1323     if (is_error(ret))
1324         return get_errno(ret);
1325 
1326     /* Several targets have special calling conventions for the original
1327        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1328     if (!is_pipe2) {
1329 #if defined(TARGET_ALPHA)
1330         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1331         return host_pipe[0];
1332 #elif defined(TARGET_MIPS)
1333         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1334         return host_pipe[0];
1335 #elif defined(TARGET_SH4)
1336         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1337         return host_pipe[0];
1338 #elif defined(TARGET_SPARC)
1339         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1340         return host_pipe[0];
1341 #endif
1342     }
1343 
1344     if (put_user_s32(host_pipe[0], pipedes)
1345         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1346         return -TARGET_EFAULT;
1347     return get_errno(ret);
1348 }
1349 
1350 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1351                                               abi_ulong target_addr,
1352                                               socklen_t len)
1353 {
1354     struct target_ip_mreqn *target_smreqn;
1355 
1356     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1357     if (!target_smreqn)
1358         return -TARGET_EFAULT;
1359     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1360     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1361     if (len == sizeof(struct target_ip_mreqn))
1362         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1363     unlock_user(target_smreqn, target_addr, 0);
1364 
1365     return 0;
1366 }
1367 
1368 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1369                                                abi_ulong target_addr,
1370                                                socklen_t len)
1371 {
1372     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1373     sa_family_t sa_family;
1374     struct target_sockaddr *target_saddr;
1375 
1376     if (fd_trans_target_to_host_addr(fd)) {
1377         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1378     }
1379 
1380     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1381     if (!target_saddr)
1382         return -TARGET_EFAULT;
1383 
1384     sa_family = tswap16(target_saddr->sa_family);
1385 
1386     /* Oops. The caller might send a incomplete sun_path; sun_path
1387      * must be terminated by \0 (see the manual page), but
1388      * unfortunately it is quite common to specify sockaddr_un
1389      * length as "strlen(x->sun_path)" while it should be
1390      * "strlen(...) + 1". We'll fix that here if needed.
1391      * Linux kernel has a similar feature.
1392      */
1393 
1394     if (sa_family == AF_UNIX) {
1395         if (len < unix_maxlen && len > 0) {
1396             char *cp = (char*)target_saddr;
1397 
1398             if ( cp[len-1] && !cp[len] )
1399                 len++;
1400         }
1401         if (len > unix_maxlen)
1402             len = unix_maxlen;
1403     }
1404 
1405     memcpy(addr, target_saddr, len);
1406     addr->sa_family = sa_family;
1407     if (sa_family == AF_NETLINK) {
1408         struct sockaddr_nl *nladdr;
1409 
1410         nladdr = (struct sockaddr_nl *)addr;
1411         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1412         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1413     } else if (sa_family == AF_PACKET) {
1414 	struct target_sockaddr_ll *lladdr;
1415 
1416 	lladdr = (struct target_sockaddr_ll *)addr;
1417 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1418 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1419     }
1420     unlock_user(target_saddr, target_addr, 0);
1421 
1422     return 0;
1423 }
1424 
1425 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1426                                                struct sockaddr *addr,
1427                                                socklen_t len)
1428 {
1429     struct target_sockaddr *target_saddr;
1430 
1431     if (len == 0) {
1432         return 0;
1433     }
1434     assert(addr);
1435 
1436     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1437     if (!target_saddr)
1438         return -TARGET_EFAULT;
1439     memcpy(target_saddr, addr, len);
1440     if (len >= offsetof(struct target_sockaddr, sa_family) +
1441         sizeof(target_saddr->sa_family)) {
1442         target_saddr->sa_family = tswap16(addr->sa_family);
1443     }
1444     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1445         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1446         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1447         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1448     } else if (addr->sa_family == AF_PACKET) {
1449         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1450         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1451         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1452     } else if (addr->sa_family == AF_INET6 &&
1453                len >= sizeof(struct target_sockaddr_in6)) {
1454         struct target_sockaddr_in6 *target_in6 =
1455                (struct target_sockaddr_in6 *)target_saddr;
1456         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1457     }
1458     unlock_user(target_saddr, target_addr, len);
1459 
1460     return 0;
1461 }
1462 
1463 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1464                                            struct target_msghdr *target_msgh)
1465 {
1466     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1467     abi_long msg_controllen;
1468     abi_ulong target_cmsg_addr;
1469     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1470     socklen_t space = 0;
1471 
1472     msg_controllen = tswapal(target_msgh->msg_controllen);
1473     if (msg_controllen < sizeof (struct target_cmsghdr))
1474         goto the_end;
1475     target_cmsg_addr = tswapal(target_msgh->msg_control);
1476     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1477     target_cmsg_start = target_cmsg;
1478     if (!target_cmsg)
1479         return -TARGET_EFAULT;
1480 
1481     while (cmsg && target_cmsg) {
1482         void *data = CMSG_DATA(cmsg);
1483         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1484 
1485         int len = tswapal(target_cmsg->cmsg_len)
1486             - sizeof(struct target_cmsghdr);
1487 
1488         space += CMSG_SPACE(len);
1489         if (space > msgh->msg_controllen) {
1490             space -= CMSG_SPACE(len);
1491             /* This is a QEMU bug, since we allocated the payload
1492              * area ourselves (unlike overflow in host-to-target
1493              * conversion, which is just the guest giving us a buffer
1494              * that's too small). It can't happen for the payload types
1495              * we currently support; if it becomes an issue in future
1496              * we would need to improve our allocation strategy to
1497              * something more intelligent than "twice the size of the
1498              * target buffer we're reading from".
1499              */
1500             gemu_log("Host cmsg overflow\n");
1501             break;
1502         }
1503 
1504         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1505             cmsg->cmsg_level = SOL_SOCKET;
1506         } else {
1507             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1508         }
1509         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1510         cmsg->cmsg_len = CMSG_LEN(len);
1511 
1512         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1513             int *fd = (int *)data;
1514             int *target_fd = (int *)target_data;
1515             int i, numfds = len / sizeof(int);
1516 
1517             for (i = 0; i < numfds; i++) {
1518                 __get_user(fd[i], target_fd + i);
1519             }
1520         } else if (cmsg->cmsg_level == SOL_SOCKET
1521                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1522             struct ucred *cred = (struct ucred *)data;
1523             struct target_ucred *target_cred =
1524                 (struct target_ucred *)target_data;
1525 
1526             __get_user(cred->pid, &target_cred->pid);
1527             __get_user(cred->uid, &target_cred->uid);
1528             __get_user(cred->gid, &target_cred->gid);
1529         } else {
1530             gemu_log("Unsupported ancillary data: %d/%d\n",
1531                                         cmsg->cmsg_level, cmsg->cmsg_type);
1532             memcpy(data, target_data, len);
1533         }
1534 
1535         cmsg = CMSG_NXTHDR(msgh, cmsg);
1536         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1537                                          target_cmsg_start);
1538     }
1539     unlock_user(target_cmsg, target_cmsg_addr, 0);
1540  the_end:
1541     msgh->msg_controllen = space;
1542     return 0;
1543 }
1544 
1545 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1546                                            struct msghdr *msgh)
1547 {
1548     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1549     abi_long msg_controllen;
1550     abi_ulong target_cmsg_addr;
1551     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1552     socklen_t space = 0;
1553 
1554     msg_controllen = tswapal(target_msgh->msg_controllen);
1555     if (msg_controllen < sizeof (struct target_cmsghdr))
1556         goto the_end;
1557     target_cmsg_addr = tswapal(target_msgh->msg_control);
1558     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1559     target_cmsg_start = target_cmsg;
1560     if (!target_cmsg)
1561         return -TARGET_EFAULT;
1562 
1563     while (cmsg && target_cmsg) {
1564         void *data = CMSG_DATA(cmsg);
1565         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1566 
1567         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1568         int tgt_len, tgt_space;
1569 
1570         /* We never copy a half-header but may copy half-data;
1571          * this is Linux's behaviour in put_cmsg(). Note that
1572          * truncation here is a guest problem (which we report
1573          * to the guest via the CTRUNC bit), unlike truncation
1574          * in target_to_host_cmsg, which is a QEMU bug.
1575          */
1576         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1577             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1578             break;
1579         }
1580 
1581         if (cmsg->cmsg_level == SOL_SOCKET) {
1582             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1583         } else {
1584             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1585         }
1586         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1587 
1588         /* Payload types which need a different size of payload on
1589          * the target must adjust tgt_len here.
1590          */
1591         tgt_len = len;
1592         switch (cmsg->cmsg_level) {
1593         case SOL_SOCKET:
1594             switch (cmsg->cmsg_type) {
1595             case SO_TIMESTAMP:
1596                 tgt_len = sizeof(struct target_timeval);
1597                 break;
1598             default:
1599                 break;
1600             }
1601             break;
1602         default:
1603             break;
1604         }
1605 
1606         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1607             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1608             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1609         }
1610 
1611         /* We must now copy-and-convert len bytes of payload
1612          * into tgt_len bytes of destination space. Bear in mind
1613          * that in both source and destination we may be dealing
1614          * with a truncated value!
1615          */
1616         switch (cmsg->cmsg_level) {
1617         case SOL_SOCKET:
1618             switch (cmsg->cmsg_type) {
1619             case SCM_RIGHTS:
1620             {
1621                 int *fd = (int *)data;
1622                 int *target_fd = (int *)target_data;
1623                 int i, numfds = tgt_len / sizeof(int);
1624 
1625                 for (i = 0; i < numfds; i++) {
1626                     __put_user(fd[i], target_fd + i);
1627                 }
1628                 break;
1629             }
1630             case SO_TIMESTAMP:
1631             {
1632                 struct timeval *tv = (struct timeval *)data;
1633                 struct target_timeval *target_tv =
1634                     (struct target_timeval *)target_data;
1635 
1636                 if (len != sizeof(struct timeval) ||
1637                     tgt_len != sizeof(struct target_timeval)) {
1638                     goto unimplemented;
1639                 }
1640 
1641                 /* copy struct timeval to target */
1642                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1643                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1644                 break;
1645             }
1646             case SCM_CREDENTIALS:
1647             {
1648                 struct ucred *cred = (struct ucred *)data;
1649                 struct target_ucred *target_cred =
1650                     (struct target_ucred *)target_data;
1651 
1652                 __put_user(cred->pid, &target_cred->pid);
1653                 __put_user(cred->uid, &target_cred->uid);
1654                 __put_user(cred->gid, &target_cred->gid);
1655                 break;
1656             }
1657             default:
1658                 goto unimplemented;
1659             }
1660             break;
1661 
1662         case SOL_IP:
1663             switch (cmsg->cmsg_type) {
1664             case IP_TTL:
1665             {
1666                 uint32_t *v = (uint32_t *)data;
1667                 uint32_t *t_int = (uint32_t *)target_data;
1668 
1669                 if (len != sizeof(uint32_t) ||
1670                     tgt_len != sizeof(uint32_t)) {
1671                     goto unimplemented;
1672                 }
1673                 __put_user(*v, t_int);
1674                 break;
1675             }
1676             case IP_RECVERR:
1677             {
1678                 struct errhdr_t {
1679                    struct sock_extended_err ee;
1680                    struct sockaddr_in offender;
1681                 };
1682                 struct errhdr_t *errh = (struct errhdr_t *)data;
1683                 struct errhdr_t *target_errh =
1684                     (struct errhdr_t *)target_data;
1685 
1686                 if (len != sizeof(struct errhdr_t) ||
1687                     tgt_len != sizeof(struct errhdr_t)) {
1688                     goto unimplemented;
1689                 }
1690                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1691                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1692                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1693                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1694                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1695                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1696                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1697                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1698                     (void *) &errh->offender, sizeof(errh->offender));
1699                 break;
1700             }
1701             default:
1702                 goto unimplemented;
1703             }
1704             break;
1705 
1706         case SOL_IPV6:
1707             switch (cmsg->cmsg_type) {
1708             case IPV6_HOPLIMIT:
1709             {
1710                 uint32_t *v = (uint32_t *)data;
1711                 uint32_t *t_int = (uint32_t *)target_data;
1712 
1713                 if (len != sizeof(uint32_t) ||
1714                     tgt_len != sizeof(uint32_t)) {
1715                     goto unimplemented;
1716                 }
1717                 __put_user(*v, t_int);
1718                 break;
1719             }
1720             case IPV6_RECVERR:
1721             {
1722                 struct errhdr6_t {
1723                    struct sock_extended_err ee;
1724                    struct sockaddr_in6 offender;
1725                 };
1726                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1727                 struct errhdr6_t *target_errh =
1728                     (struct errhdr6_t *)target_data;
1729 
1730                 if (len != sizeof(struct errhdr6_t) ||
1731                     tgt_len != sizeof(struct errhdr6_t)) {
1732                     goto unimplemented;
1733                 }
1734                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1735                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1736                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1737                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1738                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1739                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1740                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1741                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1742                     (void *) &errh->offender, sizeof(errh->offender));
1743                 break;
1744             }
1745             default:
1746                 goto unimplemented;
1747             }
1748             break;
1749 
1750         default:
1751         unimplemented:
1752             gemu_log("Unsupported ancillary data: %d/%d\n",
1753                                         cmsg->cmsg_level, cmsg->cmsg_type);
1754             memcpy(target_data, data, MIN(len, tgt_len));
1755             if (tgt_len > len) {
1756                 memset(target_data + len, 0, tgt_len - len);
1757             }
1758         }
1759 
1760         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1761         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1762         if (msg_controllen < tgt_space) {
1763             tgt_space = msg_controllen;
1764         }
1765         msg_controllen -= tgt_space;
1766         space += tgt_space;
1767         cmsg = CMSG_NXTHDR(msgh, cmsg);
1768         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1769                                          target_cmsg_start);
1770     }
1771     unlock_user(target_cmsg, target_cmsg_addr, space);
1772  the_end:
1773     target_msgh->msg_controllen = tswapal(space);
1774     return 0;
1775 }
1776 
1777 /* do_setsockopt() Must return target values and target errnos. */
1778 static abi_long do_setsockopt(int sockfd, int level, int optname,
1779                               abi_ulong optval_addr, socklen_t optlen)
1780 {
1781     abi_long ret;
1782     int val;
1783     struct ip_mreqn *ip_mreq;
1784     struct ip_mreq_source *ip_mreq_source;
1785 
1786     switch(level) {
1787     case SOL_TCP:
1788         /* TCP options all take an 'int' value.  */
1789         if (optlen < sizeof(uint32_t))
1790             return -TARGET_EINVAL;
1791 
1792         if (get_user_u32(val, optval_addr))
1793             return -TARGET_EFAULT;
1794         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1795         break;
1796     case SOL_IP:
1797         switch(optname) {
1798         case IP_TOS:
1799         case IP_TTL:
1800         case IP_HDRINCL:
1801         case IP_ROUTER_ALERT:
1802         case IP_RECVOPTS:
1803         case IP_RETOPTS:
1804         case IP_PKTINFO:
1805         case IP_MTU_DISCOVER:
1806         case IP_RECVERR:
1807         case IP_RECVTTL:
1808         case IP_RECVTOS:
1809 #ifdef IP_FREEBIND
1810         case IP_FREEBIND:
1811 #endif
1812         case IP_MULTICAST_TTL:
1813         case IP_MULTICAST_LOOP:
1814             val = 0;
1815             if (optlen >= sizeof(uint32_t)) {
1816                 if (get_user_u32(val, optval_addr))
1817                     return -TARGET_EFAULT;
1818             } else if (optlen >= 1) {
1819                 if (get_user_u8(val, optval_addr))
1820                     return -TARGET_EFAULT;
1821             }
1822             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1823             break;
1824         case IP_ADD_MEMBERSHIP:
1825         case IP_DROP_MEMBERSHIP:
1826             if (optlen < sizeof (struct target_ip_mreq) ||
1827                 optlen > sizeof (struct target_ip_mreqn))
1828                 return -TARGET_EINVAL;
1829 
1830             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1831             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1832             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1833             break;
1834 
1835         case IP_BLOCK_SOURCE:
1836         case IP_UNBLOCK_SOURCE:
1837         case IP_ADD_SOURCE_MEMBERSHIP:
1838         case IP_DROP_SOURCE_MEMBERSHIP:
1839             if (optlen != sizeof (struct target_ip_mreq_source))
1840                 return -TARGET_EINVAL;
1841 
1842             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1843             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1844             unlock_user (ip_mreq_source, optval_addr, 0);
1845             break;
1846 
1847         default:
1848             goto unimplemented;
1849         }
1850         break;
1851     case SOL_IPV6:
1852         switch (optname) {
1853         case IPV6_MTU_DISCOVER:
1854         case IPV6_MTU:
1855         case IPV6_V6ONLY:
1856         case IPV6_RECVPKTINFO:
1857         case IPV6_UNICAST_HOPS:
1858         case IPV6_MULTICAST_HOPS:
1859         case IPV6_MULTICAST_LOOP:
1860         case IPV6_RECVERR:
1861         case IPV6_RECVHOPLIMIT:
1862         case IPV6_2292HOPLIMIT:
1863         case IPV6_CHECKSUM:
1864         case IPV6_ADDRFORM:
1865         case IPV6_2292PKTINFO:
1866         case IPV6_RECVTCLASS:
1867         case IPV6_RECVRTHDR:
1868         case IPV6_2292RTHDR:
1869         case IPV6_RECVHOPOPTS:
1870         case IPV6_2292HOPOPTS:
1871         case IPV6_RECVDSTOPTS:
1872         case IPV6_2292DSTOPTS:
1873         case IPV6_TCLASS:
1874 #ifdef IPV6_RECVPATHMTU
1875         case IPV6_RECVPATHMTU:
1876 #endif
1877 #ifdef IPV6_TRANSPARENT
1878         case IPV6_TRANSPARENT:
1879 #endif
1880 #ifdef IPV6_FREEBIND
1881         case IPV6_FREEBIND:
1882 #endif
1883 #ifdef IPV6_RECVORIGDSTADDR
1884         case IPV6_RECVORIGDSTADDR:
1885 #endif
1886             val = 0;
1887             if (optlen < sizeof(uint32_t)) {
1888                 return -TARGET_EINVAL;
1889             }
1890             if (get_user_u32(val, optval_addr)) {
1891                 return -TARGET_EFAULT;
1892             }
1893             ret = get_errno(setsockopt(sockfd, level, optname,
1894                                        &val, sizeof(val)));
1895             break;
1896         case IPV6_PKTINFO:
1897         {
1898             struct in6_pktinfo pki;
1899 
1900             if (optlen < sizeof(pki)) {
1901                 return -TARGET_EINVAL;
1902             }
1903 
1904             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1905                 return -TARGET_EFAULT;
1906             }
1907 
1908             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1909 
1910             ret = get_errno(setsockopt(sockfd, level, optname,
1911                                        &pki, sizeof(pki)));
1912             break;
1913         }
1914         default:
1915             goto unimplemented;
1916         }
1917         break;
1918     case SOL_ICMPV6:
1919         switch (optname) {
1920         case ICMPV6_FILTER:
1921         {
1922             struct icmp6_filter icmp6f;
1923 
1924             if (optlen > sizeof(icmp6f)) {
1925                 optlen = sizeof(icmp6f);
1926             }
1927 
1928             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
1929                 return -TARGET_EFAULT;
1930             }
1931 
1932             for (val = 0; val < 8; val++) {
1933                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
1934             }
1935 
1936             ret = get_errno(setsockopt(sockfd, level, optname,
1937                                        &icmp6f, optlen));
1938             break;
1939         }
1940         default:
1941             goto unimplemented;
1942         }
1943         break;
1944     case SOL_RAW:
1945         switch (optname) {
1946         case ICMP_FILTER:
1947         case IPV6_CHECKSUM:
1948             /* those take an u32 value */
1949             if (optlen < sizeof(uint32_t)) {
1950                 return -TARGET_EINVAL;
1951             }
1952 
1953             if (get_user_u32(val, optval_addr)) {
1954                 return -TARGET_EFAULT;
1955             }
1956             ret = get_errno(setsockopt(sockfd, level, optname,
1957                                        &val, sizeof(val)));
1958             break;
1959 
1960         default:
1961             goto unimplemented;
1962         }
1963         break;
1964     case TARGET_SOL_SOCKET:
1965         switch (optname) {
1966         case TARGET_SO_RCVTIMEO:
1967         {
1968                 struct timeval tv;
1969 
1970                 optname = SO_RCVTIMEO;
1971 
1972 set_timeout:
1973                 if (optlen != sizeof(struct target_timeval)) {
1974                     return -TARGET_EINVAL;
1975                 }
1976 
1977                 if (copy_from_user_timeval(&tv, optval_addr)) {
1978                     return -TARGET_EFAULT;
1979                 }
1980 
1981                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1982                                 &tv, sizeof(tv)));
1983                 return ret;
1984         }
1985         case TARGET_SO_SNDTIMEO:
1986                 optname = SO_SNDTIMEO;
1987                 goto set_timeout;
1988         case TARGET_SO_ATTACH_FILTER:
1989         {
1990                 struct target_sock_fprog *tfprog;
1991                 struct target_sock_filter *tfilter;
1992                 struct sock_fprog fprog;
1993                 struct sock_filter *filter;
1994                 int i;
1995 
1996                 if (optlen != sizeof(*tfprog)) {
1997                     return -TARGET_EINVAL;
1998                 }
1999                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2000                     return -TARGET_EFAULT;
2001                 }
2002                 if (!lock_user_struct(VERIFY_READ, tfilter,
2003                                       tswapal(tfprog->filter), 0)) {
2004                     unlock_user_struct(tfprog, optval_addr, 1);
2005                     return -TARGET_EFAULT;
2006                 }
2007 
2008                 fprog.len = tswap16(tfprog->len);
2009                 filter = g_try_new(struct sock_filter, fprog.len);
2010                 if (filter == NULL) {
2011                     unlock_user_struct(tfilter, tfprog->filter, 1);
2012                     unlock_user_struct(tfprog, optval_addr, 1);
2013                     return -TARGET_ENOMEM;
2014                 }
2015                 for (i = 0; i < fprog.len; i++) {
2016                     filter[i].code = tswap16(tfilter[i].code);
2017                     filter[i].jt = tfilter[i].jt;
2018                     filter[i].jf = tfilter[i].jf;
2019                     filter[i].k = tswap32(tfilter[i].k);
2020                 }
2021                 fprog.filter = filter;
2022 
2023                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2024                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2025                 g_free(filter);
2026 
2027                 unlock_user_struct(tfilter, tfprog->filter, 1);
2028                 unlock_user_struct(tfprog, optval_addr, 1);
2029                 return ret;
2030         }
2031 	case TARGET_SO_BINDTODEVICE:
2032 	{
2033 		char *dev_ifname, *addr_ifname;
2034 
2035 		if (optlen > IFNAMSIZ - 1) {
2036 		    optlen = IFNAMSIZ - 1;
2037 		}
2038 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2039 		if (!dev_ifname) {
2040 		    return -TARGET_EFAULT;
2041 		}
2042 		optname = SO_BINDTODEVICE;
2043 		addr_ifname = alloca(IFNAMSIZ);
2044 		memcpy(addr_ifname, dev_ifname, optlen);
2045 		addr_ifname[optlen] = 0;
2046 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2047                                            addr_ifname, optlen));
2048 		unlock_user (dev_ifname, optval_addr, 0);
2049 		return ret;
2050 	}
2051         case TARGET_SO_LINGER:
2052         {
2053                 struct linger lg;
2054                 struct target_linger *tlg;
2055 
2056                 if (optlen != sizeof(struct target_linger)) {
2057                     return -TARGET_EINVAL;
2058                 }
2059                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2060                     return -TARGET_EFAULT;
2061                 }
2062                 __get_user(lg.l_onoff, &tlg->l_onoff);
2063                 __get_user(lg.l_linger, &tlg->l_linger);
2064                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2065                                 &lg, sizeof(lg)));
2066                 unlock_user_struct(tlg, optval_addr, 0);
2067                 return ret;
2068         }
2069             /* Options with 'int' argument.  */
2070         case TARGET_SO_DEBUG:
2071 		optname = SO_DEBUG;
2072 		break;
2073         case TARGET_SO_REUSEADDR:
2074 		optname = SO_REUSEADDR;
2075 		break;
2076 #ifdef SO_REUSEPORT
2077         case TARGET_SO_REUSEPORT:
2078                 optname = SO_REUSEPORT;
2079                 break;
2080 #endif
2081         case TARGET_SO_TYPE:
2082 		optname = SO_TYPE;
2083 		break;
2084         case TARGET_SO_ERROR:
2085 		optname = SO_ERROR;
2086 		break;
2087         case TARGET_SO_DONTROUTE:
2088 		optname = SO_DONTROUTE;
2089 		break;
2090         case TARGET_SO_BROADCAST:
2091 		optname = SO_BROADCAST;
2092 		break;
2093         case TARGET_SO_SNDBUF:
2094 		optname = SO_SNDBUF;
2095 		break;
2096         case TARGET_SO_SNDBUFFORCE:
2097                 optname = SO_SNDBUFFORCE;
2098                 break;
2099         case TARGET_SO_RCVBUF:
2100 		optname = SO_RCVBUF;
2101 		break;
2102         case TARGET_SO_RCVBUFFORCE:
2103                 optname = SO_RCVBUFFORCE;
2104                 break;
2105         case TARGET_SO_KEEPALIVE:
2106 		optname = SO_KEEPALIVE;
2107 		break;
2108         case TARGET_SO_OOBINLINE:
2109 		optname = SO_OOBINLINE;
2110 		break;
2111         case TARGET_SO_NO_CHECK:
2112 		optname = SO_NO_CHECK;
2113 		break;
2114         case TARGET_SO_PRIORITY:
2115 		optname = SO_PRIORITY;
2116 		break;
2117 #ifdef SO_BSDCOMPAT
2118         case TARGET_SO_BSDCOMPAT:
2119 		optname = SO_BSDCOMPAT;
2120 		break;
2121 #endif
2122         case TARGET_SO_PASSCRED:
2123 		optname = SO_PASSCRED;
2124 		break;
2125         case TARGET_SO_PASSSEC:
2126                 optname = SO_PASSSEC;
2127                 break;
2128         case TARGET_SO_TIMESTAMP:
2129 		optname = SO_TIMESTAMP;
2130 		break;
2131         case TARGET_SO_RCVLOWAT:
2132 		optname = SO_RCVLOWAT;
2133 		break;
2134         default:
2135             goto unimplemented;
2136         }
2137 	if (optlen < sizeof(uint32_t))
2138             return -TARGET_EINVAL;
2139 
2140 	if (get_user_u32(val, optval_addr))
2141             return -TARGET_EFAULT;
2142 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2143         break;
2144     default:
2145     unimplemented:
2146         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2147         ret = -TARGET_ENOPROTOOPT;
2148     }
2149     return ret;
2150 }
2151 
2152 /* do_getsockopt() Must return target values and target errnos. */
2153 static abi_long do_getsockopt(int sockfd, int level, int optname,
2154                               abi_ulong optval_addr, abi_ulong optlen)
2155 {
2156     abi_long ret;
2157     int len, val;
2158     socklen_t lv;
2159 
2160     switch(level) {
2161     case TARGET_SOL_SOCKET:
2162         level = SOL_SOCKET;
2163         switch (optname) {
2164         /* These don't just return a single integer */
2165         case TARGET_SO_RCVTIMEO:
2166         case TARGET_SO_SNDTIMEO:
2167         case TARGET_SO_PEERNAME:
2168             goto unimplemented;
2169         case TARGET_SO_PEERCRED: {
2170             struct ucred cr;
2171             socklen_t crlen;
2172             struct target_ucred *tcr;
2173 
2174             if (get_user_u32(len, optlen)) {
2175                 return -TARGET_EFAULT;
2176             }
2177             if (len < 0) {
2178                 return -TARGET_EINVAL;
2179             }
2180 
2181             crlen = sizeof(cr);
2182             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2183                                        &cr, &crlen));
2184             if (ret < 0) {
2185                 return ret;
2186             }
2187             if (len > crlen) {
2188                 len = crlen;
2189             }
2190             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2191                 return -TARGET_EFAULT;
2192             }
2193             __put_user(cr.pid, &tcr->pid);
2194             __put_user(cr.uid, &tcr->uid);
2195             __put_user(cr.gid, &tcr->gid);
2196             unlock_user_struct(tcr, optval_addr, 1);
2197             if (put_user_u32(len, optlen)) {
2198                 return -TARGET_EFAULT;
2199             }
2200             break;
2201         }
2202         case TARGET_SO_LINGER:
2203         {
2204             struct linger lg;
2205             socklen_t lglen;
2206             struct target_linger *tlg;
2207 
2208             if (get_user_u32(len, optlen)) {
2209                 return -TARGET_EFAULT;
2210             }
2211             if (len < 0) {
2212                 return -TARGET_EINVAL;
2213             }
2214 
2215             lglen = sizeof(lg);
2216             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2217                                        &lg, &lglen));
2218             if (ret < 0) {
2219                 return ret;
2220             }
2221             if (len > lglen) {
2222                 len = lglen;
2223             }
2224             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2225                 return -TARGET_EFAULT;
2226             }
2227             __put_user(lg.l_onoff, &tlg->l_onoff);
2228             __put_user(lg.l_linger, &tlg->l_linger);
2229             unlock_user_struct(tlg, optval_addr, 1);
2230             if (put_user_u32(len, optlen)) {
2231                 return -TARGET_EFAULT;
2232             }
2233             break;
2234         }
2235         /* Options with 'int' argument.  */
2236         case TARGET_SO_DEBUG:
2237             optname = SO_DEBUG;
2238             goto int_case;
2239         case TARGET_SO_REUSEADDR:
2240             optname = SO_REUSEADDR;
2241             goto int_case;
2242 #ifdef SO_REUSEPORT
2243         case TARGET_SO_REUSEPORT:
2244             optname = SO_REUSEPORT;
2245             goto int_case;
2246 #endif
2247         case TARGET_SO_TYPE:
2248             optname = SO_TYPE;
2249             goto int_case;
2250         case TARGET_SO_ERROR:
2251             optname = SO_ERROR;
2252             goto int_case;
2253         case TARGET_SO_DONTROUTE:
2254             optname = SO_DONTROUTE;
2255             goto int_case;
2256         case TARGET_SO_BROADCAST:
2257             optname = SO_BROADCAST;
2258             goto int_case;
2259         case TARGET_SO_SNDBUF:
2260             optname = SO_SNDBUF;
2261             goto int_case;
2262         case TARGET_SO_RCVBUF:
2263             optname = SO_RCVBUF;
2264             goto int_case;
2265         case TARGET_SO_KEEPALIVE:
2266             optname = SO_KEEPALIVE;
2267             goto int_case;
2268         case TARGET_SO_OOBINLINE:
2269             optname = SO_OOBINLINE;
2270             goto int_case;
2271         case TARGET_SO_NO_CHECK:
2272             optname = SO_NO_CHECK;
2273             goto int_case;
2274         case TARGET_SO_PRIORITY:
2275             optname = SO_PRIORITY;
2276             goto int_case;
2277 #ifdef SO_BSDCOMPAT
2278         case TARGET_SO_BSDCOMPAT:
2279             optname = SO_BSDCOMPAT;
2280             goto int_case;
2281 #endif
2282         case TARGET_SO_PASSCRED:
2283             optname = SO_PASSCRED;
2284             goto int_case;
2285         case TARGET_SO_TIMESTAMP:
2286             optname = SO_TIMESTAMP;
2287             goto int_case;
2288         case TARGET_SO_RCVLOWAT:
2289             optname = SO_RCVLOWAT;
2290             goto int_case;
2291         case TARGET_SO_ACCEPTCONN:
2292             optname = SO_ACCEPTCONN;
2293             goto int_case;
2294         default:
2295             goto int_case;
2296         }
2297         break;
2298     case SOL_TCP:
2299         /* TCP options all take an 'int' value.  */
2300     int_case:
2301         if (get_user_u32(len, optlen))
2302             return -TARGET_EFAULT;
2303         if (len < 0)
2304             return -TARGET_EINVAL;
2305         lv = sizeof(lv);
2306         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2307         if (ret < 0)
2308             return ret;
2309         if (optname == SO_TYPE) {
2310             val = host_to_target_sock_type(val);
2311         }
2312         if (len > lv)
2313             len = lv;
2314         if (len == 4) {
2315             if (put_user_u32(val, optval_addr))
2316                 return -TARGET_EFAULT;
2317         } else {
2318             if (put_user_u8(val, optval_addr))
2319                 return -TARGET_EFAULT;
2320         }
2321         if (put_user_u32(len, optlen))
2322             return -TARGET_EFAULT;
2323         break;
2324     case SOL_IP:
2325         switch(optname) {
2326         case IP_TOS:
2327         case IP_TTL:
2328         case IP_HDRINCL:
2329         case IP_ROUTER_ALERT:
2330         case IP_RECVOPTS:
2331         case IP_RETOPTS:
2332         case IP_PKTINFO:
2333         case IP_MTU_DISCOVER:
2334         case IP_RECVERR:
2335         case IP_RECVTOS:
2336 #ifdef IP_FREEBIND
2337         case IP_FREEBIND:
2338 #endif
2339         case IP_MULTICAST_TTL:
2340         case IP_MULTICAST_LOOP:
2341             if (get_user_u32(len, optlen))
2342                 return -TARGET_EFAULT;
2343             if (len < 0)
2344                 return -TARGET_EINVAL;
2345             lv = sizeof(lv);
2346             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2347             if (ret < 0)
2348                 return ret;
2349             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2350                 len = 1;
2351                 if (put_user_u32(len, optlen)
2352                     || put_user_u8(val, optval_addr))
2353                     return -TARGET_EFAULT;
2354             } else {
2355                 if (len > sizeof(int))
2356                     len = sizeof(int);
2357                 if (put_user_u32(len, optlen)
2358                     || put_user_u32(val, optval_addr))
2359                     return -TARGET_EFAULT;
2360             }
2361             break;
2362         default:
2363             ret = -TARGET_ENOPROTOOPT;
2364             break;
2365         }
2366         break;
2367     case SOL_IPV6:
2368         switch (optname) {
2369         case IPV6_MTU_DISCOVER:
2370         case IPV6_MTU:
2371         case IPV6_V6ONLY:
2372         case IPV6_RECVPKTINFO:
2373         case IPV6_UNICAST_HOPS:
2374         case IPV6_MULTICAST_HOPS:
2375         case IPV6_MULTICAST_LOOP:
2376         case IPV6_RECVERR:
2377         case IPV6_RECVHOPLIMIT:
2378         case IPV6_2292HOPLIMIT:
2379         case IPV6_CHECKSUM:
2380         case IPV6_ADDRFORM:
2381         case IPV6_2292PKTINFO:
2382         case IPV6_RECVTCLASS:
2383         case IPV6_RECVRTHDR:
2384         case IPV6_2292RTHDR:
2385         case IPV6_RECVHOPOPTS:
2386         case IPV6_2292HOPOPTS:
2387         case IPV6_RECVDSTOPTS:
2388         case IPV6_2292DSTOPTS:
2389         case IPV6_TCLASS:
2390 #ifdef IPV6_RECVPATHMTU
2391         case IPV6_RECVPATHMTU:
2392 #endif
2393 #ifdef IPV6_TRANSPARENT
2394         case IPV6_TRANSPARENT:
2395 #endif
2396 #ifdef IPV6_FREEBIND
2397         case IPV6_FREEBIND:
2398 #endif
2399 #ifdef IPV6_RECVORIGDSTADDR
2400         case IPV6_RECVORIGDSTADDR:
2401 #endif
2402             if (get_user_u32(len, optlen))
2403                 return -TARGET_EFAULT;
2404             if (len < 0)
2405                 return -TARGET_EINVAL;
2406             lv = sizeof(lv);
2407             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2408             if (ret < 0)
2409                 return ret;
2410             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2411                 len = 1;
2412                 if (put_user_u32(len, optlen)
2413                     || put_user_u8(val, optval_addr))
2414                     return -TARGET_EFAULT;
2415             } else {
2416                 if (len > sizeof(int))
2417                     len = sizeof(int);
2418                 if (put_user_u32(len, optlen)
2419                     || put_user_u32(val, optval_addr))
2420                     return -TARGET_EFAULT;
2421             }
2422             break;
2423         default:
2424             ret = -TARGET_ENOPROTOOPT;
2425             break;
2426         }
2427         break;
2428     default:
2429     unimplemented:
2430         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2431                  level, optname);
2432         ret = -TARGET_EOPNOTSUPP;
2433         break;
2434     }
2435     return ret;
2436 }
2437 
2438 /* Convert target low/high pair representing file offset into the host
2439  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2440  * as the kernel doesn't handle them either.
2441  */
2442 static void target_to_host_low_high(abi_ulong tlow,
2443                                     abi_ulong thigh,
2444                                     unsigned long *hlow,
2445                                     unsigned long *hhigh)
2446 {
2447     uint64_t off = tlow |
2448         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2449         TARGET_LONG_BITS / 2;
2450 
2451     *hlow = off;
2452     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2453 }
2454 
2455 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2456                                 abi_ulong count, int copy)
2457 {
2458     struct target_iovec *target_vec;
2459     struct iovec *vec;
2460     abi_ulong total_len, max_len;
2461     int i;
2462     int err = 0;
2463     bool bad_address = false;
2464 
2465     if (count == 0) {
2466         errno = 0;
2467         return NULL;
2468     }
2469     if (count > IOV_MAX) {
2470         errno = EINVAL;
2471         return NULL;
2472     }
2473 
2474     vec = g_try_new0(struct iovec, count);
2475     if (vec == NULL) {
2476         errno = ENOMEM;
2477         return NULL;
2478     }
2479 
2480     target_vec = lock_user(VERIFY_READ, target_addr,
2481                            count * sizeof(struct target_iovec), 1);
2482     if (target_vec == NULL) {
2483         err = EFAULT;
2484         goto fail2;
2485     }
2486 
2487     /* ??? If host page size > target page size, this will result in a
2488        value larger than what we can actually support.  */
2489     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2490     total_len = 0;
2491 
2492     for (i = 0; i < count; i++) {
2493         abi_ulong base = tswapal(target_vec[i].iov_base);
2494         abi_long len = tswapal(target_vec[i].iov_len);
2495 
2496         if (len < 0) {
2497             err = EINVAL;
2498             goto fail;
2499         } else if (len == 0) {
2500             /* Zero length pointer is ignored.  */
2501             vec[i].iov_base = 0;
2502         } else {
2503             vec[i].iov_base = lock_user(type, base, len, copy);
2504             /* If the first buffer pointer is bad, this is a fault.  But
2505              * subsequent bad buffers will result in a partial write; this
2506              * is realized by filling the vector with null pointers and
2507              * zero lengths. */
2508             if (!vec[i].iov_base) {
2509                 if (i == 0) {
2510                     err = EFAULT;
2511                     goto fail;
2512                 } else {
2513                     bad_address = true;
2514                 }
2515             }
2516             if (bad_address) {
2517                 len = 0;
2518             }
2519             if (len > max_len - total_len) {
2520                 len = max_len - total_len;
2521             }
2522         }
2523         vec[i].iov_len = len;
2524         total_len += len;
2525     }
2526 
2527     unlock_user(target_vec, target_addr, 0);
2528     return vec;
2529 
2530  fail:
2531     while (--i >= 0) {
2532         if (tswapal(target_vec[i].iov_len) > 0) {
2533             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2534         }
2535     }
2536     unlock_user(target_vec, target_addr, 0);
2537  fail2:
2538     g_free(vec);
2539     errno = err;
2540     return NULL;
2541 }
2542 
2543 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2544                          abi_ulong count, int copy)
2545 {
2546     struct target_iovec *target_vec;
2547     int i;
2548 
2549     target_vec = lock_user(VERIFY_READ, target_addr,
2550                            count * sizeof(struct target_iovec), 1);
2551     if (target_vec) {
2552         for (i = 0; i < count; i++) {
2553             abi_ulong base = tswapal(target_vec[i].iov_base);
2554             abi_long len = tswapal(target_vec[i].iov_len);
2555             if (len < 0) {
2556                 break;
2557             }
2558             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2559         }
2560         unlock_user(target_vec, target_addr, 0);
2561     }
2562 
2563     g_free(vec);
2564 }
2565 
2566 static inline int target_to_host_sock_type(int *type)
2567 {
2568     int host_type = 0;
2569     int target_type = *type;
2570 
2571     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2572     case TARGET_SOCK_DGRAM:
2573         host_type = SOCK_DGRAM;
2574         break;
2575     case TARGET_SOCK_STREAM:
2576         host_type = SOCK_STREAM;
2577         break;
2578     default:
2579         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2580         break;
2581     }
2582     if (target_type & TARGET_SOCK_CLOEXEC) {
2583 #if defined(SOCK_CLOEXEC)
2584         host_type |= SOCK_CLOEXEC;
2585 #else
2586         return -TARGET_EINVAL;
2587 #endif
2588     }
2589     if (target_type & TARGET_SOCK_NONBLOCK) {
2590 #if defined(SOCK_NONBLOCK)
2591         host_type |= SOCK_NONBLOCK;
2592 #elif !defined(O_NONBLOCK)
2593         return -TARGET_EINVAL;
2594 #endif
2595     }
2596     *type = host_type;
2597     return 0;
2598 }
2599 
2600 /* Try to emulate socket type flags after socket creation.  */
2601 static int sock_flags_fixup(int fd, int target_type)
2602 {
2603 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2604     if (target_type & TARGET_SOCK_NONBLOCK) {
2605         int flags = fcntl(fd, F_GETFL);
2606         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2607             close(fd);
2608             return -TARGET_EINVAL;
2609         }
2610     }
2611 #endif
2612     return fd;
2613 }
2614 
2615 /* do_socket() Must return target values and target errnos. */
2616 static abi_long do_socket(int domain, int type, int protocol)
2617 {
2618     int target_type = type;
2619     int ret;
2620 
2621     ret = target_to_host_sock_type(&type);
2622     if (ret) {
2623         return ret;
2624     }
2625 
2626     if (domain == PF_NETLINK && !(
2627 #ifdef CONFIG_RTNETLINK
2628          protocol == NETLINK_ROUTE ||
2629 #endif
2630          protocol == NETLINK_KOBJECT_UEVENT ||
2631          protocol == NETLINK_AUDIT)) {
2632         return -EPFNOSUPPORT;
2633     }
2634 
2635     if (domain == AF_PACKET ||
2636         (domain == AF_INET && type == SOCK_PACKET)) {
2637         protocol = tswap16(protocol);
2638     }
2639 
2640     ret = get_errno(socket(domain, type, protocol));
2641     if (ret >= 0) {
2642         ret = sock_flags_fixup(ret, target_type);
2643         if (type == SOCK_PACKET) {
2644             /* Manage an obsolete case :
2645              * if socket type is SOCK_PACKET, bind by name
2646              */
2647             fd_trans_register(ret, &target_packet_trans);
2648         } else if (domain == PF_NETLINK) {
2649             switch (protocol) {
2650 #ifdef CONFIG_RTNETLINK
2651             case NETLINK_ROUTE:
2652                 fd_trans_register(ret, &target_netlink_route_trans);
2653                 break;
2654 #endif
2655             case NETLINK_KOBJECT_UEVENT:
2656                 /* nothing to do: messages are strings */
2657                 break;
2658             case NETLINK_AUDIT:
2659                 fd_trans_register(ret, &target_netlink_audit_trans);
2660                 break;
2661             default:
2662                 g_assert_not_reached();
2663             }
2664         }
2665     }
2666     return ret;
2667 }
2668 
2669 /* do_bind() Must return target values and target errnos. */
2670 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2671                         socklen_t addrlen)
2672 {
2673     void *addr;
2674     abi_long ret;
2675 
2676     if ((int)addrlen < 0) {
2677         return -TARGET_EINVAL;
2678     }
2679 
2680     addr = alloca(addrlen+1);
2681 
2682     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2683     if (ret)
2684         return ret;
2685 
2686     return get_errno(bind(sockfd, addr, addrlen));
2687 }
2688 
2689 /* do_connect() Must return target values and target errnos. */
2690 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2691                            socklen_t addrlen)
2692 {
2693     void *addr;
2694     abi_long ret;
2695 
2696     if ((int)addrlen < 0) {
2697         return -TARGET_EINVAL;
2698     }
2699 
2700     addr = alloca(addrlen+1);
2701 
2702     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2703     if (ret)
2704         return ret;
2705 
2706     return get_errno(safe_connect(sockfd, addr, addrlen));
2707 }
2708 
2709 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2710 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2711                                       int flags, int send)
2712 {
2713     abi_long ret, len;
2714     struct msghdr msg;
2715     abi_ulong count;
2716     struct iovec *vec;
2717     abi_ulong target_vec;
2718 
2719     if (msgp->msg_name) {
2720         msg.msg_namelen = tswap32(msgp->msg_namelen);
2721         msg.msg_name = alloca(msg.msg_namelen+1);
2722         ret = target_to_host_sockaddr(fd, msg.msg_name,
2723                                       tswapal(msgp->msg_name),
2724                                       msg.msg_namelen);
2725         if (ret == -TARGET_EFAULT) {
2726             /* For connected sockets msg_name and msg_namelen must
2727              * be ignored, so returning EFAULT immediately is wrong.
2728              * Instead, pass a bad msg_name to the host kernel, and
2729              * let it decide whether to return EFAULT or not.
2730              */
2731             msg.msg_name = (void *)-1;
2732         } else if (ret) {
2733             goto out2;
2734         }
2735     } else {
2736         msg.msg_name = NULL;
2737         msg.msg_namelen = 0;
2738     }
2739     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2740     msg.msg_control = alloca(msg.msg_controllen);
2741     memset(msg.msg_control, 0, msg.msg_controllen);
2742 
2743     msg.msg_flags = tswap32(msgp->msg_flags);
2744 
2745     count = tswapal(msgp->msg_iovlen);
2746     target_vec = tswapal(msgp->msg_iov);
2747 
2748     if (count > IOV_MAX) {
2749         /* sendrcvmsg returns a different errno for this condition than
2750          * readv/writev, so we must catch it here before lock_iovec() does.
2751          */
2752         ret = -TARGET_EMSGSIZE;
2753         goto out2;
2754     }
2755 
2756     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2757                      target_vec, count, send);
2758     if (vec == NULL) {
2759         ret = -host_to_target_errno(errno);
2760         goto out2;
2761     }
2762     msg.msg_iovlen = count;
2763     msg.msg_iov = vec;
2764 
2765     if (send) {
2766         if (fd_trans_target_to_host_data(fd)) {
2767             void *host_msg;
2768 
2769             host_msg = g_malloc(msg.msg_iov->iov_len);
2770             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2771             ret = fd_trans_target_to_host_data(fd)(host_msg,
2772                                                    msg.msg_iov->iov_len);
2773             if (ret >= 0) {
2774                 msg.msg_iov->iov_base = host_msg;
2775                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2776             }
2777             g_free(host_msg);
2778         } else {
2779             ret = target_to_host_cmsg(&msg, msgp);
2780             if (ret == 0) {
2781                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2782             }
2783         }
2784     } else {
2785         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2786         if (!is_error(ret)) {
2787             len = ret;
2788             if (fd_trans_host_to_target_data(fd)) {
2789                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2790                                                MIN(msg.msg_iov->iov_len, len));
2791             } else {
2792                 ret = host_to_target_cmsg(msgp, &msg);
2793             }
2794             if (!is_error(ret)) {
2795                 msgp->msg_namelen = tswap32(msg.msg_namelen);
2796                 msgp->msg_flags = tswap32(msg.msg_flags);
2797                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2798                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2799                                     msg.msg_name, msg.msg_namelen);
2800                     if (ret) {
2801                         goto out;
2802                     }
2803                 }
2804 
2805                 ret = len;
2806             }
2807         }
2808     }
2809 
2810 out:
2811     unlock_iovec(vec, target_vec, count, !send);
2812 out2:
2813     return ret;
2814 }
2815 
2816 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2817                                int flags, int send)
2818 {
2819     abi_long ret;
2820     struct target_msghdr *msgp;
2821 
2822     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2823                           msgp,
2824                           target_msg,
2825                           send ? 1 : 0)) {
2826         return -TARGET_EFAULT;
2827     }
2828     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2829     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2830     return ret;
2831 }
2832 
2833 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2834  * so it might not have this *mmsg-specific flag either.
2835  */
2836 #ifndef MSG_WAITFORONE
2837 #define MSG_WAITFORONE 0x10000
2838 #endif
2839 
2840 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2841                                 unsigned int vlen, unsigned int flags,
2842                                 int send)
2843 {
2844     struct target_mmsghdr *mmsgp;
2845     abi_long ret = 0;
2846     int i;
2847 
2848     if (vlen > UIO_MAXIOV) {
2849         vlen = UIO_MAXIOV;
2850     }
2851 
2852     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2853     if (!mmsgp) {
2854         return -TARGET_EFAULT;
2855     }
2856 
2857     for (i = 0; i < vlen; i++) {
2858         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2859         if (is_error(ret)) {
2860             break;
2861         }
2862         mmsgp[i].msg_len = tswap32(ret);
2863         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2864         if (flags & MSG_WAITFORONE) {
2865             flags |= MSG_DONTWAIT;
2866         }
2867     }
2868 
2869     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2870 
2871     /* Return number of datagrams sent if we sent any at all;
2872      * otherwise return the error.
2873      */
2874     if (i) {
2875         return i;
2876     }
2877     return ret;
2878 }
2879 
2880 /* do_accept4() Must return target values and target errnos. */
2881 static abi_long do_accept4(int fd, abi_ulong target_addr,
2882                            abi_ulong target_addrlen_addr, int flags)
2883 {
2884     socklen_t addrlen, ret_addrlen;
2885     void *addr;
2886     abi_long ret;
2887     int host_flags;
2888 
2889     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2890 
2891     if (target_addr == 0) {
2892         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2893     }
2894 
2895     /* linux returns EINVAL if addrlen pointer is invalid */
2896     if (get_user_u32(addrlen, target_addrlen_addr))
2897         return -TARGET_EINVAL;
2898 
2899     if ((int)addrlen < 0) {
2900         return -TARGET_EINVAL;
2901     }
2902 
2903     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2904         return -TARGET_EINVAL;
2905 
2906     addr = alloca(addrlen);
2907 
2908     ret_addrlen = addrlen;
2909     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
2910     if (!is_error(ret)) {
2911         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2912         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2913             ret = -TARGET_EFAULT;
2914         }
2915     }
2916     return ret;
2917 }
2918 
2919 /* do_getpeername() Must return target values and target errnos. */
2920 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2921                                abi_ulong target_addrlen_addr)
2922 {
2923     socklen_t addrlen, ret_addrlen;
2924     void *addr;
2925     abi_long ret;
2926 
2927     if (get_user_u32(addrlen, target_addrlen_addr))
2928         return -TARGET_EFAULT;
2929 
2930     if ((int)addrlen < 0) {
2931         return -TARGET_EINVAL;
2932     }
2933 
2934     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2935         return -TARGET_EFAULT;
2936 
2937     addr = alloca(addrlen);
2938 
2939     ret_addrlen = addrlen;
2940     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
2941     if (!is_error(ret)) {
2942         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2943         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2944             ret = -TARGET_EFAULT;
2945         }
2946     }
2947     return ret;
2948 }
2949 
2950 /* do_getsockname() Must return target values and target errnos. */
2951 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2952                                abi_ulong target_addrlen_addr)
2953 {
2954     socklen_t addrlen, ret_addrlen;
2955     void *addr;
2956     abi_long ret;
2957 
2958     if (get_user_u32(addrlen, target_addrlen_addr))
2959         return -TARGET_EFAULT;
2960 
2961     if ((int)addrlen < 0) {
2962         return -TARGET_EINVAL;
2963     }
2964 
2965     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2966         return -TARGET_EFAULT;
2967 
2968     addr = alloca(addrlen);
2969 
2970     ret_addrlen = addrlen;
2971     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
2972     if (!is_error(ret)) {
2973         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2974         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2975             ret = -TARGET_EFAULT;
2976         }
2977     }
2978     return ret;
2979 }
2980 
2981 /* do_socketpair() Must return target values and target errnos. */
2982 static abi_long do_socketpair(int domain, int type, int protocol,
2983                               abi_ulong target_tab_addr)
2984 {
2985     int tab[2];
2986     abi_long ret;
2987 
2988     target_to_host_sock_type(&type);
2989 
2990     ret = get_errno(socketpair(domain, type, protocol, tab));
2991     if (!is_error(ret)) {
2992         if (put_user_s32(tab[0], target_tab_addr)
2993             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2994             ret = -TARGET_EFAULT;
2995     }
2996     return ret;
2997 }
2998 
2999 /* do_sendto() Must return target values and target errnos. */
3000 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3001                           abi_ulong target_addr, socklen_t addrlen)
3002 {
3003     void *addr;
3004     void *host_msg;
3005     void *copy_msg = NULL;
3006     abi_long ret;
3007 
3008     if ((int)addrlen < 0) {
3009         return -TARGET_EINVAL;
3010     }
3011 
3012     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3013     if (!host_msg)
3014         return -TARGET_EFAULT;
3015     if (fd_trans_target_to_host_data(fd)) {
3016         copy_msg = host_msg;
3017         host_msg = g_malloc(len);
3018         memcpy(host_msg, copy_msg, len);
3019         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3020         if (ret < 0) {
3021             goto fail;
3022         }
3023     }
3024     if (target_addr) {
3025         addr = alloca(addrlen+1);
3026         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3027         if (ret) {
3028             goto fail;
3029         }
3030         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3031     } else {
3032         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3033     }
3034 fail:
3035     if (copy_msg) {
3036         g_free(host_msg);
3037         host_msg = copy_msg;
3038     }
3039     unlock_user(host_msg, msg, 0);
3040     return ret;
3041 }
3042 
3043 /* do_recvfrom() Must return target values and target errnos. */
3044 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3045                             abi_ulong target_addr,
3046                             abi_ulong target_addrlen)
3047 {
3048     socklen_t addrlen, ret_addrlen;
3049     void *addr;
3050     void *host_msg;
3051     abi_long ret;
3052 
3053     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3054     if (!host_msg)
3055         return -TARGET_EFAULT;
3056     if (target_addr) {
3057         if (get_user_u32(addrlen, target_addrlen)) {
3058             ret = -TARGET_EFAULT;
3059             goto fail;
3060         }
3061         if ((int)addrlen < 0) {
3062             ret = -TARGET_EINVAL;
3063             goto fail;
3064         }
3065         addr = alloca(addrlen);
3066         ret_addrlen = addrlen;
3067         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3068                                       addr, &ret_addrlen));
3069     } else {
3070         addr = NULL; /* To keep compiler quiet.  */
3071         addrlen = 0; /* To keep compiler quiet.  */
3072         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3073     }
3074     if (!is_error(ret)) {
3075         if (fd_trans_host_to_target_data(fd)) {
3076             abi_long trans;
3077             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3078             if (is_error(trans)) {
3079                 ret = trans;
3080                 goto fail;
3081             }
3082         }
3083         if (target_addr) {
3084             host_to_target_sockaddr(target_addr, addr,
3085                                     MIN(addrlen, ret_addrlen));
3086             if (put_user_u32(ret_addrlen, target_addrlen)) {
3087                 ret = -TARGET_EFAULT;
3088                 goto fail;
3089             }
3090         }
3091         unlock_user(host_msg, msg, len);
3092     } else {
3093 fail:
3094         unlock_user(host_msg, msg, 0);
3095     }
3096     return ret;
3097 }
3098 
3099 #ifdef TARGET_NR_socketcall
3100 /* do_socketcall() must return target values and target errnos. */
3101 static abi_long do_socketcall(int num, abi_ulong vptr)
3102 {
3103     static const unsigned nargs[] = { /* number of arguments per operation */
3104         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3105         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3106         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3107         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3108         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3109         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3110         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3111         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3112         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3113         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3114         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3115         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3116         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3117         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3118         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3119         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3120         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3121         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3122         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3123         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3124     };
3125     abi_long a[6]; /* max 6 args */
3126     unsigned i;
3127 
3128     /* check the range of the first argument num */
3129     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3130     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3131         return -TARGET_EINVAL;
3132     }
3133     /* ensure we have space for args */
3134     if (nargs[num] > ARRAY_SIZE(a)) {
3135         return -TARGET_EINVAL;
3136     }
3137     /* collect the arguments in a[] according to nargs[] */
3138     for (i = 0; i < nargs[num]; ++i) {
3139         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3140             return -TARGET_EFAULT;
3141         }
3142     }
3143     /* now when we have the args, invoke the appropriate underlying function */
3144     switch (num) {
3145     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3146         return do_socket(a[0], a[1], a[2]);
3147     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3148         return do_bind(a[0], a[1], a[2]);
3149     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3150         return do_connect(a[0], a[1], a[2]);
3151     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3152         return get_errno(listen(a[0], a[1]));
3153     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3154         return do_accept4(a[0], a[1], a[2], 0);
3155     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3156         return do_getsockname(a[0], a[1], a[2]);
3157     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3158         return do_getpeername(a[0], a[1], a[2]);
3159     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3160         return do_socketpair(a[0], a[1], a[2], a[3]);
3161     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3162         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3163     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3164         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3165     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3166         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3167     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3168         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3169     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3170         return get_errno(shutdown(a[0], a[1]));
3171     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3172         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3173     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3174         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3175     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3176         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3177     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3178         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3179     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3180         return do_accept4(a[0], a[1], a[2], a[3]);
3181     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3182         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3183     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3184         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3185     default:
3186         gemu_log("Unsupported socketcall: %d\n", num);
3187         return -TARGET_EINVAL;
3188     }
3189 }
3190 #endif
3191 
3192 #define N_SHM_REGIONS	32
3193 
3194 static struct shm_region {
3195     abi_ulong start;
3196     abi_ulong size;
3197     bool in_use;
3198 } shm_regions[N_SHM_REGIONS];
3199 
3200 #ifndef TARGET_SEMID64_DS
3201 /* asm-generic version of this struct */
3202 struct target_semid64_ds
3203 {
3204   struct target_ipc_perm sem_perm;
3205   abi_ulong sem_otime;
3206 #if TARGET_ABI_BITS == 32
3207   abi_ulong __unused1;
3208 #endif
3209   abi_ulong sem_ctime;
3210 #if TARGET_ABI_BITS == 32
3211   abi_ulong __unused2;
3212 #endif
3213   abi_ulong sem_nsems;
3214   abi_ulong __unused3;
3215   abi_ulong __unused4;
3216 };
3217 #endif
3218 
3219 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3220                                                abi_ulong target_addr)
3221 {
3222     struct target_ipc_perm *target_ip;
3223     struct target_semid64_ds *target_sd;
3224 
3225     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3226         return -TARGET_EFAULT;
3227     target_ip = &(target_sd->sem_perm);
3228     host_ip->__key = tswap32(target_ip->__key);
3229     host_ip->uid = tswap32(target_ip->uid);
3230     host_ip->gid = tswap32(target_ip->gid);
3231     host_ip->cuid = tswap32(target_ip->cuid);
3232     host_ip->cgid = tswap32(target_ip->cgid);
3233 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3234     host_ip->mode = tswap32(target_ip->mode);
3235 #else
3236     host_ip->mode = tswap16(target_ip->mode);
3237 #endif
3238 #if defined(TARGET_PPC)
3239     host_ip->__seq = tswap32(target_ip->__seq);
3240 #else
3241     host_ip->__seq = tswap16(target_ip->__seq);
3242 #endif
3243     unlock_user_struct(target_sd, target_addr, 0);
3244     return 0;
3245 }
3246 
3247 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3248                                                struct ipc_perm *host_ip)
3249 {
3250     struct target_ipc_perm *target_ip;
3251     struct target_semid64_ds *target_sd;
3252 
3253     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3254         return -TARGET_EFAULT;
3255     target_ip = &(target_sd->sem_perm);
3256     target_ip->__key = tswap32(host_ip->__key);
3257     target_ip->uid = tswap32(host_ip->uid);
3258     target_ip->gid = tswap32(host_ip->gid);
3259     target_ip->cuid = tswap32(host_ip->cuid);
3260     target_ip->cgid = tswap32(host_ip->cgid);
3261 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3262     target_ip->mode = tswap32(host_ip->mode);
3263 #else
3264     target_ip->mode = tswap16(host_ip->mode);
3265 #endif
3266 #if defined(TARGET_PPC)
3267     target_ip->__seq = tswap32(host_ip->__seq);
3268 #else
3269     target_ip->__seq = tswap16(host_ip->__seq);
3270 #endif
3271     unlock_user_struct(target_sd, target_addr, 1);
3272     return 0;
3273 }
3274 
3275 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3276                                                abi_ulong target_addr)
3277 {
3278     struct target_semid64_ds *target_sd;
3279 
3280     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3281         return -TARGET_EFAULT;
3282     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3283         return -TARGET_EFAULT;
3284     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3285     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3286     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3287     unlock_user_struct(target_sd, target_addr, 0);
3288     return 0;
3289 }
3290 
3291 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3292                                                struct semid_ds *host_sd)
3293 {
3294     struct target_semid64_ds *target_sd;
3295 
3296     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3297         return -TARGET_EFAULT;
3298     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3299         return -TARGET_EFAULT;
3300     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3301     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3302     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3303     unlock_user_struct(target_sd, target_addr, 1);
3304     return 0;
3305 }
3306 
3307 struct target_seminfo {
3308     int semmap;
3309     int semmni;
3310     int semmns;
3311     int semmnu;
3312     int semmsl;
3313     int semopm;
3314     int semume;
3315     int semusz;
3316     int semvmx;
3317     int semaem;
3318 };
3319 
3320 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3321                                               struct seminfo *host_seminfo)
3322 {
3323     struct target_seminfo *target_seminfo;
3324     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3325         return -TARGET_EFAULT;
3326     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3327     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3328     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3329     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3330     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3331     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3332     __put_user(host_seminfo->semume, &target_seminfo->semume);
3333     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3334     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3335     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3336     unlock_user_struct(target_seminfo, target_addr, 1);
3337     return 0;
3338 }
3339 
3340 union semun {
3341 	int val;
3342 	struct semid_ds *buf;
3343 	unsigned short *array;
3344 	struct seminfo *__buf;
3345 };
3346 
3347 union target_semun {
3348 	int val;
3349 	abi_ulong buf;
3350 	abi_ulong array;
3351 	abi_ulong __buf;
3352 };
3353 
3354 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3355                                                abi_ulong target_addr)
3356 {
3357     int nsems;
3358     unsigned short *array;
3359     union semun semun;
3360     struct semid_ds semid_ds;
3361     int i, ret;
3362 
3363     semun.buf = &semid_ds;
3364 
3365     ret = semctl(semid, 0, IPC_STAT, semun);
3366     if (ret == -1)
3367         return get_errno(ret);
3368 
3369     nsems = semid_ds.sem_nsems;
3370 
3371     *host_array = g_try_new(unsigned short, nsems);
3372     if (!*host_array) {
3373         return -TARGET_ENOMEM;
3374     }
3375     array = lock_user(VERIFY_READ, target_addr,
3376                       nsems*sizeof(unsigned short), 1);
3377     if (!array) {
3378         g_free(*host_array);
3379         return -TARGET_EFAULT;
3380     }
3381 
3382     for(i=0; i<nsems; i++) {
3383         __get_user((*host_array)[i], &array[i]);
3384     }
3385     unlock_user(array, target_addr, 0);
3386 
3387     return 0;
3388 }
3389 
3390 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3391                                                unsigned short **host_array)
3392 {
3393     int nsems;
3394     unsigned short *array;
3395     union semun semun;
3396     struct semid_ds semid_ds;
3397     int i, ret;
3398 
3399     semun.buf = &semid_ds;
3400 
3401     ret = semctl(semid, 0, IPC_STAT, semun);
3402     if (ret == -1)
3403         return get_errno(ret);
3404 
3405     nsems = semid_ds.sem_nsems;
3406 
3407     array = lock_user(VERIFY_WRITE, target_addr,
3408                       nsems*sizeof(unsigned short), 0);
3409     if (!array)
3410         return -TARGET_EFAULT;
3411 
3412     for(i=0; i<nsems; i++) {
3413         __put_user((*host_array)[i], &array[i]);
3414     }
3415     g_free(*host_array);
3416     unlock_user(array, target_addr, 1);
3417 
3418     return 0;
3419 }
3420 
3421 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3422                                  abi_ulong target_arg)
3423 {
3424     union target_semun target_su = { .buf = target_arg };
3425     union semun arg;
3426     struct semid_ds dsarg;
3427     unsigned short *array = NULL;
3428     struct seminfo seminfo;
3429     abi_long ret = -TARGET_EINVAL;
3430     abi_long err;
3431     cmd &= 0xff;
3432 
3433     switch( cmd ) {
3434 	case GETVAL:
3435 	case SETVAL:
3436             /* In 64 bit cross-endian situations, we will erroneously pick up
3437              * the wrong half of the union for the "val" element.  To rectify
3438              * this, the entire 8-byte structure is byteswapped, followed by
3439 	     * a swap of the 4 byte val field. In other cases, the data is
3440 	     * already in proper host byte order. */
3441 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3442 		target_su.buf = tswapal(target_su.buf);
3443 		arg.val = tswap32(target_su.val);
3444 	    } else {
3445 		arg.val = target_su.val;
3446 	    }
3447             ret = get_errno(semctl(semid, semnum, cmd, arg));
3448             break;
3449 	case GETALL:
3450 	case SETALL:
3451             err = target_to_host_semarray(semid, &array, target_su.array);
3452             if (err)
3453                 return err;
3454             arg.array = array;
3455             ret = get_errno(semctl(semid, semnum, cmd, arg));
3456             err = host_to_target_semarray(semid, target_su.array, &array);
3457             if (err)
3458                 return err;
3459             break;
3460 	case IPC_STAT:
3461 	case IPC_SET:
3462 	case SEM_STAT:
3463             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3464             if (err)
3465                 return err;
3466             arg.buf = &dsarg;
3467             ret = get_errno(semctl(semid, semnum, cmd, arg));
3468             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3469             if (err)
3470                 return err;
3471             break;
3472 	case IPC_INFO:
3473 	case SEM_INFO:
3474             arg.__buf = &seminfo;
3475             ret = get_errno(semctl(semid, semnum, cmd, arg));
3476             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3477             if (err)
3478                 return err;
3479             break;
3480 	case IPC_RMID:
3481 	case GETPID:
3482 	case GETNCNT:
3483 	case GETZCNT:
3484             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3485             break;
3486     }
3487 
3488     return ret;
3489 }
3490 
3491 struct target_sembuf {
3492     unsigned short sem_num;
3493     short sem_op;
3494     short sem_flg;
3495 };
3496 
3497 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3498                                              abi_ulong target_addr,
3499                                              unsigned nsops)
3500 {
3501     struct target_sembuf *target_sembuf;
3502     int i;
3503 
3504     target_sembuf = lock_user(VERIFY_READ, target_addr,
3505                               nsops*sizeof(struct target_sembuf), 1);
3506     if (!target_sembuf)
3507         return -TARGET_EFAULT;
3508 
3509     for(i=0; i<nsops; i++) {
3510         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3511         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3512         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3513     }
3514 
3515     unlock_user(target_sembuf, target_addr, 0);
3516 
3517     return 0;
3518 }
3519 
3520 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3521 {
3522     struct sembuf sops[nsops];
3523 
3524     if (target_to_host_sembuf(sops, ptr, nsops))
3525         return -TARGET_EFAULT;
3526 
3527     return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3528 }
3529 
3530 struct target_msqid_ds
3531 {
3532     struct target_ipc_perm msg_perm;
3533     abi_ulong msg_stime;
3534 #if TARGET_ABI_BITS == 32
3535     abi_ulong __unused1;
3536 #endif
3537     abi_ulong msg_rtime;
3538 #if TARGET_ABI_BITS == 32
3539     abi_ulong __unused2;
3540 #endif
3541     abi_ulong msg_ctime;
3542 #if TARGET_ABI_BITS == 32
3543     abi_ulong __unused3;
3544 #endif
3545     abi_ulong __msg_cbytes;
3546     abi_ulong msg_qnum;
3547     abi_ulong msg_qbytes;
3548     abi_ulong msg_lspid;
3549     abi_ulong msg_lrpid;
3550     abi_ulong __unused4;
3551     abi_ulong __unused5;
3552 };
3553 
3554 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3555                                                abi_ulong target_addr)
3556 {
3557     struct target_msqid_ds *target_md;
3558 
3559     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3560         return -TARGET_EFAULT;
3561     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3562         return -TARGET_EFAULT;
3563     host_md->msg_stime = tswapal(target_md->msg_stime);
3564     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3565     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3566     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3567     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3568     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3569     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3570     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3571     unlock_user_struct(target_md, target_addr, 0);
3572     return 0;
3573 }
3574 
3575 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3576                                                struct msqid_ds *host_md)
3577 {
3578     struct target_msqid_ds *target_md;
3579 
3580     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3581         return -TARGET_EFAULT;
3582     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3583         return -TARGET_EFAULT;
3584     target_md->msg_stime = tswapal(host_md->msg_stime);
3585     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3586     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3587     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3588     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3589     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3590     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3591     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3592     unlock_user_struct(target_md, target_addr, 1);
3593     return 0;
3594 }
3595 
3596 struct target_msginfo {
3597     int msgpool;
3598     int msgmap;
3599     int msgmax;
3600     int msgmnb;
3601     int msgmni;
3602     int msgssz;
3603     int msgtql;
3604     unsigned short int msgseg;
3605 };
3606 
3607 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3608                                               struct msginfo *host_msginfo)
3609 {
3610     struct target_msginfo *target_msginfo;
3611     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3612         return -TARGET_EFAULT;
3613     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3614     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3615     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3616     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3617     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3618     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3619     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3620     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3621     unlock_user_struct(target_msginfo, target_addr, 1);
3622     return 0;
3623 }
3624 
3625 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3626 {
3627     struct msqid_ds dsarg;
3628     struct msginfo msginfo;
3629     abi_long ret = -TARGET_EINVAL;
3630 
3631     cmd &= 0xff;
3632 
3633     switch (cmd) {
3634     case IPC_STAT:
3635     case IPC_SET:
3636     case MSG_STAT:
3637         if (target_to_host_msqid_ds(&dsarg,ptr))
3638             return -TARGET_EFAULT;
3639         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3640         if (host_to_target_msqid_ds(ptr,&dsarg))
3641             return -TARGET_EFAULT;
3642         break;
3643     case IPC_RMID:
3644         ret = get_errno(msgctl(msgid, cmd, NULL));
3645         break;
3646     case IPC_INFO:
3647     case MSG_INFO:
3648         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3649         if (host_to_target_msginfo(ptr, &msginfo))
3650             return -TARGET_EFAULT;
3651         break;
3652     }
3653 
3654     return ret;
3655 }
3656 
3657 struct target_msgbuf {
3658     abi_long mtype;
3659     char	mtext[1];
3660 };
3661 
3662 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3663                                  ssize_t msgsz, int msgflg)
3664 {
3665     struct target_msgbuf *target_mb;
3666     struct msgbuf *host_mb;
3667     abi_long ret = 0;
3668 
3669     if (msgsz < 0) {
3670         return -TARGET_EINVAL;
3671     }
3672 
3673     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3674         return -TARGET_EFAULT;
3675     host_mb = g_try_malloc(msgsz + sizeof(long));
3676     if (!host_mb) {
3677         unlock_user_struct(target_mb, msgp, 0);
3678         return -TARGET_ENOMEM;
3679     }
3680     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3681     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3682     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3683     g_free(host_mb);
3684     unlock_user_struct(target_mb, msgp, 0);
3685 
3686     return ret;
3687 }
3688 
3689 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3690                                  ssize_t msgsz, abi_long msgtyp,
3691                                  int msgflg)
3692 {
3693     struct target_msgbuf *target_mb;
3694     char *target_mtext;
3695     struct msgbuf *host_mb;
3696     abi_long ret = 0;
3697 
3698     if (msgsz < 0) {
3699         return -TARGET_EINVAL;
3700     }
3701 
3702     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3703         return -TARGET_EFAULT;
3704 
3705     host_mb = g_try_malloc(msgsz + sizeof(long));
3706     if (!host_mb) {
3707         ret = -TARGET_ENOMEM;
3708         goto end;
3709     }
3710     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3711 
3712     if (ret > 0) {
3713         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3714         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3715         if (!target_mtext) {
3716             ret = -TARGET_EFAULT;
3717             goto end;
3718         }
3719         memcpy(target_mb->mtext, host_mb->mtext, ret);
3720         unlock_user(target_mtext, target_mtext_addr, ret);
3721     }
3722 
3723     target_mb->mtype = tswapal(host_mb->mtype);
3724 
3725 end:
3726     if (target_mb)
3727         unlock_user_struct(target_mb, msgp, 1);
3728     g_free(host_mb);
3729     return ret;
3730 }
3731 
3732 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3733                                                abi_ulong target_addr)
3734 {
3735     struct target_shmid_ds *target_sd;
3736 
3737     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3738         return -TARGET_EFAULT;
3739     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3740         return -TARGET_EFAULT;
3741     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3742     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3743     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3744     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3745     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3746     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3747     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3748     unlock_user_struct(target_sd, target_addr, 0);
3749     return 0;
3750 }
3751 
3752 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3753                                                struct shmid_ds *host_sd)
3754 {
3755     struct target_shmid_ds *target_sd;
3756 
3757     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3758         return -TARGET_EFAULT;
3759     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3760         return -TARGET_EFAULT;
3761     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3762     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3763     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3764     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3765     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3766     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3767     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3768     unlock_user_struct(target_sd, target_addr, 1);
3769     return 0;
3770 }
3771 
3772 struct  target_shminfo {
3773     abi_ulong shmmax;
3774     abi_ulong shmmin;
3775     abi_ulong shmmni;
3776     abi_ulong shmseg;
3777     abi_ulong shmall;
3778 };
3779 
3780 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3781                                               struct shminfo *host_shminfo)
3782 {
3783     struct target_shminfo *target_shminfo;
3784     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3785         return -TARGET_EFAULT;
3786     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3787     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3788     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3789     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3790     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3791     unlock_user_struct(target_shminfo, target_addr, 1);
3792     return 0;
3793 }
3794 
3795 struct target_shm_info {
3796     int used_ids;
3797     abi_ulong shm_tot;
3798     abi_ulong shm_rss;
3799     abi_ulong shm_swp;
3800     abi_ulong swap_attempts;
3801     abi_ulong swap_successes;
3802 };
3803 
3804 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3805                                                struct shm_info *host_shm_info)
3806 {
3807     struct target_shm_info *target_shm_info;
3808     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3809         return -TARGET_EFAULT;
3810     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3811     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3812     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3813     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3814     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3815     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3816     unlock_user_struct(target_shm_info, target_addr, 1);
3817     return 0;
3818 }
3819 
3820 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3821 {
3822     struct shmid_ds dsarg;
3823     struct shminfo shminfo;
3824     struct shm_info shm_info;
3825     abi_long ret = -TARGET_EINVAL;
3826 
3827     cmd &= 0xff;
3828 
3829     switch(cmd) {
3830     case IPC_STAT:
3831     case IPC_SET:
3832     case SHM_STAT:
3833         if (target_to_host_shmid_ds(&dsarg, buf))
3834             return -TARGET_EFAULT;
3835         ret = get_errno(shmctl(shmid, cmd, &dsarg));
3836         if (host_to_target_shmid_ds(buf, &dsarg))
3837             return -TARGET_EFAULT;
3838         break;
3839     case IPC_INFO:
3840         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3841         if (host_to_target_shminfo(buf, &shminfo))
3842             return -TARGET_EFAULT;
3843         break;
3844     case SHM_INFO:
3845         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3846         if (host_to_target_shm_info(buf, &shm_info))
3847             return -TARGET_EFAULT;
3848         break;
3849     case IPC_RMID:
3850     case SHM_LOCK:
3851     case SHM_UNLOCK:
3852         ret = get_errno(shmctl(shmid, cmd, NULL));
3853         break;
3854     }
3855 
3856     return ret;
3857 }
3858 
3859 #ifndef TARGET_FORCE_SHMLBA
3860 /* For most architectures, SHMLBA is the same as the page size;
3861  * some architectures have larger values, in which case they should
3862  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3863  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3864  * and defining its own value for SHMLBA.
3865  *
3866  * The kernel also permits SHMLBA to be set by the architecture to a
3867  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3868  * this means that addresses are rounded to the large size if
3869  * SHM_RND is set but addresses not aligned to that size are not rejected
3870  * as long as they are at least page-aligned. Since the only architecture
3871  * which uses this is ia64 this code doesn't provide for that oddity.
3872  */
3873 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
3874 {
3875     return TARGET_PAGE_SIZE;
3876 }
3877 #endif
3878 
3879 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
3880                                  int shmid, abi_ulong shmaddr, int shmflg)
3881 {
3882     abi_long raddr;
3883     void *host_raddr;
3884     struct shmid_ds shm_info;
3885     int i,ret;
3886     abi_ulong shmlba;
3887 
3888     /* find out the length of the shared memory segment */
3889     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3890     if (is_error(ret)) {
3891         /* can't get length, bail out */
3892         return ret;
3893     }
3894 
3895     shmlba = target_shmlba(cpu_env);
3896 
3897     if (shmaddr & (shmlba - 1)) {
3898         if (shmflg & SHM_RND) {
3899             shmaddr &= ~(shmlba - 1);
3900         } else {
3901             return -TARGET_EINVAL;
3902         }
3903     }
3904     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
3905         return -TARGET_EINVAL;
3906     }
3907 
3908     mmap_lock();
3909 
3910     if (shmaddr)
3911         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3912     else {
3913         abi_ulong mmap_start;
3914 
3915         mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3916 
3917         if (mmap_start == -1) {
3918             errno = ENOMEM;
3919             host_raddr = (void *)-1;
3920         } else
3921             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3922     }
3923 
3924     if (host_raddr == (void *)-1) {
3925         mmap_unlock();
3926         return get_errno((long)host_raddr);
3927     }
3928     raddr=h2g((unsigned long)host_raddr);
3929 
3930     page_set_flags(raddr, raddr + shm_info.shm_segsz,
3931                    PAGE_VALID | PAGE_READ |
3932                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3933 
3934     for (i = 0; i < N_SHM_REGIONS; i++) {
3935         if (!shm_regions[i].in_use) {
3936             shm_regions[i].in_use = true;
3937             shm_regions[i].start = raddr;
3938             shm_regions[i].size = shm_info.shm_segsz;
3939             break;
3940         }
3941     }
3942 
3943     mmap_unlock();
3944     return raddr;
3945 
3946 }
3947 
3948 static inline abi_long do_shmdt(abi_ulong shmaddr)
3949 {
3950     int i;
3951     abi_long rv;
3952 
3953     mmap_lock();
3954 
3955     for (i = 0; i < N_SHM_REGIONS; ++i) {
3956         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3957             shm_regions[i].in_use = false;
3958             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3959             break;
3960         }
3961     }
3962     rv = get_errno(shmdt(g2h(shmaddr)));
3963 
3964     mmap_unlock();
3965 
3966     return rv;
3967 }
3968 
3969 #ifdef TARGET_NR_ipc
3970 /* ??? This only works with linear mappings.  */
3971 /* do_ipc() must return target values and target errnos. */
3972 static abi_long do_ipc(CPUArchState *cpu_env,
3973                        unsigned int call, abi_long first,
3974                        abi_long second, abi_long third,
3975                        abi_long ptr, abi_long fifth)
3976 {
3977     int version;
3978     abi_long ret = 0;
3979 
3980     version = call >> 16;
3981     call &= 0xffff;
3982 
3983     switch (call) {
3984     case IPCOP_semop:
3985         ret = do_semop(first, ptr, second);
3986         break;
3987 
3988     case IPCOP_semget:
3989         ret = get_errno(semget(first, second, third));
3990         break;
3991 
3992     case IPCOP_semctl: {
3993         /* The semun argument to semctl is passed by value, so dereference the
3994          * ptr argument. */
3995         abi_ulong atptr;
3996         get_user_ual(atptr, ptr);
3997         ret = do_semctl(first, second, third, atptr);
3998         break;
3999     }
4000 
4001     case IPCOP_msgget:
4002         ret = get_errno(msgget(first, second));
4003         break;
4004 
4005     case IPCOP_msgsnd:
4006         ret = do_msgsnd(first, ptr, second, third);
4007         break;
4008 
4009     case IPCOP_msgctl:
4010         ret = do_msgctl(first, second, ptr);
4011         break;
4012 
4013     case IPCOP_msgrcv:
4014         switch (version) {
4015         case 0:
4016             {
4017                 struct target_ipc_kludge {
4018                     abi_long msgp;
4019                     abi_long msgtyp;
4020                 } *tmp;
4021 
4022                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4023                     ret = -TARGET_EFAULT;
4024                     break;
4025                 }
4026 
4027                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4028 
4029                 unlock_user_struct(tmp, ptr, 0);
4030                 break;
4031             }
4032         default:
4033             ret = do_msgrcv(first, ptr, second, fifth, third);
4034         }
4035         break;
4036 
4037     case IPCOP_shmat:
4038         switch (version) {
4039         default:
4040         {
4041             abi_ulong raddr;
4042             raddr = do_shmat(cpu_env, first, ptr, second);
4043             if (is_error(raddr))
4044                 return get_errno(raddr);
4045             if (put_user_ual(raddr, third))
4046                 return -TARGET_EFAULT;
4047             break;
4048         }
4049         case 1:
4050             ret = -TARGET_EINVAL;
4051             break;
4052         }
4053 	break;
4054     case IPCOP_shmdt:
4055         ret = do_shmdt(ptr);
4056 	break;
4057 
4058     case IPCOP_shmget:
4059 	/* IPC_* flag values are the same on all linux platforms */
4060 	ret = get_errno(shmget(first, second, third));
4061 	break;
4062 
4063 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4064     case IPCOP_shmctl:
4065         ret = do_shmctl(first, second, ptr);
4066         break;
4067     default:
4068 	gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4069 	ret = -TARGET_ENOSYS;
4070 	break;
4071     }
4072     return ret;
4073 }
4074 #endif
4075 
4076 /* kernel structure types definitions */
4077 
4078 #define STRUCT(name, ...) STRUCT_ ## name,
4079 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4080 enum {
4081 #include "syscall_types.h"
4082 STRUCT_MAX
4083 };
4084 #undef STRUCT
4085 #undef STRUCT_SPECIAL
4086 
4087 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4088 #define STRUCT_SPECIAL(name)
4089 #include "syscall_types.h"
4090 #undef STRUCT
4091 #undef STRUCT_SPECIAL
4092 
4093 typedef struct IOCTLEntry IOCTLEntry;
4094 
4095 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4096                              int fd, int cmd, abi_long arg);
4097 
4098 struct IOCTLEntry {
4099     int target_cmd;
4100     unsigned int host_cmd;
4101     const char *name;
4102     int access;
4103     do_ioctl_fn *do_ioctl;
4104     const argtype arg_type[5];
4105 };
4106 
4107 #define IOC_R 0x0001
4108 #define IOC_W 0x0002
4109 #define IOC_RW (IOC_R | IOC_W)
4110 
4111 #define MAX_STRUCT_SIZE 4096
4112 
4113 #ifdef CONFIG_FIEMAP
4114 /* So fiemap access checks don't overflow on 32 bit systems.
4115  * This is very slightly smaller than the limit imposed by
4116  * the underlying kernel.
4117  */
4118 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4119                             / sizeof(struct fiemap_extent))
4120 
4121 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4122                                        int fd, int cmd, abi_long arg)
4123 {
4124     /* The parameter for this ioctl is a struct fiemap followed
4125      * by an array of struct fiemap_extent whose size is set
4126      * in fiemap->fm_extent_count. The array is filled in by the
4127      * ioctl.
4128      */
4129     int target_size_in, target_size_out;
4130     struct fiemap *fm;
4131     const argtype *arg_type = ie->arg_type;
4132     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4133     void *argptr, *p;
4134     abi_long ret;
4135     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4136     uint32_t outbufsz;
4137     int free_fm = 0;
4138 
4139     assert(arg_type[0] == TYPE_PTR);
4140     assert(ie->access == IOC_RW);
4141     arg_type++;
4142     target_size_in = thunk_type_size(arg_type, 0);
4143     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4144     if (!argptr) {
4145         return -TARGET_EFAULT;
4146     }
4147     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4148     unlock_user(argptr, arg, 0);
4149     fm = (struct fiemap *)buf_temp;
4150     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4151         return -TARGET_EINVAL;
4152     }
4153 
4154     outbufsz = sizeof (*fm) +
4155         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4156 
4157     if (outbufsz > MAX_STRUCT_SIZE) {
4158         /* We can't fit all the extents into the fixed size buffer.
4159          * Allocate one that is large enough and use it instead.
4160          */
4161         fm = g_try_malloc(outbufsz);
4162         if (!fm) {
4163             return -TARGET_ENOMEM;
4164         }
4165         memcpy(fm, buf_temp, sizeof(struct fiemap));
4166         free_fm = 1;
4167     }
4168     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4169     if (!is_error(ret)) {
4170         target_size_out = target_size_in;
4171         /* An extent_count of 0 means we were only counting the extents
4172          * so there are no structs to copy
4173          */
4174         if (fm->fm_extent_count != 0) {
4175             target_size_out += fm->fm_mapped_extents * extent_size;
4176         }
4177         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4178         if (!argptr) {
4179             ret = -TARGET_EFAULT;
4180         } else {
4181             /* Convert the struct fiemap */
4182             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4183             if (fm->fm_extent_count != 0) {
4184                 p = argptr + target_size_in;
4185                 /* ...and then all the struct fiemap_extents */
4186                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4187                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4188                                   THUNK_TARGET);
4189                     p += extent_size;
4190                 }
4191             }
4192             unlock_user(argptr, arg, target_size_out);
4193         }
4194     }
4195     if (free_fm) {
4196         g_free(fm);
4197     }
4198     return ret;
4199 }
4200 #endif
4201 
4202 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4203                                 int fd, int cmd, abi_long arg)
4204 {
4205     const argtype *arg_type = ie->arg_type;
4206     int target_size;
4207     void *argptr;
4208     int ret;
4209     struct ifconf *host_ifconf;
4210     uint32_t outbufsz;
4211     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4212     int target_ifreq_size;
4213     int nb_ifreq;
4214     int free_buf = 0;
4215     int i;
4216     int target_ifc_len;
4217     abi_long target_ifc_buf;
4218     int host_ifc_len;
4219     char *host_ifc_buf;
4220 
4221     assert(arg_type[0] == TYPE_PTR);
4222     assert(ie->access == IOC_RW);
4223 
4224     arg_type++;
4225     target_size = thunk_type_size(arg_type, 0);
4226 
4227     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4228     if (!argptr)
4229         return -TARGET_EFAULT;
4230     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4231     unlock_user(argptr, arg, 0);
4232 
4233     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4234     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4235     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4236 
4237     if (target_ifc_buf != 0) {
4238         target_ifc_len = host_ifconf->ifc_len;
4239         nb_ifreq = target_ifc_len / target_ifreq_size;
4240         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4241 
4242         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4243         if (outbufsz > MAX_STRUCT_SIZE) {
4244             /*
4245              * We can't fit all the extents into the fixed size buffer.
4246              * Allocate one that is large enough and use it instead.
4247              */
4248             host_ifconf = malloc(outbufsz);
4249             if (!host_ifconf) {
4250                 return -TARGET_ENOMEM;
4251             }
4252             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4253             free_buf = 1;
4254         }
4255         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4256 
4257         host_ifconf->ifc_len = host_ifc_len;
4258     } else {
4259       host_ifc_buf = NULL;
4260     }
4261     host_ifconf->ifc_buf = host_ifc_buf;
4262 
4263     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4264     if (!is_error(ret)) {
4265 	/* convert host ifc_len to target ifc_len */
4266 
4267         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4268         target_ifc_len = nb_ifreq * target_ifreq_size;
4269         host_ifconf->ifc_len = target_ifc_len;
4270 
4271 	/* restore target ifc_buf */
4272 
4273         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4274 
4275 	/* copy struct ifconf to target user */
4276 
4277         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4278         if (!argptr)
4279             return -TARGET_EFAULT;
4280         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4281         unlock_user(argptr, arg, target_size);
4282 
4283         if (target_ifc_buf != 0) {
4284             /* copy ifreq[] to target user */
4285             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4286             for (i = 0; i < nb_ifreq ; i++) {
4287                 thunk_convert(argptr + i * target_ifreq_size,
4288                               host_ifc_buf + i * sizeof(struct ifreq),
4289                               ifreq_arg_type, THUNK_TARGET);
4290             }
4291             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4292         }
4293     }
4294 
4295     if (free_buf) {
4296         free(host_ifconf);
4297     }
4298 
4299     return ret;
4300 }
4301 
4302 #if defined(CONFIG_USBFS)
4303 #if HOST_LONG_BITS > 64
4304 #error USBDEVFS thunks do not support >64 bit hosts yet.
4305 #endif
4306 struct live_urb {
4307     uint64_t target_urb_adr;
4308     uint64_t target_buf_adr;
4309     char *target_buf_ptr;
4310     struct usbdevfs_urb host_urb;
4311 };
4312 
4313 static GHashTable *usbdevfs_urb_hashtable(void)
4314 {
4315     static GHashTable *urb_hashtable;
4316 
4317     if (!urb_hashtable) {
4318         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4319     }
4320     return urb_hashtable;
4321 }
4322 
4323 static void urb_hashtable_insert(struct live_urb *urb)
4324 {
4325     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4326     g_hash_table_insert(urb_hashtable, urb, urb);
4327 }
4328 
4329 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4330 {
4331     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4332     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4333 }
4334 
4335 static void urb_hashtable_remove(struct live_urb *urb)
4336 {
4337     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4338     g_hash_table_remove(urb_hashtable, urb);
4339 }
4340 
4341 static abi_long
4342 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4343                           int fd, int cmd, abi_long arg)
4344 {
4345     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4346     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4347     struct live_urb *lurb;
4348     void *argptr;
4349     uint64_t hurb;
4350     int target_size;
4351     uintptr_t target_urb_adr;
4352     abi_long ret;
4353 
4354     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4355 
4356     memset(buf_temp, 0, sizeof(uint64_t));
4357     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4358     if (is_error(ret)) {
4359         return ret;
4360     }
4361 
4362     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4363     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4364     if (!lurb->target_urb_adr) {
4365         return -TARGET_EFAULT;
4366     }
4367     urb_hashtable_remove(lurb);
4368     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4369         lurb->host_urb.buffer_length);
4370     lurb->target_buf_ptr = NULL;
4371 
4372     /* restore the guest buffer pointer */
4373     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4374 
4375     /* update the guest urb struct */
4376     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4377     if (!argptr) {
4378         g_free(lurb);
4379         return -TARGET_EFAULT;
4380     }
4381     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4382     unlock_user(argptr, lurb->target_urb_adr, target_size);
4383 
4384     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4385     /* write back the urb handle */
4386     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4387     if (!argptr) {
4388         g_free(lurb);
4389         return -TARGET_EFAULT;
4390     }
4391 
4392     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4393     target_urb_adr = lurb->target_urb_adr;
4394     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4395     unlock_user(argptr, arg, target_size);
4396 
4397     g_free(lurb);
4398     return ret;
4399 }
4400 
4401 static abi_long
4402 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4403                              uint8_t *buf_temp __attribute__((unused)),
4404                              int fd, int cmd, abi_long arg)
4405 {
4406     struct live_urb *lurb;
4407 
4408     /* map target address back to host URB with metadata. */
4409     lurb = urb_hashtable_lookup(arg);
4410     if (!lurb) {
4411         return -TARGET_EFAULT;
4412     }
4413     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4414 }
4415 
4416 static abi_long
4417 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4418                             int fd, int cmd, abi_long arg)
4419 {
4420     const argtype *arg_type = ie->arg_type;
4421     int target_size;
4422     abi_long ret;
4423     void *argptr;
4424     int rw_dir;
4425     struct live_urb *lurb;
4426 
4427     /*
4428      * each submitted URB needs to map to a unique ID for the
4429      * kernel, and that unique ID needs to be a pointer to
4430      * host memory.  hence, we need to malloc for each URB.
4431      * isochronous transfers have a variable length struct.
4432      */
4433     arg_type++;
4434     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4435 
4436     /* construct host copy of urb and metadata */
4437     lurb = g_try_malloc0(sizeof(struct live_urb));
4438     if (!lurb) {
4439         return -TARGET_ENOMEM;
4440     }
4441 
4442     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4443     if (!argptr) {
4444         g_free(lurb);
4445         return -TARGET_EFAULT;
4446     }
4447     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4448     unlock_user(argptr, arg, 0);
4449 
4450     lurb->target_urb_adr = arg;
4451     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4452 
4453     /* buffer space used depends on endpoint type so lock the entire buffer */
4454     /* control type urbs should check the buffer contents for true direction */
4455     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4456     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4457         lurb->host_urb.buffer_length, 1);
4458     if (lurb->target_buf_ptr == NULL) {
4459         g_free(lurb);
4460         return -TARGET_EFAULT;
4461     }
4462 
4463     /* update buffer pointer in host copy */
4464     lurb->host_urb.buffer = lurb->target_buf_ptr;
4465 
4466     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4467     if (is_error(ret)) {
4468         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4469         g_free(lurb);
4470     } else {
4471         urb_hashtable_insert(lurb);
4472     }
4473 
4474     return ret;
4475 }
4476 #endif /* CONFIG_USBFS */
4477 
4478 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4479                             int cmd, abi_long arg)
4480 {
4481     void *argptr;
4482     struct dm_ioctl *host_dm;
4483     abi_long guest_data;
4484     uint32_t guest_data_size;
4485     int target_size;
4486     const argtype *arg_type = ie->arg_type;
4487     abi_long ret;
4488     void *big_buf = NULL;
4489     char *host_data;
4490 
4491     arg_type++;
4492     target_size = thunk_type_size(arg_type, 0);
4493     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4494     if (!argptr) {
4495         ret = -TARGET_EFAULT;
4496         goto out;
4497     }
4498     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4499     unlock_user(argptr, arg, 0);
4500 
4501     /* buf_temp is too small, so fetch things into a bigger buffer */
4502     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4503     memcpy(big_buf, buf_temp, target_size);
4504     buf_temp = big_buf;
4505     host_dm = big_buf;
4506 
4507     guest_data = arg + host_dm->data_start;
4508     if ((guest_data - arg) < 0) {
4509         ret = -TARGET_EINVAL;
4510         goto out;
4511     }
4512     guest_data_size = host_dm->data_size - host_dm->data_start;
4513     host_data = (char*)host_dm + host_dm->data_start;
4514 
4515     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4516     if (!argptr) {
4517         ret = -TARGET_EFAULT;
4518         goto out;
4519     }
4520 
4521     switch (ie->host_cmd) {
4522     case DM_REMOVE_ALL:
4523     case DM_LIST_DEVICES:
4524     case DM_DEV_CREATE:
4525     case DM_DEV_REMOVE:
4526     case DM_DEV_SUSPEND:
4527     case DM_DEV_STATUS:
4528     case DM_DEV_WAIT:
4529     case DM_TABLE_STATUS:
4530     case DM_TABLE_CLEAR:
4531     case DM_TABLE_DEPS:
4532     case DM_LIST_VERSIONS:
4533         /* no input data */
4534         break;
4535     case DM_DEV_RENAME:
4536     case DM_DEV_SET_GEOMETRY:
4537         /* data contains only strings */
4538         memcpy(host_data, argptr, guest_data_size);
4539         break;
4540     case DM_TARGET_MSG:
4541         memcpy(host_data, argptr, guest_data_size);
4542         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4543         break;
4544     case DM_TABLE_LOAD:
4545     {
4546         void *gspec = argptr;
4547         void *cur_data = host_data;
4548         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4549         int spec_size = thunk_type_size(arg_type, 0);
4550         int i;
4551 
4552         for (i = 0; i < host_dm->target_count; i++) {
4553             struct dm_target_spec *spec = cur_data;
4554             uint32_t next;
4555             int slen;
4556 
4557             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4558             slen = strlen((char*)gspec + spec_size) + 1;
4559             next = spec->next;
4560             spec->next = sizeof(*spec) + slen;
4561             strcpy((char*)&spec[1], gspec + spec_size);
4562             gspec += next;
4563             cur_data += spec->next;
4564         }
4565         break;
4566     }
4567     default:
4568         ret = -TARGET_EINVAL;
4569         unlock_user(argptr, guest_data, 0);
4570         goto out;
4571     }
4572     unlock_user(argptr, guest_data, 0);
4573 
4574     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4575     if (!is_error(ret)) {
4576         guest_data = arg + host_dm->data_start;
4577         guest_data_size = host_dm->data_size - host_dm->data_start;
4578         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4579         switch (ie->host_cmd) {
4580         case DM_REMOVE_ALL:
4581         case DM_DEV_CREATE:
4582         case DM_DEV_REMOVE:
4583         case DM_DEV_RENAME:
4584         case DM_DEV_SUSPEND:
4585         case DM_DEV_STATUS:
4586         case DM_TABLE_LOAD:
4587         case DM_TABLE_CLEAR:
4588         case DM_TARGET_MSG:
4589         case DM_DEV_SET_GEOMETRY:
4590             /* no return data */
4591             break;
4592         case DM_LIST_DEVICES:
4593         {
4594             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4595             uint32_t remaining_data = guest_data_size;
4596             void *cur_data = argptr;
4597             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4598             int nl_size = 12; /* can't use thunk_size due to alignment */
4599 
4600             while (1) {
4601                 uint32_t next = nl->next;
4602                 if (next) {
4603                     nl->next = nl_size + (strlen(nl->name) + 1);
4604                 }
4605                 if (remaining_data < nl->next) {
4606                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4607                     break;
4608                 }
4609                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4610                 strcpy(cur_data + nl_size, nl->name);
4611                 cur_data += nl->next;
4612                 remaining_data -= nl->next;
4613                 if (!next) {
4614                     break;
4615                 }
4616                 nl = (void*)nl + next;
4617             }
4618             break;
4619         }
4620         case DM_DEV_WAIT:
4621         case DM_TABLE_STATUS:
4622         {
4623             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4624             void *cur_data = argptr;
4625             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4626             int spec_size = thunk_type_size(arg_type, 0);
4627             int i;
4628 
4629             for (i = 0; i < host_dm->target_count; i++) {
4630                 uint32_t next = spec->next;
4631                 int slen = strlen((char*)&spec[1]) + 1;
4632                 spec->next = (cur_data - argptr) + spec_size + slen;
4633                 if (guest_data_size < spec->next) {
4634                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4635                     break;
4636                 }
4637                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4638                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4639                 cur_data = argptr + spec->next;
4640                 spec = (void*)host_dm + host_dm->data_start + next;
4641             }
4642             break;
4643         }
4644         case DM_TABLE_DEPS:
4645         {
4646             void *hdata = (void*)host_dm + host_dm->data_start;
4647             int count = *(uint32_t*)hdata;
4648             uint64_t *hdev = hdata + 8;
4649             uint64_t *gdev = argptr + 8;
4650             int i;
4651 
4652             *(uint32_t*)argptr = tswap32(count);
4653             for (i = 0; i < count; i++) {
4654                 *gdev = tswap64(*hdev);
4655                 gdev++;
4656                 hdev++;
4657             }
4658             break;
4659         }
4660         case DM_LIST_VERSIONS:
4661         {
4662             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4663             uint32_t remaining_data = guest_data_size;
4664             void *cur_data = argptr;
4665             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4666             int vers_size = thunk_type_size(arg_type, 0);
4667 
4668             while (1) {
4669                 uint32_t next = vers->next;
4670                 if (next) {
4671                     vers->next = vers_size + (strlen(vers->name) + 1);
4672                 }
4673                 if (remaining_data < vers->next) {
4674                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4675                     break;
4676                 }
4677                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4678                 strcpy(cur_data + vers_size, vers->name);
4679                 cur_data += vers->next;
4680                 remaining_data -= vers->next;
4681                 if (!next) {
4682                     break;
4683                 }
4684                 vers = (void*)vers + next;
4685             }
4686             break;
4687         }
4688         default:
4689             unlock_user(argptr, guest_data, 0);
4690             ret = -TARGET_EINVAL;
4691             goto out;
4692         }
4693         unlock_user(argptr, guest_data, guest_data_size);
4694 
4695         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4696         if (!argptr) {
4697             ret = -TARGET_EFAULT;
4698             goto out;
4699         }
4700         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4701         unlock_user(argptr, arg, target_size);
4702     }
4703 out:
4704     g_free(big_buf);
4705     return ret;
4706 }
4707 
4708 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4709                                int cmd, abi_long arg)
4710 {
4711     void *argptr;
4712     int target_size;
4713     const argtype *arg_type = ie->arg_type;
4714     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4715     abi_long ret;
4716 
4717     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4718     struct blkpg_partition host_part;
4719 
4720     /* Read and convert blkpg */
4721     arg_type++;
4722     target_size = thunk_type_size(arg_type, 0);
4723     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4724     if (!argptr) {
4725         ret = -TARGET_EFAULT;
4726         goto out;
4727     }
4728     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4729     unlock_user(argptr, arg, 0);
4730 
4731     switch (host_blkpg->op) {
4732     case BLKPG_ADD_PARTITION:
4733     case BLKPG_DEL_PARTITION:
4734         /* payload is struct blkpg_partition */
4735         break;
4736     default:
4737         /* Unknown opcode */
4738         ret = -TARGET_EINVAL;
4739         goto out;
4740     }
4741 
4742     /* Read and convert blkpg->data */
4743     arg = (abi_long)(uintptr_t)host_blkpg->data;
4744     target_size = thunk_type_size(part_arg_type, 0);
4745     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4746     if (!argptr) {
4747         ret = -TARGET_EFAULT;
4748         goto out;
4749     }
4750     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4751     unlock_user(argptr, arg, 0);
4752 
4753     /* Swizzle the data pointer to our local copy and call! */
4754     host_blkpg->data = &host_part;
4755     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4756 
4757 out:
4758     return ret;
4759 }
4760 
4761 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4762                                 int fd, int cmd, abi_long arg)
4763 {
4764     const argtype *arg_type = ie->arg_type;
4765     const StructEntry *se;
4766     const argtype *field_types;
4767     const int *dst_offsets, *src_offsets;
4768     int target_size;
4769     void *argptr;
4770     abi_ulong *target_rt_dev_ptr = NULL;
4771     unsigned long *host_rt_dev_ptr = NULL;
4772     abi_long ret;
4773     int i;
4774 
4775     assert(ie->access == IOC_W);
4776     assert(*arg_type == TYPE_PTR);
4777     arg_type++;
4778     assert(*arg_type == TYPE_STRUCT);
4779     target_size = thunk_type_size(arg_type, 0);
4780     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4781     if (!argptr) {
4782         return -TARGET_EFAULT;
4783     }
4784     arg_type++;
4785     assert(*arg_type == (int)STRUCT_rtentry);
4786     se = struct_entries + *arg_type++;
4787     assert(se->convert[0] == NULL);
4788     /* convert struct here to be able to catch rt_dev string */
4789     field_types = se->field_types;
4790     dst_offsets = se->field_offsets[THUNK_HOST];
4791     src_offsets = se->field_offsets[THUNK_TARGET];
4792     for (i = 0; i < se->nb_fields; i++) {
4793         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4794             assert(*field_types == TYPE_PTRVOID);
4795             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4796             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4797             if (*target_rt_dev_ptr != 0) {
4798                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4799                                                   tswapal(*target_rt_dev_ptr));
4800                 if (!*host_rt_dev_ptr) {
4801                     unlock_user(argptr, arg, 0);
4802                     return -TARGET_EFAULT;
4803                 }
4804             } else {
4805                 *host_rt_dev_ptr = 0;
4806             }
4807             field_types++;
4808             continue;
4809         }
4810         field_types = thunk_convert(buf_temp + dst_offsets[i],
4811                                     argptr + src_offsets[i],
4812                                     field_types, THUNK_HOST);
4813     }
4814     unlock_user(argptr, arg, 0);
4815 
4816     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4817 
4818     assert(host_rt_dev_ptr != NULL);
4819     assert(target_rt_dev_ptr != NULL);
4820     if (*host_rt_dev_ptr != 0) {
4821         unlock_user((void *)*host_rt_dev_ptr,
4822                     *target_rt_dev_ptr, 0);
4823     }
4824     return ret;
4825 }
4826 
4827 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4828                                      int fd, int cmd, abi_long arg)
4829 {
4830     int sig = target_to_host_signal(arg);
4831     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4832 }
4833 
4834 #ifdef TIOCGPTPEER
4835 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
4836                                      int fd, int cmd, abi_long arg)
4837 {
4838     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
4839     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
4840 }
4841 #endif
4842 
4843 static IOCTLEntry ioctl_entries[] = {
4844 #define IOCTL(cmd, access, ...) \
4845     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
4846 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
4847     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
4848 #define IOCTL_IGNORE(cmd) \
4849     { TARGET_ ## cmd, 0, #cmd },
4850 #include "ioctls.h"
4851     { 0, 0, },
4852 };
4853 
4854 /* ??? Implement proper locking for ioctls.  */
4855 /* do_ioctl() Must return target values and target errnos. */
4856 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4857 {
4858     const IOCTLEntry *ie;
4859     const argtype *arg_type;
4860     abi_long ret;
4861     uint8_t buf_temp[MAX_STRUCT_SIZE];
4862     int target_size;
4863     void *argptr;
4864 
4865     ie = ioctl_entries;
4866     for(;;) {
4867         if (ie->target_cmd == 0) {
4868             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4869             return -TARGET_ENOSYS;
4870         }
4871         if (ie->target_cmd == cmd)
4872             break;
4873         ie++;
4874     }
4875     arg_type = ie->arg_type;
4876     if (ie->do_ioctl) {
4877         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4878     } else if (!ie->host_cmd) {
4879         /* Some architectures define BSD ioctls in their headers
4880            that are not implemented in Linux.  */
4881         return -TARGET_ENOSYS;
4882     }
4883 
4884     switch(arg_type[0]) {
4885     case TYPE_NULL:
4886         /* no argument */
4887         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
4888         break;
4889     case TYPE_PTRVOID:
4890     case TYPE_INT:
4891         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
4892         break;
4893     case TYPE_PTR:
4894         arg_type++;
4895         target_size = thunk_type_size(arg_type, 0);
4896         switch(ie->access) {
4897         case IOC_R:
4898             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4899             if (!is_error(ret)) {
4900                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4901                 if (!argptr)
4902                     return -TARGET_EFAULT;
4903                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4904                 unlock_user(argptr, arg, target_size);
4905             }
4906             break;
4907         case IOC_W:
4908             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4909             if (!argptr)
4910                 return -TARGET_EFAULT;
4911             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4912             unlock_user(argptr, arg, 0);
4913             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4914             break;
4915         default:
4916         case IOC_RW:
4917             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4918             if (!argptr)
4919                 return -TARGET_EFAULT;
4920             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4921             unlock_user(argptr, arg, 0);
4922             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4923             if (!is_error(ret)) {
4924                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4925                 if (!argptr)
4926                     return -TARGET_EFAULT;
4927                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4928                 unlock_user(argptr, arg, target_size);
4929             }
4930             break;
4931         }
4932         break;
4933     default:
4934         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4935                  (long)cmd, arg_type[0]);
4936         ret = -TARGET_ENOSYS;
4937         break;
4938     }
4939     return ret;
4940 }
4941 
4942 static const bitmask_transtbl iflag_tbl[] = {
4943         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4944         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4945         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4946         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4947         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4948         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4949         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4950         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4951         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4952         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4953         { TARGET_IXON, TARGET_IXON, IXON, IXON },
4954         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4955         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4956         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4957         { 0, 0, 0, 0 }
4958 };
4959 
4960 static const bitmask_transtbl oflag_tbl[] = {
4961 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4962 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4963 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4964 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4965 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4966 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4967 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4968 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4969 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4970 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4971 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4972 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4973 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4974 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4975 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4976 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4977 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4978 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4979 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4980 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4981 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4982 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4983 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4984 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4985 	{ 0, 0, 0, 0 }
4986 };
4987 
4988 static const bitmask_transtbl cflag_tbl[] = {
4989 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4990 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4991 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4992 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4993 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4994 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4995 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4996 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4997 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4998 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4999 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5000 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5001 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5002 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5003 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5004 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5005 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5006 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5007 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5008 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5009 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5010 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5011 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5012 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5013 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5014 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5015 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5016 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5017 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5018 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5019 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5020 	{ 0, 0, 0, 0 }
5021 };
5022 
5023 static const bitmask_transtbl lflag_tbl[] = {
5024 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5025 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5026 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5027 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5028 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5029 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5030 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5031 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5032 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5033 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5034 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5035 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5036 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5037 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5038 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5039 	{ 0, 0, 0, 0 }
5040 };
5041 
5042 static void target_to_host_termios (void *dst, const void *src)
5043 {
5044     struct host_termios *host = dst;
5045     const struct target_termios *target = src;
5046 
5047     host->c_iflag =
5048         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5049     host->c_oflag =
5050         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5051     host->c_cflag =
5052         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5053     host->c_lflag =
5054         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5055     host->c_line = target->c_line;
5056 
5057     memset(host->c_cc, 0, sizeof(host->c_cc));
5058     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5059     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5060     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5061     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5062     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5063     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5064     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5065     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5066     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5067     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5068     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5069     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5070     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5071     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5072     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5073     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5074     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5075 }
5076 
5077 static void host_to_target_termios (void *dst, const void *src)
5078 {
5079     struct target_termios *target = dst;
5080     const struct host_termios *host = src;
5081 
5082     target->c_iflag =
5083         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5084     target->c_oflag =
5085         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5086     target->c_cflag =
5087         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5088     target->c_lflag =
5089         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5090     target->c_line = host->c_line;
5091 
5092     memset(target->c_cc, 0, sizeof(target->c_cc));
5093     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5094     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5095     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5096     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5097     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5098     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5099     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5100     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5101     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5102     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5103     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5104     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5105     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5106     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5107     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5108     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5109     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5110 }
5111 
5112 static const StructEntry struct_termios_def = {
5113     .convert = { host_to_target_termios, target_to_host_termios },
5114     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5115     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5116 };
5117 
5118 static bitmask_transtbl mmap_flags_tbl[] = {
5119     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5120     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5121     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5122     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5123       MAP_ANONYMOUS, MAP_ANONYMOUS },
5124     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5125       MAP_GROWSDOWN, MAP_GROWSDOWN },
5126     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5127       MAP_DENYWRITE, MAP_DENYWRITE },
5128     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5129       MAP_EXECUTABLE, MAP_EXECUTABLE },
5130     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5131     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5132       MAP_NORESERVE, MAP_NORESERVE },
5133     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5134     /* MAP_STACK had been ignored by the kernel for quite some time.
5135        Recognize it for the target insofar as we do not want to pass
5136        it through to the host.  */
5137     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5138     { 0, 0, 0, 0 }
5139 };
5140 
5141 #if defined(TARGET_I386)
5142 
5143 /* NOTE: there is really one LDT for all the threads */
5144 static uint8_t *ldt_table;
5145 
5146 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5147 {
5148     int size;
5149     void *p;
5150 
5151     if (!ldt_table)
5152         return 0;
5153     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5154     if (size > bytecount)
5155         size = bytecount;
5156     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5157     if (!p)
5158         return -TARGET_EFAULT;
5159     /* ??? Should this by byteswapped?  */
5160     memcpy(p, ldt_table, size);
5161     unlock_user(p, ptr, size);
5162     return size;
5163 }
5164 
5165 /* XXX: add locking support */
5166 static abi_long write_ldt(CPUX86State *env,
5167                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5168 {
5169     struct target_modify_ldt_ldt_s ldt_info;
5170     struct target_modify_ldt_ldt_s *target_ldt_info;
5171     int seg_32bit, contents, read_exec_only, limit_in_pages;
5172     int seg_not_present, useable, lm;
5173     uint32_t *lp, entry_1, entry_2;
5174 
5175     if (bytecount != sizeof(ldt_info))
5176         return -TARGET_EINVAL;
5177     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5178         return -TARGET_EFAULT;
5179     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5180     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5181     ldt_info.limit = tswap32(target_ldt_info->limit);
5182     ldt_info.flags = tswap32(target_ldt_info->flags);
5183     unlock_user_struct(target_ldt_info, ptr, 0);
5184 
5185     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5186         return -TARGET_EINVAL;
5187     seg_32bit = ldt_info.flags & 1;
5188     contents = (ldt_info.flags >> 1) & 3;
5189     read_exec_only = (ldt_info.flags >> 3) & 1;
5190     limit_in_pages = (ldt_info.flags >> 4) & 1;
5191     seg_not_present = (ldt_info.flags >> 5) & 1;
5192     useable = (ldt_info.flags >> 6) & 1;
5193 #ifdef TARGET_ABI32
5194     lm = 0;
5195 #else
5196     lm = (ldt_info.flags >> 7) & 1;
5197 #endif
5198     if (contents == 3) {
5199         if (oldmode)
5200             return -TARGET_EINVAL;
5201         if (seg_not_present == 0)
5202             return -TARGET_EINVAL;
5203     }
5204     /* allocate the LDT */
5205     if (!ldt_table) {
5206         env->ldt.base = target_mmap(0,
5207                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5208                                     PROT_READ|PROT_WRITE,
5209                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5210         if (env->ldt.base == -1)
5211             return -TARGET_ENOMEM;
5212         memset(g2h(env->ldt.base), 0,
5213                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5214         env->ldt.limit = 0xffff;
5215         ldt_table = g2h(env->ldt.base);
5216     }
5217 
5218     /* NOTE: same code as Linux kernel */
5219     /* Allow LDTs to be cleared by the user. */
5220     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5221         if (oldmode ||
5222             (contents == 0		&&
5223              read_exec_only == 1	&&
5224              seg_32bit == 0		&&
5225              limit_in_pages == 0	&&
5226              seg_not_present == 1	&&
5227              useable == 0 )) {
5228             entry_1 = 0;
5229             entry_2 = 0;
5230             goto install;
5231         }
5232     }
5233 
5234     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5235         (ldt_info.limit & 0x0ffff);
5236     entry_2 = (ldt_info.base_addr & 0xff000000) |
5237         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5238         (ldt_info.limit & 0xf0000) |
5239         ((read_exec_only ^ 1) << 9) |
5240         (contents << 10) |
5241         ((seg_not_present ^ 1) << 15) |
5242         (seg_32bit << 22) |
5243         (limit_in_pages << 23) |
5244         (lm << 21) |
5245         0x7000;
5246     if (!oldmode)
5247         entry_2 |= (useable << 20);
5248 
5249     /* Install the new entry ...  */
5250 install:
5251     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5252     lp[0] = tswap32(entry_1);
5253     lp[1] = tswap32(entry_2);
5254     return 0;
5255 }
5256 
5257 /* specific and weird i386 syscalls */
5258 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5259                               unsigned long bytecount)
5260 {
5261     abi_long ret;
5262 
5263     switch (func) {
5264     case 0:
5265         ret = read_ldt(ptr, bytecount);
5266         break;
5267     case 1:
5268         ret = write_ldt(env, ptr, bytecount, 1);
5269         break;
5270     case 0x11:
5271         ret = write_ldt(env, ptr, bytecount, 0);
5272         break;
5273     default:
5274         ret = -TARGET_ENOSYS;
5275         break;
5276     }
5277     return ret;
5278 }
5279 
5280 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5281 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5282 {
5283     uint64_t *gdt_table = g2h(env->gdt.base);
5284     struct target_modify_ldt_ldt_s ldt_info;
5285     struct target_modify_ldt_ldt_s *target_ldt_info;
5286     int seg_32bit, contents, read_exec_only, limit_in_pages;
5287     int seg_not_present, useable, lm;
5288     uint32_t *lp, entry_1, entry_2;
5289     int i;
5290 
5291     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5292     if (!target_ldt_info)
5293         return -TARGET_EFAULT;
5294     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5295     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5296     ldt_info.limit = tswap32(target_ldt_info->limit);
5297     ldt_info.flags = tswap32(target_ldt_info->flags);
5298     if (ldt_info.entry_number == -1) {
5299         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5300             if (gdt_table[i] == 0) {
5301                 ldt_info.entry_number = i;
5302                 target_ldt_info->entry_number = tswap32(i);
5303                 break;
5304             }
5305         }
5306     }
5307     unlock_user_struct(target_ldt_info, ptr, 1);
5308 
5309     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5310         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5311            return -TARGET_EINVAL;
5312     seg_32bit = ldt_info.flags & 1;
5313     contents = (ldt_info.flags >> 1) & 3;
5314     read_exec_only = (ldt_info.flags >> 3) & 1;
5315     limit_in_pages = (ldt_info.flags >> 4) & 1;
5316     seg_not_present = (ldt_info.flags >> 5) & 1;
5317     useable = (ldt_info.flags >> 6) & 1;
5318 #ifdef TARGET_ABI32
5319     lm = 0;
5320 #else
5321     lm = (ldt_info.flags >> 7) & 1;
5322 #endif
5323 
5324     if (contents == 3) {
5325         if (seg_not_present == 0)
5326             return -TARGET_EINVAL;
5327     }
5328 
5329     /* NOTE: same code as Linux kernel */
5330     /* Allow LDTs to be cleared by the user. */
5331     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5332         if ((contents == 0             &&
5333              read_exec_only == 1       &&
5334              seg_32bit == 0            &&
5335              limit_in_pages == 0       &&
5336              seg_not_present == 1      &&
5337              useable == 0 )) {
5338             entry_1 = 0;
5339             entry_2 = 0;
5340             goto install;
5341         }
5342     }
5343 
5344     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5345         (ldt_info.limit & 0x0ffff);
5346     entry_2 = (ldt_info.base_addr & 0xff000000) |
5347         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5348         (ldt_info.limit & 0xf0000) |
5349         ((read_exec_only ^ 1) << 9) |
5350         (contents << 10) |
5351         ((seg_not_present ^ 1) << 15) |
5352         (seg_32bit << 22) |
5353         (limit_in_pages << 23) |
5354         (useable << 20) |
5355         (lm << 21) |
5356         0x7000;
5357 
5358     /* Install the new entry ...  */
5359 install:
5360     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5361     lp[0] = tswap32(entry_1);
5362     lp[1] = tswap32(entry_2);
5363     return 0;
5364 }
5365 
5366 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5367 {
5368     struct target_modify_ldt_ldt_s *target_ldt_info;
5369     uint64_t *gdt_table = g2h(env->gdt.base);
5370     uint32_t base_addr, limit, flags;
5371     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5372     int seg_not_present, useable, lm;
5373     uint32_t *lp, entry_1, entry_2;
5374 
5375     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5376     if (!target_ldt_info)
5377         return -TARGET_EFAULT;
5378     idx = tswap32(target_ldt_info->entry_number);
5379     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5380         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5381         unlock_user_struct(target_ldt_info, ptr, 1);
5382         return -TARGET_EINVAL;
5383     }
5384     lp = (uint32_t *)(gdt_table + idx);
5385     entry_1 = tswap32(lp[0]);
5386     entry_2 = tswap32(lp[1]);
5387 
5388     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5389     contents = (entry_2 >> 10) & 3;
5390     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5391     seg_32bit = (entry_2 >> 22) & 1;
5392     limit_in_pages = (entry_2 >> 23) & 1;
5393     useable = (entry_2 >> 20) & 1;
5394 #ifdef TARGET_ABI32
5395     lm = 0;
5396 #else
5397     lm = (entry_2 >> 21) & 1;
5398 #endif
5399     flags = (seg_32bit << 0) | (contents << 1) |
5400         (read_exec_only << 3) | (limit_in_pages << 4) |
5401         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5402     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5403     base_addr = (entry_1 >> 16) |
5404         (entry_2 & 0xff000000) |
5405         ((entry_2 & 0xff) << 16);
5406     target_ldt_info->base_addr = tswapal(base_addr);
5407     target_ldt_info->limit = tswap32(limit);
5408     target_ldt_info->flags = tswap32(flags);
5409     unlock_user_struct(target_ldt_info, ptr, 1);
5410     return 0;
5411 }
5412 #endif /* TARGET_I386 && TARGET_ABI32 */
5413 
5414 #ifndef TARGET_ABI32
5415 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5416 {
5417     abi_long ret = 0;
5418     abi_ulong val;
5419     int idx;
5420 
5421     switch(code) {
5422     case TARGET_ARCH_SET_GS:
5423     case TARGET_ARCH_SET_FS:
5424         if (code == TARGET_ARCH_SET_GS)
5425             idx = R_GS;
5426         else
5427             idx = R_FS;
5428         cpu_x86_load_seg(env, idx, 0);
5429         env->segs[idx].base = addr;
5430         break;
5431     case TARGET_ARCH_GET_GS:
5432     case TARGET_ARCH_GET_FS:
5433         if (code == TARGET_ARCH_GET_GS)
5434             idx = R_GS;
5435         else
5436             idx = R_FS;
5437         val = env->segs[idx].base;
5438         if (put_user(val, addr, abi_ulong))
5439             ret = -TARGET_EFAULT;
5440         break;
5441     default:
5442         ret = -TARGET_EINVAL;
5443         break;
5444     }
5445     return ret;
5446 }
5447 #endif
5448 
5449 #endif /* defined(TARGET_I386) */
5450 
5451 #define NEW_STACK_SIZE 0x40000
5452 
5453 
5454 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5455 typedef struct {
5456     CPUArchState *env;
5457     pthread_mutex_t mutex;
5458     pthread_cond_t cond;
5459     pthread_t thread;
5460     uint32_t tid;
5461     abi_ulong child_tidptr;
5462     abi_ulong parent_tidptr;
5463     sigset_t sigmask;
5464 } new_thread_info;
5465 
5466 static void *clone_func(void *arg)
5467 {
5468     new_thread_info *info = arg;
5469     CPUArchState *env;
5470     CPUState *cpu;
5471     TaskState *ts;
5472 
5473     rcu_register_thread();
5474     tcg_register_thread();
5475     env = info->env;
5476     cpu = ENV_GET_CPU(env);
5477     thread_cpu = cpu;
5478     ts = (TaskState *)cpu->opaque;
5479     info->tid = sys_gettid();
5480     task_settid(ts);
5481     if (info->child_tidptr)
5482         put_user_u32(info->tid, info->child_tidptr);
5483     if (info->parent_tidptr)
5484         put_user_u32(info->tid, info->parent_tidptr);
5485     /* Enable signals.  */
5486     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5487     /* Signal to the parent that we're ready.  */
5488     pthread_mutex_lock(&info->mutex);
5489     pthread_cond_broadcast(&info->cond);
5490     pthread_mutex_unlock(&info->mutex);
5491     /* Wait until the parent has finished initializing the tls state.  */
5492     pthread_mutex_lock(&clone_lock);
5493     pthread_mutex_unlock(&clone_lock);
5494     cpu_loop(env);
5495     /* never exits */
5496     return NULL;
5497 }
5498 
5499 /* do_fork() Must return host values and target errnos (unlike most
5500    do_*() functions). */
5501 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5502                    abi_ulong parent_tidptr, target_ulong newtls,
5503                    abi_ulong child_tidptr)
5504 {
5505     CPUState *cpu = ENV_GET_CPU(env);
5506     int ret;
5507     TaskState *ts;
5508     CPUState *new_cpu;
5509     CPUArchState *new_env;
5510     sigset_t sigmask;
5511 
5512     flags &= ~CLONE_IGNORED_FLAGS;
5513 
5514     /* Emulate vfork() with fork() */
5515     if (flags & CLONE_VFORK)
5516         flags &= ~(CLONE_VFORK | CLONE_VM);
5517 
5518     if (flags & CLONE_VM) {
5519         TaskState *parent_ts = (TaskState *)cpu->opaque;
5520         new_thread_info info;
5521         pthread_attr_t attr;
5522 
5523         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5524             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5525             return -TARGET_EINVAL;
5526         }
5527 
5528         ts = g_new0(TaskState, 1);
5529         init_task_state(ts);
5530 
5531         /* Grab a mutex so that thread setup appears atomic.  */
5532         pthread_mutex_lock(&clone_lock);
5533 
5534         /* we create a new CPU instance. */
5535         new_env = cpu_copy(env);
5536         /* Init regs that differ from the parent.  */
5537         cpu_clone_regs(new_env, newsp);
5538         new_cpu = ENV_GET_CPU(new_env);
5539         new_cpu->opaque = ts;
5540         ts->bprm = parent_ts->bprm;
5541         ts->info = parent_ts->info;
5542         ts->signal_mask = parent_ts->signal_mask;
5543 
5544         if (flags & CLONE_CHILD_CLEARTID) {
5545             ts->child_tidptr = child_tidptr;
5546         }
5547 
5548         if (flags & CLONE_SETTLS) {
5549             cpu_set_tls (new_env, newtls);
5550         }
5551 
5552         memset(&info, 0, sizeof(info));
5553         pthread_mutex_init(&info.mutex, NULL);
5554         pthread_mutex_lock(&info.mutex);
5555         pthread_cond_init(&info.cond, NULL);
5556         info.env = new_env;
5557         if (flags & CLONE_CHILD_SETTID) {
5558             info.child_tidptr = child_tidptr;
5559         }
5560         if (flags & CLONE_PARENT_SETTID) {
5561             info.parent_tidptr = parent_tidptr;
5562         }
5563 
5564         ret = pthread_attr_init(&attr);
5565         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5566         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5567         /* It is not safe to deliver signals until the child has finished
5568            initializing, so temporarily block all signals.  */
5569         sigfillset(&sigmask);
5570         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5571 
5572         /* If this is our first additional thread, we need to ensure we
5573          * generate code for parallel execution and flush old translations.
5574          */
5575         if (!parallel_cpus) {
5576             parallel_cpus = true;
5577             tb_flush(cpu);
5578         }
5579 
5580         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5581         /* TODO: Free new CPU state if thread creation failed.  */
5582 
5583         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5584         pthread_attr_destroy(&attr);
5585         if (ret == 0) {
5586             /* Wait for the child to initialize.  */
5587             pthread_cond_wait(&info.cond, &info.mutex);
5588             ret = info.tid;
5589         } else {
5590             ret = -1;
5591         }
5592         pthread_mutex_unlock(&info.mutex);
5593         pthread_cond_destroy(&info.cond);
5594         pthread_mutex_destroy(&info.mutex);
5595         pthread_mutex_unlock(&clone_lock);
5596     } else {
5597         /* if no CLONE_VM, we consider it is a fork */
5598         if (flags & CLONE_INVALID_FORK_FLAGS) {
5599             return -TARGET_EINVAL;
5600         }
5601 
5602         /* We can't support custom termination signals */
5603         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5604             return -TARGET_EINVAL;
5605         }
5606 
5607         if (block_signals()) {
5608             return -TARGET_ERESTARTSYS;
5609         }
5610 
5611         fork_start();
5612         ret = fork();
5613         if (ret == 0) {
5614             /* Child Process.  */
5615             cpu_clone_regs(env, newsp);
5616             fork_end(1);
5617             /* There is a race condition here.  The parent process could
5618                theoretically read the TID in the child process before the child
5619                tid is set.  This would require using either ptrace
5620                (not implemented) or having *_tidptr to point at a shared memory
5621                mapping.  We can't repeat the spinlock hack used above because
5622                the child process gets its own copy of the lock.  */
5623             if (flags & CLONE_CHILD_SETTID)
5624                 put_user_u32(sys_gettid(), child_tidptr);
5625             if (flags & CLONE_PARENT_SETTID)
5626                 put_user_u32(sys_gettid(), parent_tidptr);
5627             ts = (TaskState *)cpu->opaque;
5628             if (flags & CLONE_SETTLS)
5629                 cpu_set_tls (env, newtls);
5630             if (flags & CLONE_CHILD_CLEARTID)
5631                 ts->child_tidptr = child_tidptr;
5632         } else {
5633             fork_end(0);
5634         }
5635     }
5636     return ret;
5637 }
5638 
5639 /* warning : doesn't handle linux specific flags... */
5640 static int target_to_host_fcntl_cmd(int cmd)
5641 {
5642     int ret;
5643 
5644     switch(cmd) {
5645     case TARGET_F_DUPFD:
5646     case TARGET_F_GETFD:
5647     case TARGET_F_SETFD:
5648     case TARGET_F_GETFL:
5649     case TARGET_F_SETFL:
5650         ret = cmd;
5651         break;
5652     case TARGET_F_GETLK:
5653         ret = F_GETLK64;
5654         break;
5655     case TARGET_F_SETLK:
5656         ret = F_SETLK64;
5657         break;
5658     case TARGET_F_SETLKW:
5659         ret = F_SETLKW64;
5660         break;
5661     case TARGET_F_GETOWN:
5662         ret = F_GETOWN;
5663         break;
5664     case TARGET_F_SETOWN:
5665         ret = F_SETOWN;
5666         break;
5667     case TARGET_F_GETSIG:
5668         ret = F_GETSIG;
5669         break;
5670     case TARGET_F_SETSIG:
5671         ret = F_SETSIG;
5672         break;
5673 #if TARGET_ABI_BITS == 32
5674     case TARGET_F_GETLK64:
5675         ret = F_GETLK64;
5676         break;
5677     case TARGET_F_SETLK64:
5678         ret = F_SETLK64;
5679         break;
5680     case TARGET_F_SETLKW64:
5681         ret = F_SETLKW64;
5682         break;
5683 #endif
5684     case TARGET_F_SETLEASE:
5685         ret = F_SETLEASE;
5686         break;
5687     case TARGET_F_GETLEASE:
5688         ret = F_GETLEASE;
5689         break;
5690 #ifdef F_DUPFD_CLOEXEC
5691     case TARGET_F_DUPFD_CLOEXEC:
5692         ret = F_DUPFD_CLOEXEC;
5693         break;
5694 #endif
5695     case TARGET_F_NOTIFY:
5696         ret = F_NOTIFY;
5697         break;
5698 #ifdef F_GETOWN_EX
5699     case TARGET_F_GETOWN_EX:
5700         ret = F_GETOWN_EX;
5701         break;
5702 #endif
5703 #ifdef F_SETOWN_EX
5704     case TARGET_F_SETOWN_EX:
5705         ret = F_SETOWN_EX;
5706         break;
5707 #endif
5708 #ifdef F_SETPIPE_SZ
5709     case TARGET_F_SETPIPE_SZ:
5710         ret = F_SETPIPE_SZ;
5711         break;
5712     case TARGET_F_GETPIPE_SZ:
5713         ret = F_GETPIPE_SZ;
5714         break;
5715 #endif
5716     default:
5717         ret = -TARGET_EINVAL;
5718         break;
5719     }
5720 
5721 #if defined(__powerpc64__)
5722     /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
5723      * is not supported by kernel. The glibc fcntl call actually adjusts
5724      * them to 5, 6 and 7 before making the syscall(). Since we make the
5725      * syscall directly, adjust to what is supported by the kernel.
5726      */
5727     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5728         ret -= F_GETLK64 - 5;
5729     }
5730 #endif
5731 
5732     return ret;
5733 }
5734 
5735 #define FLOCK_TRANSTBL \
5736     switch (type) { \
5737     TRANSTBL_CONVERT(F_RDLCK); \
5738     TRANSTBL_CONVERT(F_WRLCK); \
5739     TRANSTBL_CONVERT(F_UNLCK); \
5740     TRANSTBL_CONVERT(F_EXLCK); \
5741     TRANSTBL_CONVERT(F_SHLCK); \
5742     }
5743 
5744 static int target_to_host_flock(int type)
5745 {
5746 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5747     FLOCK_TRANSTBL
5748 #undef  TRANSTBL_CONVERT
5749     return -TARGET_EINVAL;
5750 }
5751 
5752 static int host_to_target_flock(int type)
5753 {
5754 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5755     FLOCK_TRANSTBL
5756 #undef  TRANSTBL_CONVERT
5757     /* if we don't know how to convert the value coming
5758      * from the host we copy to the target field as-is
5759      */
5760     return type;
5761 }
5762 
5763 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5764                                             abi_ulong target_flock_addr)
5765 {
5766     struct target_flock *target_fl;
5767     int l_type;
5768 
5769     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5770         return -TARGET_EFAULT;
5771     }
5772 
5773     __get_user(l_type, &target_fl->l_type);
5774     l_type = target_to_host_flock(l_type);
5775     if (l_type < 0) {
5776         return l_type;
5777     }
5778     fl->l_type = l_type;
5779     __get_user(fl->l_whence, &target_fl->l_whence);
5780     __get_user(fl->l_start, &target_fl->l_start);
5781     __get_user(fl->l_len, &target_fl->l_len);
5782     __get_user(fl->l_pid, &target_fl->l_pid);
5783     unlock_user_struct(target_fl, target_flock_addr, 0);
5784     return 0;
5785 }
5786 
5787 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5788                                           const struct flock64 *fl)
5789 {
5790     struct target_flock *target_fl;
5791     short l_type;
5792 
5793     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5794         return -TARGET_EFAULT;
5795     }
5796 
5797     l_type = host_to_target_flock(fl->l_type);
5798     __put_user(l_type, &target_fl->l_type);
5799     __put_user(fl->l_whence, &target_fl->l_whence);
5800     __put_user(fl->l_start, &target_fl->l_start);
5801     __put_user(fl->l_len, &target_fl->l_len);
5802     __put_user(fl->l_pid, &target_fl->l_pid);
5803     unlock_user_struct(target_fl, target_flock_addr, 1);
5804     return 0;
5805 }
5806 
5807 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5808 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5809 
5810 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5811 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5812                                                    abi_ulong target_flock_addr)
5813 {
5814     struct target_oabi_flock64 *target_fl;
5815     int l_type;
5816 
5817     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5818         return -TARGET_EFAULT;
5819     }
5820 
5821     __get_user(l_type, &target_fl->l_type);
5822     l_type = target_to_host_flock(l_type);
5823     if (l_type < 0) {
5824         return l_type;
5825     }
5826     fl->l_type = l_type;
5827     __get_user(fl->l_whence, &target_fl->l_whence);
5828     __get_user(fl->l_start, &target_fl->l_start);
5829     __get_user(fl->l_len, &target_fl->l_len);
5830     __get_user(fl->l_pid, &target_fl->l_pid);
5831     unlock_user_struct(target_fl, target_flock_addr, 0);
5832     return 0;
5833 }
5834 
5835 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
5836                                                  const struct flock64 *fl)
5837 {
5838     struct target_oabi_flock64 *target_fl;
5839     short l_type;
5840 
5841     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5842         return -TARGET_EFAULT;
5843     }
5844 
5845     l_type = host_to_target_flock(fl->l_type);
5846     __put_user(l_type, &target_fl->l_type);
5847     __put_user(fl->l_whence, &target_fl->l_whence);
5848     __put_user(fl->l_start, &target_fl->l_start);
5849     __put_user(fl->l_len, &target_fl->l_len);
5850     __put_user(fl->l_pid, &target_fl->l_pid);
5851     unlock_user_struct(target_fl, target_flock_addr, 1);
5852     return 0;
5853 }
5854 #endif
5855 
5856 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
5857                                               abi_ulong target_flock_addr)
5858 {
5859     struct target_flock64 *target_fl;
5860     int l_type;
5861 
5862     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5863         return -TARGET_EFAULT;
5864     }
5865 
5866     __get_user(l_type, &target_fl->l_type);
5867     l_type = target_to_host_flock(l_type);
5868     if (l_type < 0) {
5869         return l_type;
5870     }
5871     fl->l_type = l_type;
5872     __get_user(fl->l_whence, &target_fl->l_whence);
5873     __get_user(fl->l_start, &target_fl->l_start);
5874     __get_user(fl->l_len, &target_fl->l_len);
5875     __get_user(fl->l_pid, &target_fl->l_pid);
5876     unlock_user_struct(target_fl, target_flock_addr, 0);
5877     return 0;
5878 }
5879 
5880 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
5881                                             const struct flock64 *fl)
5882 {
5883     struct target_flock64 *target_fl;
5884     short l_type;
5885 
5886     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5887         return -TARGET_EFAULT;
5888     }
5889 
5890     l_type = host_to_target_flock(fl->l_type);
5891     __put_user(l_type, &target_fl->l_type);
5892     __put_user(fl->l_whence, &target_fl->l_whence);
5893     __put_user(fl->l_start, &target_fl->l_start);
5894     __put_user(fl->l_len, &target_fl->l_len);
5895     __put_user(fl->l_pid, &target_fl->l_pid);
5896     unlock_user_struct(target_fl, target_flock_addr, 1);
5897     return 0;
5898 }
5899 
5900 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
5901 {
5902     struct flock64 fl64;
5903 #ifdef F_GETOWN_EX
5904     struct f_owner_ex fox;
5905     struct target_f_owner_ex *target_fox;
5906 #endif
5907     abi_long ret;
5908     int host_cmd = target_to_host_fcntl_cmd(cmd);
5909 
5910     if (host_cmd == -TARGET_EINVAL)
5911 	    return host_cmd;
5912 
5913     switch(cmd) {
5914     case TARGET_F_GETLK:
5915         ret = copy_from_user_flock(&fl64, arg);
5916         if (ret) {
5917             return ret;
5918         }
5919         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5920         if (ret == 0) {
5921             ret = copy_to_user_flock(arg, &fl64);
5922         }
5923         break;
5924 
5925     case TARGET_F_SETLK:
5926     case TARGET_F_SETLKW:
5927         ret = copy_from_user_flock(&fl64, arg);
5928         if (ret) {
5929             return ret;
5930         }
5931         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5932         break;
5933 
5934     case TARGET_F_GETLK64:
5935         ret = copy_from_user_flock64(&fl64, arg);
5936         if (ret) {
5937             return ret;
5938         }
5939         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5940         if (ret == 0) {
5941             ret = copy_to_user_flock64(arg, &fl64);
5942         }
5943         break;
5944     case TARGET_F_SETLK64:
5945     case TARGET_F_SETLKW64:
5946         ret = copy_from_user_flock64(&fl64, arg);
5947         if (ret) {
5948             return ret;
5949         }
5950         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5951         break;
5952 
5953     case TARGET_F_GETFL:
5954         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5955         if (ret >= 0) {
5956             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
5957         }
5958         break;
5959 
5960     case TARGET_F_SETFL:
5961         ret = get_errno(safe_fcntl(fd, host_cmd,
5962                                    target_to_host_bitmask(arg,
5963                                                           fcntl_flags_tbl)));
5964         break;
5965 
5966 #ifdef F_GETOWN_EX
5967     case TARGET_F_GETOWN_EX:
5968         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5969         if (ret >= 0) {
5970             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
5971                 return -TARGET_EFAULT;
5972             target_fox->type = tswap32(fox.type);
5973             target_fox->pid = tswap32(fox.pid);
5974             unlock_user_struct(target_fox, arg, 1);
5975         }
5976         break;
5977 #endif
5978 
5979 #ifdef F_SETOWN_EX
5980     case TARGET_F_SETOWN_EX:
5981         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
5982             return -TARGET_EFAULT;
5983         fox.type = tswap32(target_fox->type);
5984         fox.pid = tswap32(target_fox->pid);
5985         unlock_user_struct(target_fox, arg, 0);
5986         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5987         break;
5988 #endif
5989 
5990     case TARGET_F_SETOWN:
5991     case TARGET_F_GETOWN:
5992     case TARGET_F_SETSIG:
5993     case TARGET_F_GETSIG:
5994     case TARGET_F_SETLEASE:
5995     case TARGET_F_GETLEASE:
5996     case TARGET_F_SETPIPE_SZ:
5997     case TARGET_F_GETPIPE_SZ:
5998         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5999         break;
6000 
6001     default:
6002         ret = get_errno(safe_fcntl(fd, cmd, arg));
6003         break;
6004     }
6005     return ret;
6006 }
6007 
6008 #ifdef USE_UID16
6009 
6010 static inline int high2lowuid(int uid)
6011 {
6012     if (uid > 65535)
6013         return 65534;
6014     else
6015         return uid;
6016 }
6017 
6018 static inline int high2lowgid(int gid)
6019 {
6020     if (gid > 65535)
6021         return 65534;
6022     else
6023         return gid;
6024 }
6025 
6026 static inline int low2highuid(int uid)
6027 {
6028     if ((int16_t)uid == -1)
6029         return -1;
6030     else
6031         return uid;
6032 }
6033 
6034 static inline int low2highgid(int gid)
6035 {
6036     if ((int16_t)gid == -1)
6037         return -1;
6038     else
6039         return gid;
6040 }
6041 static inline int tswapid(int id)
6042 {
6043     return tswap16(id);
6044 }
6045 
6046 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6047 
6048 #else /* !USE_UID16 */
6049 static inline int high2lowuid(int uid)
6050 {
6051     return uid;
6052 }
6053 static inline int high2lowgid(int gid)
6054 {
6055     return gid;
6056 }
6057 static inline int low2highuid(int uid)
6058 {
6059     return uid;
6060 }
6061 static inline int low2highgid(int gid)
6062 {
6063     return gid;
6064 }
6065 static inline int tswapid(int id)
6066 {
6067     return tswap32(id);
6068 }
6069 
6070 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6071 
6072 #endif /* USE_UID16 */
6073 
6074 /* We must do direct syscalls for setting UID/GID, because we want to
6075  * implement the Linux system call semantics of "change only for this thread",
6076  * not the libc/POSIX semantics of "change for all threads in process".
6077  * (See http://ewontfix.com/17/ for more details.)
6078  * We use the 32-bit version of the syscalls if present; if it is not
6079  * then either the host architecture supports 32-bit UIDs natively with
6080  * the standard syscall, or the 16-bit UID is the best we can do.
6081  */
6082 #ifdef __NR_setuid32
6083 #define __NR_sys_setuid __NR_setuid32
6084 #else
6085 #define __NR_sys_setuid __NR_setuid
6086 #endif
6087 #ifdef __NR_setgid32
6088 #define __NR_sys_setgid __NR_setgid32
6089 #else
6090 #define __NR_sys_setgid __NR_setgid
6091 #endif
6092 #ifdef __NR_setresuid32
6093 #define __NR_sys_setresuid __NR_setresuid32
6094 #else
6095 #define __NR_sys_setresuid __NR_setresuid
6096 #endif
6097 #ifdef __NR_setresgid32
6098 #define __NR_sys_setresgid __NR_setresgid32
6099 #else
6100 #define __NR_sys_setresgid __NR_setresgid
6101 #endif
6102 
6103 _syscall1(int, sys_setuid, uid_t, uid)
6104 _syscall1(int, sys_setgid, gid_t, gid)
6105 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6106 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6107 
6108 void syscall_init(void)
6109 {
6110     IOCTLEntry *ie;
6111     const argtype *arg_type;
6112     int size;
6113     int i;
6114 
6115     thunk_init(STRUCT_MAX);
6116 
6117 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6118 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6119 #include "syscall_types.h"
6120 #undef STRUCT
6121 #undef STRUCT_SPECIAL
6122 
6123     /* Build target_to_host_errno_table[] table from
6124      * host_to_target_errno_table[]. */
6125     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6126         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6127     }
6128 
6129     /* we patch the ioctl size if necessary. We rely on the fact that
6130        no ioctl has all the bits at '1' in the size field */
6131     ie = ioctl_entries;
6132     while (ie->target_cmd != 0) {
6133         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6134             TARGET_IOC_SIZEMASK) {
6135             arg_type = ie->arg_type;
6136             if (arg_type[0] != TYPE_PTR) {
6137                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6138                         ie->target_cmd);
6139                 exit(1);
6140             }
6141             arg_type++;
6142             size = thunk_type_size(arg_type, 0);
6143             ie->target_cmd = (ie->target_cmd &
6144                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6145                 (size << TARGET_IOC_SIZESHIFT);
6146         }
6147 
6148         /* automatic consistency check if same arch */
6149 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6150     (defined(__x86_64__) && defined(TARGET_X86_64))
6151         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6152             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6153                     ie->name, ie->target_cmd, ie->host_cmd);
6154         }
6155 #endif
6156         ie++;
6157     }
6158 }
6159 
6160 #if TARGET_ABI_BITS == 32
6161 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6162 {
6163 #ifdef TARGET_WORDS_BIGENDIAN
6164     return ((uint64_t)word0 << 32) | word1;
6165 #else
6166     return ((uint64_t)word1 << 32) | word0;
6167 #endif
6168 }
6169 #else /* TARGET_ABI_BITS == 32 */
6170 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6171 {
6172     return word0;
6173 }
6174 #endif /* TARGET_ABI_BITS != 32 */
6175 
6176 #ifdef TARGET_NR_truncate64
6177 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6178                                          abi_long arg2,
6179                                          abi_long arg3,
6180                                          abi_long arg4)
6181 {
6182     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6183         arg2 = arg3;
6184         arg3 = arg4;
6185     }
6186     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6187 }
6188 #endif
6189 
6190 #ifdef TARGET_NR_ftruncate64
6191 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6192                                           abi_long arg2,
6193                                           abi_long arg3,
6194                                           abi_long arg4)
6195 {
6196     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6197         arg2 = arg3;
6198         arg3 = arg4;
6199     }
6200     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6201 }
6202 #endif
6203 
6204 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6205                                                abi_ulong target_addr)
6206 {
6207     struct target_timespec *target_ts;
6208 
6209     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6210         return -TARGET_EFAULT;
6211     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6212     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6213     unlock_user_struct(target_ts, target_addr, 0);
6214     return 0;
6215 }
6216 
6217 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6218                                                struct timespec *host_ts)
6219 {
6220     struct target_timespec *target_ts;
6221 
6222     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6223         return -TARGET_EFAULT;
6224     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6225     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6226     unlock_user_struct(target_ts, target_addr, 1);
6227     return 0;
6228 }
6229 
6230 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6231                                                  abi_ulong target_addr)
6232 {
6233     struct target_itimerspec *target_itspec;
6234 
6235     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6236         return -TARGET_EFAULT;
6237     }
6238 
6239     host_itspec->it_interval.tv_sec =
6240                             tswapal(target_itspec->it_interval.tv_sec);
6241     host_itspec->it_interval.tv_nsec =
6242                             tswapal(target_itspec->it_interval.tv_nsec);
6243     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6244     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6245 
6246     unlock_user_struct(target_itspec, target_addr, 1);
6247     return 0;
6248 }
6249 
6250 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6251                                                struct itimerspec *host_its)
6252 {
6253     struct target_itimerspec *target_itspec;
6254 
6255     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6256         return -TARGET_EFAULT;
6257     }
6258 
6259     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6260     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6261 
6262     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6263     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6264 
6265     unlock_user_struct(target_itspec, target_addr, 0);
6266     return 0;
6267 }
6268 
6269 static inline abi_long target_to_host_timex(struct timex *host_tx,
6270                                             abi_long target_addr)
6271 {
6272     struct target_timex *target_tx;
6273 
6274     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6275         return -TARGET_EFAULT;
6276     }
6277 
6278     __get_user(host_tx->modes, &target_tx->modes);
6279     __get_user(host_tx->offset, &target_tx->offset);
6280     __get_user(host_tx->freq, &target_tx->freq);
6281     __get_user(host_tx->maxerror, &target_tx->maxerror);
6282     __get_user(host_tx->esterror, &target_tx->esterror);
6283     __get_user(host_tx->status, &target_tx->status);
6284     __get_user(host_tx->constant, &target_tx->constant);
6285     __get_user(host_tx->precision, &target_tx->precision);
6286     __get_user(host_tx->tolerance, &target_tx->tolerance);
6287     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6288     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6289     __get_user(host_tx->tick, &target_tx->tick);
6290     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6291     __get_user(host_tx->jitter, &target_tx->jitter);
6292     __get_user(host_tx->shift, &target_tx->shift);
6293     __get_user(host_tx->stabil, &target_tx->stabil);
6294     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6295     __get_user(host_tx->calcnt, &target_tx->calcnt);
6296     __get_user(host_tx->errcnt, &target_tx->errcnt);
6297     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6298     __get_user(host_tx->tai, &target_tx->tai);
6299 
6300     unlock_user_struct(target_tx, target_addr, 0);
6301     return 0;
6302 }
6303 
6304 static inline abi_long host_to_target_timex(abi_long target_addr,
6305                                             struct timex *host_tx)
6306 {
6307     struct target_timex *target_tx;
6308 
6309     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6310         return -TARGET_EFAULT;
6311     }
6312 
6313     __put_user(host_tx->modes, &target_tx->modes);
6314     __put_user(host_tx->offset, &target_tx->offset);
6315     __put_user(host_tx->freq, &target_tx->freq);
6316     __put_user(host_tx->maxerror, &target_tx->maxerror);
6317     __put_user(host_tx->esterror, &target_tx->esterror);
6318     __put_user(host_tx->status, &target_tx->status);
6319     __put_user(host_tx->constant, &target_tx->constant);
6320     __put_user(host_tx->precision, &target_tx->precision);
6321     __put_user(host_tx->tolerance, &target_tx->tolerance);
6322     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6323     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6324     __put_user(host_tx->tick, &target_tx->tick);
6325     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6326     __put_user(host_tx->jitter, &target_tx->jitter);
6327     __put_user(host_tx->shift, &target_tx->shift);
6328     __put_user(host_tx->stabil, &target_tx->stabil);
6329     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6330     __put_user(host_tx->calcnt, &target_tx->calcnt);
6331     __put_user(host_tx->errcnt, &target_tx->errcnt);
6332     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6333     __put_user(host_tx->tai, &target_tx->tai);
6334 
6335     unlock_user_struct(target_tx, target_addr, 1);
6336     return 0;
6337 }
6338 
6339 
6340 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6341                                                abi_ulong target_addr)
6342 {
6343     struct target_sigevent *target_sevp;
6344 
6345     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6346         return -TARGET_EFAULT;
6347     }
6348 
6349     /* This union is awkward on 64 bit systems because it has a 32 bit
6350      * integer and a pointer in it; we follow the conversion approach
6351      * used for handling sigval types in signal.c so the guest should get
6352      * the correct value back even if we did a 64 bit byteswap and it's
6353      * using the 32 bit integer.
6354      */
6355     host_sevp->sigev_value.sival_ptr =
6356         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6357     host_sevp->sigev_signo =
6358         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6359     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6360     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6361 
6362     unlock_user_struct(target_sevp, target_addr, 1);
6363     return 0;
6364 }
6365 
6366 #if defined(TARGET_NR_mlockall)
6367 static inline int target_to_host_mlockall_arg(int arg)
6368 {
6369     int result = 0;
6370 
6371     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6372         result |= MCL_CURRENT;
6373     }
6374     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6375         result |= MCL_FUTURE;
6376     }
6377     return result;
6378 }
6379 #endif
6380 
6381 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6382      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6383      defined(TARGET_NR_newfstatat))
6384 static inline abi_long host_to_target_stat64(void *cpu_env,
6385                                              abi_ulong target_addr,
6386                                              struct stat *host_st)
6387 {
6388 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6389     if (((CPUARMState *)cpu_env)->eabi) {
6390         struct target_eabi_stat64 *target_st;
6391 
6392         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6393             return -TARGET_EFAULT;
6394         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6395         __put_user(host_st->st_dev, &target_st->st_dev);
6396         __put_user(host_st->st_ino, &target_st->st_ino);
6397 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6398         __put_user(host_st->st_ino, &target_st->__st_ino);
6399 #endif
6400         __put_user(host_st->st_mode, &target_st->st_mode);
6401         __put_user(host_st->st_nlink, &target_st->st_nlink);
6402         __put_user(host_st->st_uid, &target_st->st_uid);
6403         __put_user(host_st->st_gid, &target_st->st_gid);
6404         __put_user(host_st->st_rdev, &target_st->st_rdev);
6405         __put_user(host_st->st_size, &target_st->st_size);
6406         __put_user(host_st->st_blksize, &target_st->st_blksize);
6407         __put_user(host_st->st_blocks, &target_st->st_blocks);
6408         __put_user(host_st->st_atime, &target_st->target_st_atime);
6409         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6410         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6411         unlock_user_struct(target_st, target_addr, 1);
6412     } else
6413 #endif
6414     {
6415 #if defined(TARGET_HAS_STRUCT_STAT64)
6416         struct target_stat64 *target_st;
6417 #else
6418         struct target_stat *target_st;
6419 #endif
6420 
6421         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6422             return -TARGET_EFAULT;
6423         memset(target_st, 0, sizeof(*target_st));
6424         __put_user(host_st->st_dev, &target_st->st_dev);
6425         __put_user(host_st->st_ino, &target_st->st_ino);
6426 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6427         __put_user(host_st->st_ino, &target_st->__st_ino);
6428 #endif
6429         __put_user(host_st->st_mode, &target_st->st_mode);
6430         __put_user(host_st->st_nlink, &target_st->st_nlink);
6431         __put_user(host_st->st_uid, &target_st->st_uid);
6432         __put_user(host_st->st_gid, &target_st->st_gid);
6433         __put_user(host_st->st_rdev, &target_st->st_rdev);
6434         /* XXX: better use of kernel struct */
6435         __put_user(host_st->st_size, &target_st->st_size);
6436         __put_user(host_st->st_blksize, &target_st->st_blksize);
6437         __put_user(host_st->st_blocks, &target_st->st_blocks);
6438         __put_user(host_st->st_atime, &target_st->target_st_atime);
6439         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6440         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6441         unlock_user_struct(target_st, target_addr, 1);
6442     }
6443 
6444     return 0;
6445 }
6446 #endif
6447 
6448 /* ??? Using host futex calls even when target atomic operations
6449    are not really atomic probably breaks things.  However implementing
6450    futexes locally would make futexes shared between multiple processes
6451    tricky.  However they're probably useless because guest atomic
6452    operations won't work either.  */
6453 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6454                     target_ulong uaddr2, int val3)
6455 {
6456     struct timespec ts, *pts;
6457     int base_op;
6458 
6459     /* ??? We assume FUTEX_* constants are the same on both host
6460        and target.  */
6461 #ifdef FUTEX_CMD_MASK
6462     base_op = op & FUTEX_CMD_MASK;
6463 #else
6464     base_op = op;
6465 #endif
6466     switch (base_op) {
6467     case FUTEX_WAIT:
6468     case FUTEX_WAIT_BITSET:
6469         if (timeout) {
6470             pts = &ts;
6471             target_to_host_timespec(pts, timeout);
6472         } else {
6473             pts = NULL;
6474         }
6475         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6476                          pts, NULL, val3));
6477     case FUTEX_WAKE:
6478         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6479     case FUTEX_FD:
6480         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6481     case FUTEX_REQUEUE:
6482     case FUTEX_CMP_REQUEUE:
6483     case FUTEX_WAKE_OP:
6484         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6485            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6486            But the prototype takes a `struct timespec *'; insert casts
6487            to satisfy the compiler.  We do not need to tswap TIMEOUT
6488            since it's not compared to guest memory.  */
6489         pts = (struct timespec *)(uintptr_t) timeout;
6490         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6491                                     g2h(uaddr2),
6492                                     (base_op == FUTEX_CMP_REQUEUE
6493                                      ? tswap32(val3)
6494                                      : val3)));
6495     default:
6496         return -TARGET_ENOSYS;
6497     }
6498 }
6499 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6500 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6501                                      abi_long handle, abi_long mount_id,
6502                                      abi_long flags)
6503 {
6504     struct file_handle *target_fh;
6505     struct file_handle *fh;
6506     int mid = 0;
6507     abi_long ret;
6508     char *name;
6509     unsigned int size, total_size;
6510 
6511     if (get_user_s32(size, handle)) {
6512         return -TARGET_EFAULT;
6513     }
6514 
6515     name = lock_user_string(pathname);
6516     if (!name) {
6517         return -TARGET_EFAULT;
6518     }
6519 
6520     total_size = sizeof(struct file_handle) + size;
6521     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6522     if (!target_fh) {
6523         unlock_user(name, pathname, 0);
6524         return -TARGET_EFAULT;
6525     }
6526 
6527     fh = g_malloc0(total_size);
6528     fh->handle_bytes = size;
6529 
6530     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6531     unlock_user(name, pathname, 0);
6532 
6533     /* man name_to_handle_at(2):
6534      * Other than the use of the handle_bytes field, the caller should treat
6535      * the file_handle structure as an opaque data type
6536      */
6537 
6538     memcpy(target_fh, fh, total_size);
6539     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6540     target_fh->handle_type = tswap32(fh->handle_type);
6541     g_free(fh);
6542     unlock_user(target_fh, handle, total_size);
6543 
6544     if (put_user_s32(mid, mount_id)) {
6545         return -TARGET_EFAULT;
6546     }
6547 
6548     return ret;
6549 
6550 }
6551 #endif
6552 
6553 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6554 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6555                                      abi_long flags)
6556 {
6557     struct file_handle *target_fh;
6558     struct file_handle *fh;
6559     unsigned int size, total_size;
6560     abi_long ret;
6561 
6562     if (get_user_s32(size, handle)) {
6563         return -TARGET_EFAULT;
6564     }
6565 
6566     total_size = sizeof(struct file_handle) + size;
6567     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6568     if (!target_fh) {
6569         return -TARGET_EFAULT;
6570     }
6571 
6572     fh = g_memdup(target_fh, total_size);
6573     fh->handle_bytes = size;
6574     fh->handle_type = tswap32(target_fh->handle_type);
6575 
6576     ret = get_errno(open_by_handle_at(mount_fd, fh,
6577                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6578 
6579     g_free(fh);
6580 
6581     unlock_user(target_fh, handle, total_size);
6582 
6583     return ret;
6584 }
6585 #endif
6586 
6587 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6588 
6589 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6590 {
6591     int host_flags;
6592     target_sigset_t *target_mask;
6593     sigset_t host_mask;
6594     abi_long ret;
6595 
6596     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6597         return -TARGET_EINVAL;
6598     }
6599     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6600         return -TARGET_EFAULT;
6601     }
6602 
6603     target_to_host_sigset(&host_mask, target_mask);
6604 
6605     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6606 
6607     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6608     if (ret >= 0) {
6609         fd_trans_register(ret, &target_signalfd_trans);
6610     }
6611 
6612     unlock_user_struct(target_mask, mask, 0);
6613 
6614     return ret;
6615 }
6616 #endif
6617 
6618 /* Map host to target signal numbers for the wait family of syscalls.
6619    Assume all other status bits are the same.  */
6620 int host_to_target_waitstatus(int status)
6621 {
6622     if (WIFSIGNALED(status)) {
6623         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6624     }
6625     if (WIFSTOPPED(status)) {
6626         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6627                | (status & 0xff);
6628     }
6629     return status;
6630 }
6631 
6632 static int open_self_cmdline(void *cpu_env, int fd)
6633 {
6634     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6635     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6636     int i;
6637 
6638     for (i = 0; i < bprm->argc; i++) {
6639         size_t len = strlen(bprm->argv[i]) + 1;
6640 
6641         if (write(fd, bprm->argv[i], len) != len) {
6642             return -1;
6643         }
6644     }
6645 
6646     return 0;
6647 }
6648 
6649 static int open_self_maps(void *cpu_env, int fd)
6650 {
6651     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6652     TaskState *ts = cpu->opaque;
6653     FILE *fp;
6654     char *line = NULL;
6655     size_t len = 0;
6656     ssize_t read;
6657 
6658     fp = fopen("/proc/self/maps", "r");
6659     if (fp == NULL) {
6660         return -1;
6661     }
6662 
6663     while ((read = getline(&line, &len, fp)) != -1) {
6664         int fields, dev_maj, dev_min, inode;
6665         uint64_t min, max, offset;
6666         char flag_r, flag_w, flag_x, flag_p;
6667         char path[512] = "";
6668         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6669                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6670                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6671 
6672         if ((fields < 10) || (fields > 11)) {
6673             continue;
6674         }
6675         if (h2g_valid(min)) {
6676             int flags = page_get_flags(h2g(min));
6677             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6678             if (page_check_range(h2g(min), max - min, flags) == -1) {
6679                 continue;
6680             }
6681             if (h2g(min) == ts->info->stack_limit) {
6682                 pstrcpy(path, sizeof(path), "      [stack]");
6683             }
6684             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6685                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6686                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6687                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6688                     path[0] ? "         " : "", path);
6689         }
6690     }
6691 
6692     free(line);
6693     fclose(fp);
6694 
6695     return 0;
6696 }
6697 
6698 static int open_self_stat(void *cpu_env, int fd)
6699 {
6700     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6701     TaskState *ts = cpu->opaque;
6702     abi_ulong start_stack = ts->info->start_stack;
6703     int i;
6704 
6705     for (i = 0; i < 44; i++) {
6706       char buf[128];
6707       int len;
6708       uint64_t val = 0;
6709 
6710       if (i == 0) {
6711         /* pid */
6712         val = getpid();
6713         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6714       } else if (i == 1) {
6715         /* app name */
6716         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6717       } else if (i == 27) {
6718         /* stack bottom */
6719         val = start_stack;
6720         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6721       } else {
6722         /* for the rest, there is MasterCard */
6723         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6724       }
6725 
6726       len = strlen(buf);
6727       if (write(fd, buf, len) != len) {
6728           return -1;
6729       }
6730     }
6731 
6732     return 0;
6733 }
6734 
6735 static int open_self_auxv(void *cpu_env, int fd)
6736 {
6737     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6738     TaskState *ts = cpu->opaque;
6739     abi_ulong auxv = ts->info->saved_auxv;
6740     abi_ulong len = ts->info->auxv_len;
6741     char *ptr;
6742 
6743     /*
6744      * Auxiliary vector is stored in target process stack.
6745      * read in whole auxv vector and copy it to file
6746      */
6747     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6748     if (ptr != NULL) {
6749         while (len > 0) {
6750             ssize_t r;
6751             r = write(fd, ptr, len);
6752             if (r <= 0) {
6753                 break;
6754             }
6755             len -= r;
6756             ptr += r;
6757         }
6758         lseek(fd, 0, SEEK_SET);
6759         unlock_user(ptr, auxv, len);
6760     }
6761 
6762     return 0;
6763 }
6764 
6765 static int is_proc_myself(const char *filename, const char *entry)
6766 {
6767     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6768         filename += strlen("/proc/");
6769         if (!strncmp(filename, "self/", strlen("self/"))) {
6770             filename += strlen("self/");
6771         } else if (*filename >= '1' && *filename <= '9') {
6772             char myself[80];
6773             snprintf(myself, sizeof(myself), "%d/", getpid());
6774             if (!strncmp(filename, myself, strlen(myself))) {
6775                 filename += strlen(myself);
6776             } else {
6777                 return 0;
6778             }
6779         } else {
6780             return 0;
6781         }
6782         if (!strcmp(filename, entry)) {
6783             return 1;
6784         }
6785     }
6786     return 0;
6787 }
6788 
6789 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6790 static int is_proc(const char *filename, const char *entry)
6791 {
6792     return strcmp(filename, entry) == 0;
6793 }
6794 
6795 static int open_net_route(void *cpu_env, int fd)
6796 {
6797     FILE *fp;
6798     char *line = NULL;
6799     size_t len = 0;
6800     ssize_t read;
6801 
6802     fp = fopen("/proc/net/route", "r");
6803     if (fp == NULL) {
6804         return -1;
6805     }
6806 
6807     /* read header */
6808 
6809     read = getline(&line, &len, fp);
6810     dprintf(fd, "%s", line);
6811 
6812     /* read routes */
6813 
6814     while ((read = getline(&line, &len, fp)) != -1) {
6815         char iface[16];
6816         uint32_t dest, gw, mask;
6817         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6818         int fields;
6819 
6820         fields = sscanf(line,
6821                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6822                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6823                         &mask, &mtu, &window, &irtt);
6824         if (fields != 11) {
6825             continue;
6826         }
6827         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6828                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6829                 metric, tswap32(mask), mtu, window, irtt);
6830     }
6831 
6832     free(line);
6833     fclose(fp);
6834 
6835     return 0;
6836 }
6837 #endif
6838 
6839 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
6840 {
6841     struct fake_open {
6842         const char *filename;
6843         int (*fill)(void *cpu_env, int fd);
6844         int (*cmp)(const char *s1, const char *s2);
6845     };
6846     const struct fake_open *fake_open;
6847     static const struct fake_open fakes[] = {
6848         { "maps", open_self_maps, is_proc_myself },
6849         { "stat", open_self_stat, is_proc_myself },
6850         { "auxv", open_self_auxv, is_proc_myself },
6851         { "cmdline", open_self_cmdline, is_proc_myself },
6852 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6853         { "/proc/net/route", open_net_route, is_proc },
6854 #endif
6855         { NULL, NULL, NULL }
6856     };
6857 
6858     if (is_proc_myself(pathname, "exe")) {
6859         int execfd = qemu_getauxval(AT_EXECFD);
6860         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
6861     }
6862 
6863     for (fake_open = fakes; fake_open->filename; fake_open++) {
6864         if (fake_open->cmp(pathname, fake_open->filename)) {
6865             break;
6866         }
6867     }
6868 
6869     if (fake_open->filename) {
6870         const char *tmpdir;
6871         char filename[PATH_MAX];
6872         int fd, r;
6873 
6874         /* create temporary file to map stat to */
6875         tmpdir = getenv("TMPDIR");
6876         if (!tmpdir)
6877             tmpdir = "/tmp";
6878         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
6879         fd = mkstemp(filename);
6880         if (fd < 0) {
6881             return fd;
6882         }
6883         unlink(filename);
6884 
6885         if ((r = fake_open->fill(cpu_env, fd))) {
6886             int e = errno;
6887             close(fd);
6888             errno = e;
6889             return r;
6890         }
6891         lseek(fd, 0, SEEK_SET);
6892 
6893         return fd;
6894     }
6895 
6896     return safe_openat(dirfd, path(pathname), flags, mode);
6897 }
6898 
6899 #define TIMER_MAGIC 0x0caf0000
6900 #define TIMER_MAGIC_MASK 0xffff0000
6901 
6902 /* Convert QEMU provided timer ID back to internal 16bit index format */
6903 static target_timer_t get_timer_id(abi_long arg)
6904 {
6905     target_timer_t timerid = arg;
6906 
6907     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6908         return -TARGET_EINVAL;
6909     }
6910 
6911     timerid &= 0xffff;
6912 
6913     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6914         return -TARGET_EINVAL;
6915     }
6916 
6917     return timerid;
6918 }
6919 
6920 static int target_to_host_cpu_mask(unsigned long *host_mask,
6921                                    size_t host_size,
6922                                    abi_ulong target_addr,
6923                                    size_t target_size)
6924 {
6925     unsigned target_bits = sizeof(abi_ulong) * 8;
6926     unsigned host_bits = sizeof(*host_mask) * 8;
6927     abi_ulong *target_mask;
6928     unsigned i, j;
6929 
6930     assert(host_size >= target_size);
6931 
6932     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
6933     if (!target_mask) {
6934         return -TARGET_EFAULT;
6935     }
6936     memset(host_mask, 0, host_size);
6937 
6938     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6939         unsigned bit = i * target_bits;
6940         abi_ulong val;
6941 
6942         __get_user(val, &target_mask[i]);
6943         for (j = 0; j < target_bits; j++, bit++) {
6944             if (val & (1UL << j)) {
6945                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
6946             }
6947         }
6948     }
6949 
6950     unlock_user(target_mask, target_addr, 0);
6951     return 0;
6952 }
6953 
6954 static int host_to_target_cpu_mask(const unsigned long *host_mask,
6955                                    size_t host_size,
6956                                    abi_ulong target_addr,
6957                                    size_t target_size)
6958 {
6959     unsigned target_bits = sizeof(abi_ulong) * 8;
6960     unsigned host_bits = sizeof(*host_mask) * 8;
6961     abi_ulong *target_mask;
6962     unsigned i, j;
6963 
6964     assert(host_size >= target_size);
6965 
6966     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
6967     if (!target_mask) {
6968         return -TARGET_EFAULT;
6969     }
6970 
6971     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6972         unsigned bit = i * target_bits;
6973         abi_ulong val = 0;
6974 
6975         for (j = 0; j < target_bits; j++, bit++) {
6976             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
6977                 val |= 1UL << j;
6978             }
6979         }
6980         __put_user(val, &target_mask[i]);
6981     }
6982 
6983     unlock_user(target_mask, target_addr, target_size);
6984     return 0;
6985 }
6986 
6987 /* This is an internal helper for do_syscall so that it is easier
6988  * to have a single return point, so that actions, such as logging
6989  * of syscall results, can be performed.
6990  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
6991  */
6992 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
6993                             abi_long arg2, abi_long arg3, abi_long arg4,
6994                             abi_long arg5, abi_long arg6, abi_long arg7,
6995                             abi_long arg8)
6996 {
6997     CPUState *cpu = ENV_GET_CPU(cpu_env);
6998     abi_long ret;
6999 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7000     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7001     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
7002     struct stat st;
7003 #endif
7004 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7005     || defined(TARGET_NR_fstatfs)
7006     struct statfs stfs;
7007 #endif
7008     void *p;
7009 
7010     switch(num) {
7011     case TARGET_NR_exit:
7012         /* In old applications this may be used to implement _exit(2).
7013            However in threaded applictions it is used for thread termination,
7014            and _exit_group is used for application termination.
7015            Do thread termination if we have more then one thread.  */
7016 
7017         if (block_signals()) {
7018             return -TARGET_ERESTARTSYS;
7019         }
7020 
7021         cpu_list_lock();
7022 
7023         if (CPU_NEXT(first_cpu)) {
7024             TaskState *ts;
7025 
7026             /* Remove the CPU from the list.  */
7027             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7028 
7029             cpu_list_unlock();
7030 
7031             ts = cpu->opaque;
7032             if (ts->child_tidptr) {
7033                 put_user_u32(0, ts->child_tidptr);
7034                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7035                           NULL, NULL, 0);
7036             }
7037             thread_cpu = NULL;
7038             object_unref(OBJECT(cpu));
7039             g_free(ts);
7040             rcu_unregister_thread();
7041             pthread_exit(NULL);
7042         }
7043 
7044         cpu_list_unlock();
7045         preexit_cleanup(cpu_env, arg1);
7046         _exit(arg1);
7047         return 0; /* avoid warning */
7048     case TARGET_NR_read:
7049         if (arg2 == 0 && arg3 == 0) {
7050             return get_errno(safe_read(arg1, 0, 0));
7051         } else {
7052             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7053                 return -TARGET_EFAULT;
7054             ret = get_errno(safe_read(arg1, p, arg3));
7055             if (ret >= 0 &&
7056                 fd_trans_host_to_target_data(arg1)) {
7057                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7058             }
7059             unlock_user(p, arg2, ret);
7060         }
7061         return ret;
7062     case TARGET_NR_write:
7063         if (arg2 == 0 && arg3 == 0) {
7064             return get_errno(safe_write(arg1, 0, 0));
7065         }
7066         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7067             return -TARGET_EFAULT;
7068         if (fd_trans_target_to_host_data(arg1)) {
7069             void *copy = g_malloc(arg3);
7070             memcpy(copy, p, arg3);
7071             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7072             if (ret >= 0) {
7073                 ret = get_errno(safe_write(arg1, copy, ret));
7074             }
7075             g_free(copy);
7076         } else {
7077             ret = get_errno(safe_write(arg1, p, arg3));
7078         }
7079         unlock_user(p, arg2, 0);
7080         return ret;
7081 
7082 #ifdef TARGET_NR_open
7083     case TARGET_NR_open:
7084         if (!(p = lock_user_string(arg1)))
7085             return -TARGET_EFAULT;
7086         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7087                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7088                                   arg3));
7089         fd_trans_unregister(ret);
7090         unlock_user(p, arg1, 0);
7091         return ret;
7092 #endif
7093     case TARGET_NR_openat:
7094         if (!(p = lock_user_string(arg2)))
7095             return -TARGET_EFAULT;
7096         ret = get_errno(do_openat(cpu_env, arg1, p,
7097                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7098                                   arg4));
7099         fd_trans_unregister(ret);
7100         unlock_user(p, arg2, 0);
7101         return ret;
7102 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7103     case TARGET_NR_name_to_handle_at:
7104         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7105         return ret;
7106 #endif
7107 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7108     case TARGET_NR_open_by_handle_at:
7109         ret = do_open_by_handle_at(arg1, arg2, arg3);
7110         fd_trans_unregister(ret);
7111         return ret;
7112 #endif
7113     case TARGET_NR_close:
7114         fd_trans_unregister(arg1);
7115         return get_errno(close(arg1));
7116 
7117     case TARGET_NR_brk:
7118         return do_brk(arg1);
7119 #ifdef TARGET_NR_fork
7120     case TARGET_NR_fork:
7121         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7122 #endif
7123 #ifdef TARGET_NR_waitpid
7124     case TARGET_NR_waitpid:
7125         {
7126             int status;
7127             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7128             if (!is_error(ret) && arg2 && ret
7129                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7130                 return -TARGET_EFAULT;
7131         }
7132         return ret;
7133 #endif
7134 #ifdef TARGET_NR_waitid
7135     case TARGET_NR_waitid:
7136         {
7137             siginfo_t info;
7138             info.si_pid = 0;
7139             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7140             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7141                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7142                     return -TARGET_EFAULT;
7143                 host_to_target_siginfo(p, &info);
7144                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7145             }
7146         }
7147         return ret;
7148 #endif
7149 #ifdef TARGET_NR_creat /* not on alpha */
7150     case TARGET_NR_creat:
7151         if (!(p = lock_user_string(arg1)))
7152             return -TARGET_EFAULT;
7153         ret = get_errno(creat(p, arg2));
7154         fd_trans_unregister(ret);
7155         unlock_user(p, arg1, 0);
7156         return ret;
7157 #endif
7158 #ifdef TARGET_NR_link
7159     case TARGET_NR_link:
7160         {
7161             void * p2;
7162             p = lock_user_string(arg1);
7163             p2 = lock_user_string(arg2);
7164             if (!p || !p2)
7165                 ret = -TARGET_EFAULT;
7166             else
7167                 ret = get_errno(link(p, p2));
7168             unlock_user(p2, arg2, 0);
7169             unlock_user(p, arg1, 0);
7170         }
7171         return ret;
7172 #endif
7173 #if defined(TARGET_NR_linkat)
7174     case TARGET_NR_linkat:
7175         {
7176             void * p2 = NULL;
7177             if (!arg2 || !arg4)
7178                 return -TARGET_EFAULT;
7179             p  = lock_user_string(arg2);
7180             p2 = lock_user_string(arg4);
7181             if (!p || !p2)
7182                 ret = -TARGET_EFAULT;
7183             else
7184                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7185             unlock_user(p, arg2, 0);
7186             unlock_user(p2, arg4, 0);
7187         }
7188         return ret;
7189 #endif
7190 #ifdef TARGET_NR_unlink
7191     case TARGET_NR_unlink:
7192         if (!(p = lock_user_string(arg1)))
7193             return -TARGET_EFAULT;
7194         ret = get_errno(unlink(p));
7195         unlock_user(p, arg1, 0);
7196         return ret;
7197 #endif
7198 #if defined(TARGET_NR_unlinkat)
7199     case TARGET_NR_unlinkat:
7200         if (!(p = lock_user_string(arg2)))
7201             return -TARGET_EFAULT;
7202         ret = get_errno(unlinkat(arg1, p, arg3));
7203         unlock_user(p, arg2, 0);
7204         return ret;
7205 #endif
7206     case TARGET_NR_execve:
7207         {
7208             char **argp, **envp;
7209             int argc, envc;
7210             abi_ulong gp;
7211             abi_ulong guest_argp;
7212             abi_ulong guest_envp;
7213             abi_ulong addr;
7214             char **q;
7215             int total_size = 0;
7216 
7217             argc = 0;
7218             guest_argp = arg2;
7219             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7220                 if (get_user_ual(addr, gp))
7221                     return -TARGET_EFAULT;
7222                 if (!addr)
7223                     break;
7224                 argc++;
7225             }
7226             envc = 0;
7227             guest_envp = arg3;
7228             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7229                 if (get_user_ual(addr, gp))
7230                     return -TARGET_EFAULT;
7231                 if (!addr)
7232                     break;
7233                 envc++;
7234             }
7235 
7236             argp = g_new0(char *, argc + 1);
7237             envp = g_new0(char *, envc + 1);
7238 
7239             for (gp = guest_argp, q = argp; gp;
7240                   gp += sizeof(abi_ulong), q++) {
7241                 if (get_user_ual(addr, gp))
7242                     goto execve_efault;
7243                 if (!addr)
7244                     break;
7245                 if (!(*q = lock_user_string(addr)))
7246                     goto execve_efault;
7247                 total_size += strlen(*q) + 1;
7248             }
7249             *q = NULL;
7250 
7251             for (gp = guest_envp, q = envp; gp;
7252                   gp += sizeof(abi_ulong), q++) {
7253                 if (get_user_ual(addr, gp))
7254                     goto execve_efault;
7255                 if (!addr)
7256                     break;
7257                 if (!(*q = lock_user_string(addr)))
7258                     goto execve_efault;
7259                 total_size += strlen(*q) + 1;
7260             }
7261             *q = NULL;
7262 
7263             if (!(p = lock_user_string(arg1)))
7264                 goto execve_efault;
7265             /* Although execve() is not an interruptible syscall it is
7266              * a special case where we must use the safe_syscall wrapper:
7267              * if we allow a signal to happen before we make the host
7268              * syscall then we will 'lose' it, because at the point of
7269              * execve the process leaves QEMU's control. So we use the
7270              * safe syscall wrapper to ensure that we either take the
7271              * signal as a guest signal, or else it does not happen
7272              * before the execve completes and makes it the other
7273              * program's problem.
7274              */
7275             ret = get_errno(safe_execve(p, argp, envp));
7276             unlock_user(p, arg1, 0);
7277 
7278             goto execve_end;
7279 
7280         execve_efault:
7281             ret = -TARGET_EFAULT;
7282 
7283         execve_end:
7284             for (gp = guest_argp, q = argp; *q;
7285                   gp += sizeof(abi_ulong), q++) {
7286                 if (get_user_ual(addr, gp)
7287                     || !addr)
7288                     break;
7289                 unlock_user(*q, addr, 0);
7290             }
7291             for (gp = guest_envp, q = envp; *q;
7292                   gp += sizeof(abi_ulong), q++) {
7293                 if (get_user_ual(addr, gp)
7294                     || !addr)
7295                     break;
7296                 unlock_user(*q, addr, 0);
7297             }
7298 
7299             g_free(argp);
7300             g_free(envp);
7301         }
7302         return ret;
7303     case TARGET_NR_chdir:
7304         if (!(p = lock_user_string(arg1)))
7305             return -TARGET_EFAULT;
7306         ret = get_errno(chdir(p));
7307         unlock_user(p, arg1, 0);
7308         return ret;
7309 #ifdef TARGET_NR_time
7310     case TARGET_NR_time:
7311         {
7312             time_t host_time;
7313             ret = get_errno(time(&host_time));
7314             if (!is_error(ret)
7315                 && arg1
7316                 && put_user_sal(host_time, arg1))
7317                 return -TARGET_EFAULT;
7318         }
7319         return ret;
7320 #endif
7321 #ifdef TARGET_NR_mknod
7322     case TARGET_NR_mknod:
7323         if (!(p = lock_user_string(arg1)))
7324             return -TARGET_EFAULT;
7325         ret = get_errno(mknod(p, arg2, arg3));
7326         unlock_user(p, arg1, 0);
7327         return ret;
7328 #endif
7329 #if defined(TARGET_NR_mknodat)
7330     case TARGET_NR_mknodat:
7331         if (!(p = lock_user_string(arg2)))
7332             return -TARGET_EFAULT;
7333         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7334         unlock_user(p, arg2, 0);
7335         return ret;
7336 #endif
7337 #ifdef TARGET_NR_chmod
7338     case TARGET_NR_chmod:
7339         if (!(p = lock_user_string(arg1)))
7340             return -TARGET_EFAULT;
7341         ret = get_errno(chmod(p, arg2));
7342         unlock_user(p, arg1, 0);
7343         return ret;
7344 #endif
7345 #ifdef TARGET_NR_lseek
7346     case TARGET_NR_lseek:
7347         return get_errno(lseek(arg1, arg2, arg3));
7348 #endif
7349 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7350     /* Alpha specific */
7351     case TARGET_NR_getxpid:
7352         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7353         return get_errno(getpid());
7354 #endif
7355 #ifdef TARGET_NR_getpid
7356     case TARGET_NR_getpid:
7357         return get_errno(getpid());
7358 #endif
7359     case TARGET_NR_mount:
7360         {
7361             /* need to look at the data field */
7362             void *p2, *p3;
7363 
7364             if (arg1) {
7365                 p = lock_user_string(arg1);
7366                 if (!p) {
7367                     return -TARGET_EFAULT;
7368                 }
7369             } else {
7370                 p = NULL;
7371             }
7372 
7373             p2 = lock_user_string(arg2);
7374             if (!p2) {
7375                 if (arg1) {
7376                     unlock_user(p, arg1, 0);
7377                 }
7378                 return -TARGET_EFAULT;
7379             }
7380 
7381             if (arg3) {
7382                 p3 = lock_user_string(arg3);
7383                 if (!p3) {
7384                     if (arg1) {
7385                         unlock_user(p, arg1, 0);
7386                     }
7387                     unlock_user(p2, arg2, 0);
7388                     return -TARGET_EFAULT;
7389                 }
7390             } else {
7391                 p3 = NULL;
7392             }
7393 
7394             /* FIXME - arg5 should be locked, but it isn't clear how to
7395              * do that since it's not guaranteed to be a NULL-terminated
7396              * string.
7397              */
7398             if (!arg5) {
7399                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7400             } else {
7401                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7402             }
7403             ret = get_errno(ret);
7404 
7405             if (arg1) {
7406                 unlock_user(p, arg1, 0);
7407             }
7408             unlock_user(p2, arg2, 0);
7409             if (arg3) {
7410                 unlock_user(p3, arg3, 0);
7411             }
7412         }
7413         return ret;
7414 #ifdef TARGET_NR_umount
7415     case TARGET_NR_umount:
7416         if (!(p = lock_user_string(arg1)))
7417             return -TARGET_EFAULT;
7418         ret = get_errno(umount(p));
7419         unlock_user(p, arg1, 0);
7420         return ret;
7421 #endif
7422 #ifdef TARGET_NR_stime /* not on alpha */
7423     case TARGET_NR_stime:
7424         {
7425             time_t host_time;
7426             if (get_user_sal(host_time, arg1))
7427                 return -TARGET_EFAULT;
7428             return get_errno(stime(&host_time));
7429         }
7430 #endif
7431 #ifdef TARGET_NR_alarm /* not on alpha */
7432     case TARGET_NR_alarm:
7433         return alarm(arg1);
7434 #endif
7435 #ifdef TARGET_NR_pause /* not on alpha */
7436     case TARGET_NR_pause:
7437         if (!block_signals()) {
7438             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7439         }
7440         return -TARGET_EINTR;
7441 #endif
7442 #ifdef TARGET_NR_utime
7443     case TARGET_NR_utime:
7444         {
7445             struct utimbuf tbuf, *host_tbuf;
7446             struct target_utimbuf *target_tbuf;
7447             if (arg2) {
7448                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7449                     return -TARGET_EFAULT;
7450                 tbuf.actime = tswapal(target_tbuf->actime);
7451                 tbuf.modtime = tswapal(target_tbuf->modtime);
7452                 unlock_user_struct(target_tbuf, arg2, 0);
7453                 host_tbuf = &tbuf;
7454             } else {
7455                 host_tbuf = NULL;
7456             }
7457             if (!(p = lock_user_string(arg1)))
7458                 return -TARGET_EFAULT;
7459             ret = get_errno(utime(p, host_tbuf));
7460             unlock_user(p, arg1, 0);
7461         }
7462         return ret;
7463 #endif
7464 #ifdef TARGET_NR_utimes
7465     case TARGET_NR_utimes:
7466         {
7467             struct timeval *tvp, tv[2];
7468             if (arg2) {
7469                 if (copy_from_user_timeval(&tv[0], arg2)
7470                     || copy_from_user_timeval(&tv[1],
7471                                               arg2 + sizeof(struct target_timeval)))
7472                     return -TARGET_EFAULT;
7473                 tvp = tv;
7474             } else {
7475                 tvp = NULL;
7476             }
7477             if (!(p = lock_user_string(arg1)))
7478                 return -TARGET_EFAULT;
7479             ret = get_errno(utimes(p, tvp));
7480             unlock_user(p, arg1, 0);
7481         }
7482         return ret;
7483 #endif
7484 #if defined(TARGET_NR_futimesat)
7485     case TARGET_NR_futimesat:
7486         {
7487             struct timeval *tvp, tv[2];
7488             if (arg3) {
7489                 if (copy_from_user_timeval(&tv[0], arg3)
7490                     || copy_from_user_timeval(&tv[1],
7491                                               arg3 + sizeof(struct target_timeval)))
7492                     return -TARGET_EFAULT;
7493                 tvp = tv;
7494             } else {
7495                 tvp = NULL;
7496             }
7497             if (!(p = lock_user_string(arg2))) {
7498                 return -TARGET_EFAULT;
7499             }
7500             ret = get_errno(futimesat(arg1, path(p), tvp));
7501             unlock_user(p, arg2, 0);
7502         }
7503         return ret;
7504 #endif
7505 #ifdef TARGET_NR_access
7506     case TARGET_NR_access:
7507         if (!(p = lock_user_string(arg1))) {
7508             return -TARGET_EFAULT;
7509         }
7510         ret = get_errno(access(path(p), arg2));
7511         unlock_user(p, arg1, 0);
7512         return ret;
7513 #endif
7514 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7515     case TARGET_NR_faccessat:
7516         if (!(p = lock_user_string(arg2))) {
7517             return -TARGET_EFAULT;
7518         }
7519         ret = get_errno(faccessat(arg1, p, arg3, 0));
7520         unlock_user(p, arg2, 0);
7521         return ret;
7522 #endif
7523 #ifdef TARGET_NR_nice /* not on alpha */
7524     case TARGET_NR_nice:
7525         return get_errno(nice(arg1));
7526 #endif
7527     case TARGET_NR_sync:
7528         sync();
7529         return 0;
7530 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7531     case TARGET_NR_syncfs:
7532         return get_errno(syncfs(arg1));
7533 #endif
7534     case TARGET_NR_kill:
7535         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7536 #ifdef TARGET_NR_rename
7537     case TARGET_NR_rename:
7538         {
7539             void *p2;
7540             p = lock_user_string(arg1);
7541             p2 = lock_user_string(arg2);
7542             if (!p || !p2)
7543                 ret = -TARGET_EFAULT;
7544             else
7545                 ret = get_errno(rename(p, p2));
7546             unlock_user(p2, arg2, 0);
7547             unlock_user(p, arg1, 0);
7548         }
7549         return ret;
7550 #endif
7551 #if defined(TARGET_NR_renameat)
7552     case TARGET_NR_renameat:
7553         {
7554             void *p2;
7555             p  = lock_user_string(arg2);
7556             p2 = lock_user_string(arg4);
7557             if (!p || !p2)
7558                 ret = -TARGET_EFAULT;
7559             else
7560                 ret = get_errno(renameat(arg1, p, arg3, p2));
7561             unlock_user(p2, arg4, 0);
7562             unlock_user(p, arg2, 0);
7563         }
7564         return ret;
7565 #endif
7566 #if defined(TARGET_NR_renameat2)
7567     case TARGET_NR_renameat2:
7568         {
7569             void *p2;
7570             p  = lock_user_string(arg2);
7571             p2 = lock_user_string(arg4);
7572             if (!p || !p2) {
7573                 ret = -TARGET_EFAULT;
7574             } else {
7575                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7576             }
7577             unlock_user(p2, arg4, 0);
7578             unlock_user(p, arg2, 0);
7579         }
7580         return ret;
7581 #endif
7582 #ifdef TARGET_NR_mkdir
7583     case TARGET_NR_mkdir:
7584         if (!(p = lock_user_string(arg1)))
7585             return -TARGET_EFAULT;
7586         ret = get_errno(mkdir(p, arg2));
7587         unlock_user(p, arg1, 0);
7588         return ret;
7589 #endif
7590 #if defined(TARGET_NR_mkdirat)
7591     case TARGET_NR_mkdirat:
7592         if (!(p = lock_user_string(arg2)))
7593             return -TARGET_EFAULT;
7594         ret = get_errno(mkdirat(arg1, p, arg3));
7595         unlock_user(p, arg2, 0);
7596         return ret;
7597 #endif
7598 #ifdef TARGET_NR_rmdir
7599     case TARGET_NR_rmdir:
7600         if (!(p = lock_user_string(arg1)))
7601             return -TARGET_EFAULT;
7602         ret = get_errno(rmdir(p));
7603         unlock_user(p, arg1, 0);
7604         return ret;
7605 #endif
7606     case TARGET_NR_dup:
7607         ret = get_errno(dup(arg1));
7608         if (ret >= 0) {
7609             fd_trans_dup(arg1, ret);
7610         }
7611         return ret;
7612 #ifdef TARGET_NR_pipe
7613     case TARGET_NR_pipe:
7614         return do_pipe(cpu_env, arg1, 0, 0);
7615 #endif
7616 #ifdef TARGET_NR_pipe2
7617     case TARGET_NR_pipe2:
7618         return do_pipe(cpu_env, arg1,
7619                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7620 #endif
7621     case TARGET_NR_times:
7622         {
7623             struct target_tms *tmsp;
7624             struct tms tms;
7625             ret = get_errno(times(&tms));
7626             if (arg1) {
7627                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7628                 if (!tmsp)
7629                     return -TARGET_EFAULT;
7630                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7631                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7632                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7633                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7634             }
7635             if (!is_error(ret))
7636                 ret = host_to_target_clock_t(ret);
7637         }
7638         return ret;
7639     case TARGET_NR_acct:
7640         if (arg1 == 0) {
7641             ret = get_errno(acct(NULL));
7642         } else {
7643             if (!(p = lock_user_string(arg1))) {
7644                 return -TARGET_EFAULT;
7645             }
7646             ret = get_errno(acct(path(p)));
7647             unlock_user(p, arg1, 0);
7648         }
7649         return ret;
7650 #ifdef TARGET_NR_umount2
7651     case TARGET_NR_umount2:
7652         if (!(p = lock_user_string(arg1)))
7653             return -TARGET_EFAULT;
7654         ret = get_errno(umount2(p, arg2));
7655         unlock_user(p, arg1, 0);
7656         return ret;
7657 #endif
7658     case TARGET_NR_ioctl:
7659         return do_ioctl(arg1, arg2, arg3);
7660 #ifdef TARGET_NR_fcntl
7661     case TARGET_NR_fcntl:
7662         return do_fcntl(arg1, arg2, arg3);
7663 #endif
7664     case TARGET_NR_setpgid:
7665         return get_errno(setpgid(arg1, arg2));
7666     case TARGET_NR_umask:
7667         return get_errno(umask(arg1));
7668     case TARGET_NR_chroot:
7669         if (!(p = lock_user_string(arg1)))
7670             return -TARGET_EFAULT;
7671         ret = get_errno(chroot(p));
7672         unlock_user(p, arg1, 0);
7673         return ret;
7674 #ifdef TARGET_NR_dup2
7675     case TARGET_NR_dup2:
7676         ret = get_errno(dup2(arg1, arg2));
7677         if (ret >= 0) {
7678             fd_trans_dup(arg1, arg2);
7679         }
7680         return ret;
7681 #endif
7682 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7683     case TARGET_NR_dup3:
7684     {
7685         int host_flags;
7686 
7687         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7688             return -EINVAL;
7689         }
7690         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7691         ret = get_errno(dup3(arg1, arg2, host_flags));
7692         if (ret >= 0) {
7693             fd_trans_dup(arg1, arg2);
7694         }
7695         return ret;
7696     }
7697 #endif
7698 #ifdef TARGET_NR_getppid /* not on alpha */
7699     case TARGET_NR_getppid:
7700         return get_errno(getppid());
7701 #endif
7702 #ifdef TARGET_NR_getpgrp
7703     case TARGET_NR_getpgrp:
7704         return get_errno(getpgrp());
7705 #endif
7706     case TARGET_NR_setsid:
7707         return get_errno(setsid());
7708 #ifdef TARGET_NR_sigaction
7709     case TARGET_NR_sigaction:
7710         {
7711 #if defined(TARGET_ALPHA)
7712             struct target_sigaction act, oact, *pact = 0;
7713             struct target_old_sigaction *old_act;
7714             if (arg2) {
7715                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7716                     return -TARGET_EFAULT;
7717                 act._sa_handler = old_act->_sa_handler;
7718                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7719                 act.sa_flags = old_act->sa_flags;
7720                 act.sa_restorer = 0;
7721                 unlock_user_struct(old_act, arg2, 0);
7722                 pact = &act;
7723             }
7724             ret = get_errno(do_sigaction(arg1, pact, &oact));
7725             if (!is_error(ret) && arg3) {
7726                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7727                     return -TARGET_EFAULT;
7728                 old_act->_sa_handler = oact._sa_handler;
7729                 old_act->sa_mask = oact.sa_mask.sig[0];
7730                 old_act->sa_flags = oact.sa_flags;
7731                 unlock_user_struct(old_act, arg3, 1);
7732             }
7733 #elif defined(TARGET_MIPS)
7734 	    struct target_sigaction act, oact, *pact, *old_act;
7735 
7736 	    if (arg2) {
7737                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7738                     return -TARGET_EFAULT;
7739 		act._sa_handler = old_act->_sa_handler;
7740 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7741 		act.sa_flags = old_act->sa_flags;
7742 		unlock_user_struct(old_act, arg2, 0);
7743 		pact = &act;
7744 	    } else {
7745 		pact = NULL;
7746 	    }
7747 
7748 	    ret = get_errno(do_sigaction(arg1, pact, &oact));
7749 
7750 	    if (!is_error(ret) && arg3) {
7751                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7752                     return -TARGET_EFAULT;
7753 		old_act->_sa_handler = oact._sa_handler;
7754 		old_act->sa_flags = oact.sa_flags;
7755 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7756 		old_act->sa_mask.sig[1] = 0;
7757 		old_act->sa_mask.sig[2] = 0;
7758 		old_act->sa_mask.sig[3] = 0;
7759 		unlock_user_struct(old_act, arg3, 1);
7760 	    }
7761 #else
7762             struct target_old_sigaction *old_act;
7763             struct target_sigaction act, oact, *pact;
7764             if (arg2) {
7765                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7766                     return -TARGET_EFAULT;
7767                 act._sa_handler = old_act->_sa_handler;
7768                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7769                 act.sa_flags = old_act->sa_flags;
7770                 act.sa_restorer = old_act->sa_restorer;
7771 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7772                 act.ka_restorer = 0;
7773 #endif
7774                 unlock_user_struct(old_act, arg2, 0);
7775                 pact = &act;
7776             } else {
7777                 pact = NULL;
7778             }
7779             ret = get_errno(do_sigaction(arg1, pact, &oact));
7780             if (!is_error(ret) && arg3) {
7781                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7782                     return -TARGET_EFAULT;
7783                 old_act->_sa_handler = oact._sa_handler;
7784                 old_act->sa_mask = oact.sa_mask.sig[0];
7785                 old_act->sa_flags = oact.sa_flags;
7786                 old_act->sa_restorer = oact.sa_restorer;
7787                 unlock_user_struct(old_act, arg3, 1);
7788             }
7789 #endif
7790         }
7791         return ret;
7792 #endif
7793     case TARGET_NR_rt_sigaction:
7794         {
7795 #if defined(TARGET_ALPHA)
7796             /* For Alpha and SPARC this is a 5 argument syscall, with
7797              * a 'restorer' parameter which must be copied into the
7798              * sa_restorer field of the sigaction struct.
7799              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7800              * and arg5 is the sigsetsize.
7801              * Alpha also has a separate rt_sigaction struct that it uses
7802              * here; SPARC uses the usual sigaction struct.
7803              */
7804             struct target_rt_sigaction *rt_act;
7805             struct target_sigaction act, oact, *pact = 0;
7806 
7807             if (arg4 != sizeof(target_sigset_t)) {
7808                 return -TARGET_EINVAL;
7809             }
7810             if (arg2) {
7811                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7812                     return -TARGET_EFAULT;
7813                 act._sa_handler = rt_act->_sa_handler;
7814                 act.sa_mask = rt_act->sa_mask;
7815                 act.sa_flags = rt_act->sa_flags;
7816                 act.sa_restorer = arg5;
7817                 unlock_user_struct(rt_act, arg2, 0);
7818                 pact = &act;
7819             }
7820             ret = get_errno(do_sigaction(arg1, pact, &oact));
7821             if (!is_error(ret) && arg3) {
7822                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7823                     return -TARGET_EFAULT;
7824                 rt_act->_sa_handler = oact._sa_handler;
7825                 rt_act->sa_mask = oact.sa_mask;
7826                 rt_act->sa_flags = oact.sa_flags;
7827                 unlock_user_struct(rt_act, arg3, 1);
7828             }
7829 #else
7830 #ifdef TARGET_SPARC
7831             target_ulong restorer = arg4;
7832             target_ulong sigsetsize = arg5;
7833 #else
7834             target_ulong sigsetsize = arg4;
7835 #endif
7836             struct target_sigaction *act;
7837             struct target_sigaction *oact;
7838 
7839             if (sigsetsize != sizeof(target_sigset_t)) {
7840                 return -TARGET_EINVAL;
7841             }
7842             if (arg2) {
7843                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
7844                     return -TARGET_EFAULT;
7845                 }
7846 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7847                 act->ka_restorer = restorer;
7848 #endif
7849             } else {
7850                 act = NULL;
7851             }
7852             if (arg3) {
7853                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7854                     ret = -TARGET_EFAULT;
7855                     goto rt_sigaction_fail;
7856                 }
7857             } else
7858                 oact = NULL;
7859             ret = get_errno(do_sigaction(arg1, act, oact));
7860 	rt_sigaction_fail:
7861             if (act)
7862                 unlock_user_struct(act, arg2, 0);
7863             if (oact)
7864                 unlock_user_struct(oact, arg3, 1);
7865 #endif
7866         }
7867         return ret;
7868 #ifdef TARGET_NR_sgetmask /* not on alpha */
7869     case TARGET_NR_sgetmask:
7870         {
7871             sigset_t cur_set;
7872             abi_ulong target_set;
7873             ret = do_sigprocmask(0, NULL, &cur_set);
7874             if (!ret) {
7875                 host_to_target_old_sigset(&target_set, &cur_set);
7876                 ret = target_set;
7877             }
7878         }
7879         return ret;
7880 #endif
7881 #ifdef TARGET_NR_ssetmask /* not on alpha */
7882     case TARGET_NR_ssetmask:
7883         {
7884             sigset_t set, oset;
7885             abi_ulong target_set = arg1;
7886             target_to_host_old_sigset(&set, &target_set);
7887             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7888             if (!ret) {
7889                 host_to_target_old_sigset(&target_set, &oset);
7890                 ret = target_set;
7891             }
7892         }
7893         return ret;
7894 #endif
7895 #ifdef TARGET_NR_sigprocmask
7896     case TARGET_NR_sigprocmask:
7897         {
7898 #if defined(TARGET_ALPHA)
7899             sigset_t set, oldset;
7900             abi_ulong mask;
7901             int how;
7902 
7903             switch (arg1) {
7904             case TARGET_SIG_BLOCK:
7905                 how = SIG_BLOCK;
7906                 break;
7907             case TARGET_SIG_UNBLOCK:
7908                 how = SIG_UNBLOCK;
7909                 break;
7910             case TARGET_SIG_SETMASK:
7911                 how = SIG_SETMASK;
7912                 break;
7913             default:
7914                 return -TARGET_EINVAL;
7915             }
7916             mask = arg2;
7917             target_to_host_old_sigset(&set, &mask);
7918 
7919             ret = do_sigprocmask(how, &set, &oldset);
7920             if (!is_error(ret)) {
7921                 host_to_target_old_sigset(&mask, &oldset);
7922                 ret = mask;
7923                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7924             }
7925 #else
7926             sigset_t set, oldset, *set_ptr;
7927             int how;
7928 
7929             if (arg2) {
7930                 switch (arg1) {
7931                 case TARGET_SIG_BLOCK:
7932                     how = SIG_BLOCK;
7933                     break;
7934                 case TARGET_SIG_UNBLOCK:
7935                     how = SIG_UNBLOCK;
7936                     break;
7937                 case TARGET_SIG_SETMASK:
7938                     how = SIG_SETMASK;
7939                     break;
7940                 default:
7941                     return -TARGET_EINVAL;
7942                 }
7943                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7944                     return -TARGET_EFAULT;
7945                 target_to_host_old_sigset(&set, p);
7946                 unlock_user(p, arg2, 0);
7947                 set_ptr = &set;
7948             } else {
7949                 how = 0;
7950                 set_ptr = NULL;
7951             }
7952             ret = do_sigprocmask(how, set_ptr, &oldset);
7953             if (!is_error(ret) && arg3) {
7954                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7955                     return -TARGET_EFAULT;
7956                 host_to_target_old_sigset(p, &oldset);
7957                 unlock_user(p, arg3, sizeof(target_sigset_t));
7958             }
7959 #endif
7960         }
7961         return ret;
7962 #endif
7963     case TARGET_NR_rt_sigprocmask:
7964         {
7965             int how = arg1;
7966             sigset_t set, oldset, *set_ptr;
7967 
7968             if (arg4 != sizeof(target_sigset_t)) {
7969                 return -TARGET_EINVAL;
7970             }
7971 
7972             if (arg2) {
7973                 switch(how) {
7974                 case TARGET_SIG_BLOCK:
7975                     how = SIG_BLOCK;
7976                     break;
7977                 case TARGET_SIG_UNBLOCK:
7978                     how = SIG_UNBLOCK;
7979                     break;
7980                 case TARGET_SIG_SETMASK:
7981                     how = SIG_SETMASK;
7982                     break;
7983                 default:
7984                     return -TARGET_EINVAL;
7985                 }
7986                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7987                     return -TARGET_EFAULT;
7988                 target_to_host_sigset(&set, p);
7989                 unlock_user(p, arg2, 0);
7990                 set_ptr = &set;
7991             } else {
7992                 how = 0;
7993                 set_ptr = NULL;
7994             }
7995             ret = do_sigprocmask(how, set_ptr, &oldset);
7996             if (!is_error(ret) && arg3) {
7997                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7998                     return -TARGET_EFAULT;
7999                 host_to_target_sigset(p, &oldset);
8000                 unlock_user(p, arg3, sizeof(target_sigset_t));
8001             }
8002         }
8003         return ret;
8004 #ifdef TARGET_NR_sigpending
8005     case TARGET_NR_sigpending:
8006         {
8007             sigset_t set;
8008             ret = get_errno(sigpending(&set));
8009             if (!is_error(ret)) {
8010                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8011                     return -TARGET_EFAULT;
8012                 host_to_target_old_sigset(p, &set);
8013                 unlock_user(p, arg1, sizeof(target_sigset_t));
8014             }
8015         }
8016         return ret;
8017 #endif
8018     case TARGET_NR_rt_sigpending:
8019         {
8020             sigset_t set;
8021 
8022             /* Yes, this check is >, not != like most. We follow the kernel's
8023              * logic and it does it like this because it implements
8024              * NR_sigpending through the same code path, and in that case
8025              * the old_sigset_t is smaller in size.
8026              */
8027             if (arg2 > sizeof(target_sigset_t)) {
8028                 return -TARGET_EINVAL;
8029             }
8030 
8031             ret = get_errno(sigpending(&set));
8032             if (!is_error(ret)) {
8033                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8034                     return -TARGET_EFAULT;
8035                 host_to_target_sigset(p, &set);
8036                 unlock_user(p, arg1, sizeof(target_sigset_t));
8037             }
8038         }
8039         return ret;
8040 #ifdef TARGET_NR_sigsuspend
8041     case TARGET_NR_sigsuspend:
8042         {
8043             TaskState *ts = cpu->opaque;
8044 #if defined(TARGET_ALPHA)
8045             abi_ulong mask = arg1;
8046             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8047 #else
8048             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8049                 return -TARGET_EFAULT;
8050             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8051             unlock_user(p, arg1, 0);
8052 #endif
8053             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8054                                                SIGSET_T_SIZE));
8055             if (ret != -TARGET_ERESTARTSYS) {
8056                 ts->in_sigsuspend = 1;
8057             }
8058         }
8059         return ret;
8060 #endif
8061     case TARGET_NR_rt_sigsuspend:
8062         {
8063             TaskState *ts = cpu->opaque;
8064 
8065             if (arg2 != sizeof(target_sigset_t)) {
8066                 return -TARGET_EINVAL;
8067             }
8068             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8069                 return -TARGET_EFAULT;
8070             target_to_host_sigset(&ts->sigsuspend_mask, p);
8071             unlock_user(p, arg1, 0);
8072             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8073                                                SIGSET_T_SIZE));
8074             if (ret != -TARGET_ERESTARTSYS) {
8075                 ts->in_sigsuspend = 1;
8076             }
8077         }
8078         return ret;
8079     case TARGET_NR_rt_sigtimedwait:
8080         {
8081             sigset_t set;
8082             struct timespec uts, *puts;
8083             siginfo_t uinfo;
8084 
8085             if (arg4 != sizeof(target_sigset_t)) {
8086                 return -TARGET_EINVAL;
8087             }
8088 
8089             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8090                 return -TARGET_EFAULT;
8091             target_to_host_sigset(&set, p);
8092             unlock_user(p, arg1, 0);
8093             if (arg3) {
8094                 puts = &uts;
8095                 target_to_host_timespec(puts, arg3);
8096             } else {
8097                 puts = NULL;
8098             }
8099             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8100                                                  SIGSET_T_SIZE));
8101             if (!is_error(ret)) {
8102                 if (arg2) {
8103                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8104                                   0);
8105                     if (!p) {
8106                         return -TARGET_EFAULT;
8107                     }
8108                     host_to_target_siginfo(p, &uinfo);
8109                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8110                 }
8111                 ret = host_to_target_signal(ret);
8112             }
8113         }
8114         return ret;
8115     case TARGET_NR_rt_sigqueueinfo:
8116         {
8117             siginfo_t uinfo;
8118 
8119             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8120             if (!p) {
8121                 return -TARGET_EFAULT;
8122             }
8123             target_to_host_siginfo(&uinfo, p);
8124             unlock_user(p, arg3, 0);
8125             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8126         }
8127         return ret;
8128     case TARGET_NR_rt_tgsigqueueinfo:
8129         {
8130             siginfo_t uinfo;
8131 
8132             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8133             if (!p) {
8134                 return -TARGET_EFAULT;
8135             }
8136             target_to_host_siginfo(&uinfo, p);
8137             unlock_user(p, arg4, 0);
8138             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8139         }
8140         return ret;
8141 #ifdef TARGET_NR_sigreturn
8142     case TARGET_NR_sigreturn:
8143         if (block_signals()) {
8144             return -TARGET_ERESTARTSYS;
8145         }
8146         return do_sigreturn(cpu_env);
8147 #endif
8148     case TARGET_NR_rt_sigreturn:
8149         if (block_signals()) {
8150             return -TARGET_ERESTARTSYS;
8151         }
8152         return do_rt_sigreturn(cpu_env);
8153     case TARGET_NR_sethostname:
8154         if (!(p = lock_user_string(arg1)))
8155             return -TARGET_EFAULT;
8156         ret = get_errno(sethostname(p, arg2));
8157         unlock_user(p, arg1, 0);
8158         return ret;
8159 #ifdef TARGET_NR_setrlimit
8160     case TARGET_NR_setrlimit:
8161         {
8162             int resource = target_to_host_resource(arg1);
8163             struct target_rlimit *target_rlim;
8164             struct rlimit rlim;
8165             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8166                 return -TARGET_EFAULT;
8167             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8168             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8169             unlock_user_struct(target_rlim, arg2, 0);
8170             /*
8171              * If we just passed through resource limit settings for memory then
8172              * they would also apply to QEMU's own allocations, and QEMU will
8173              * crash or hang or die if its allocations fail. Ideally we would
8174              * track the guest allocations in QEMU and apply the limits ourselves.
8175              * For now, just tell the guest the call succeeded but don't actually
8176              * limit anything.
8177              */
8178             if (resource != RLIMIT_AS &&
8179                 resource != RLIMIT_DATA &&
8180                 resource != RLIMIT_STACK) {
8181                 return get_errno(setrlimit(resource, &rlim));
8182             } else {
8183                 return 0;
8184             }
8185         }
8186 #endif
8187 #ifdef TARGET_NR_getrlimit
8188     case TARGET_NR_getrlimit:
8189         {
8190             int resource = target_to_host_resource(arg1);
8191             struct target_rlimit *target_rlim;
8192             struct rlimit rlim;
8193 
8194             ret = get_errno(getrlimit(resource, &rlim));
8195             if (!is_error(ret)) {
8196                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8197                     return -TARGET_EFAULT;
8198                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8199                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8200                 unlock_user_struct(target_rlim, arg2, 1);
8201             }
8202         }
8203         return ret;
8204 #endif
8205     case TARGET_NR_getrusage:
8206         {
8207             struct rusage rusage;
8208             ret = get_errno(getrusage(arg1, &rusage));
8209             if (!is_error(ret)) {
8210                 ret = host_to_target_rusage(arg2, &rusage);
8211             }
8212         }
8213         return ret;
8214     case TARGET_NR_gettimeofday:
8215         {
8216             struct timeval tv;
8217             ret = get_errno(gettimeofday(&tv, NULL));
8218             if (!is_error(ret)) {
8219                 if (copy_to_user_timeval(arg1, &tv))
8220                     return -TARGET_EFAULT;
8221             }
8222         }
8223         return ret;
8224     case TARGET_NR_settimeofday:
8225         {
8226             struct timeval tv, *ptv = NULL;
8227             struct timezone tz, *ptz = NULL;
8228 
8229             if (arg1) {
8230                 if (copy_from_user_timeval(&tv, arg1)) {
8231                     return -TARGET_EFAULT;
8232                 }
8233                 ptv = &tv;
8234             }
8235 
8236             if (arg2) {
8237                 if (copy_from_user_timezone(&tz, arg2)) {
8238                     return -TARGET_EFAULT;
8239                 }
8240                 ptz = &tz;
8241             }
8242 
8243             return get_errno(settimeofday(ptv, ptz));
8244         }
8245 #if defined(TARGET_NR_select)
8246     case TARGET_NR_select:
8247 #if defined(TARGET_WANT_NI_OLD_SELECT)
8248         /* some architectures used to have old_select here
8249          * but now ENOSYS it.
8250          */
8251         ret = -TARGET_ENOSYS;
8252 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8253         ret = do_old_select(arg1);
8254 #else
8255         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8256 #endif
8257         return ret;
8258 #endif
8259 #ifdef TARGET_NR_pselect6
8260     case TARGET_NR_pselect6:
8261         {
8262             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8263             fd_set rfds, wfds, efds;
8264             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8265             struct timespec ts, *ts_ptr;
8266 
8267             /*
8268              * The 6th arg is actually two args smashed together,
8269              * so we cannot use the C library.
8270              */
8271             sigset_t set;
8272             struct {
8273                 sigset_t *set;
8274                 size_t size;
8275             } sig, *sig_ptr;
8276 
8277             abi_ulong arg_sigset, arg_sigsize, *arg7;
8278             target_sigset_t *target_sigset;
8279 
8280             n = arg1;
8281             rfd_addr = arg2;
8282             wfd_addr = arg3;
8283             efd_addr = arg4;
8284             ts_addr = arg5;
8285 
8286             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8287             if (ret) {
8288                 return ret;
8289             }
8290             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8291             if (ret) {
8292                 return ret;
8293             }
8294             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8295             if (ret) {
8296                 return ret;
8297             }
8298 
8299             /*
8300              * This takes a timespec, and not a timeval, so we cannot
8301              * use the do_select() helper ...
8302              */
8303             if (ts_addr) {
8304                 if (target_to_host_timespec(&ts, ts_addr)) {
8305                     return -TARGET_EFAULT;
8306                 }
8307                 ts_ptr = &ts;
8308             } else {
8309                 ts_ptr = NULL;
8310             }
8311 
8312             /* Extract the two packed args for the sigset */
8313             if (arg6) {
8314                 sig_ptr = &sig;
8315                 sig.size = SIGSET_T_SIZE;
8316 
8317                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8318                 if (!arg7) {
8319                     return -TARGET_EFAULT;
8320                 }
8321                 arg_sigset = tswapal(arg7[0]);
8322                 arg_sigsize = tswapal(arg7[1]);
8323                 unlock_user(arg7, arg6, 0);
8324 
8325                 if (arg_sigset) {
8326                     sig.set = &set;
8327                     if (arg_sigsize != sizeof(*target_sigset)) {
8328                         /* Like the kernel, we enforce correct size sigsets */
8329                         return -TARGET_EINVAL;
8330                     }
8331                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8332                                               sizeof(*target_sigset), 1);
8333                     if (!target_sigset) {
8334                         return -TARGET_EFAULT;
8335                     }
8336                     target_to_host_sigset(&set, target_sigset);
8337                     unlock_user(target_sigset, arg_sigset, 0);
8338                 } else {
8339                     sig.set = NULL;
8340                 }
8341             } else {
8342                 sig_ptr = NULL;
8343             }
8344 
8345             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8346                                           ts_ptr, sig_ptr));
8347 
8348             if (!is_error(ret)) {
8349                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8350                     return -TARGET_EFAULT;
8351                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8352                     return -TARGET_EFAULT;
8353                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8354                     return -TARGET_EFAULT;
8355 
8356                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8357                     return -TARGET_EFAULT;
8358             }
8359         }
8360         return ret;
8361 #endif
8362 #ifdef TARGET_NR_symlink
8363     case TARGET_NR_symlink:
8364         {
8365             void *p2;
8366             p = lock_user_string(arg1);
8367             p2 = lock_user_string(arg2);
8368             if (!p || !p2)
8369                 ret = -TARGET_EFAULT;
8370             else
8371                 ret = get_errno(symlink(p, p2));
8372             unlock_user(p2, arg2, 0);
8373             unlock_user(p, arg1, 0);
8374         }
8375         return ret;
8376 #endif
8377 #if defined(TARGET_NR_symlinkat)
8378     case TARGET_NR_symlinkat:
8379         {
8380             void *p2;
8381             p  = lock_user_string(arg1);
8382             p2 = lock_user_string(arg3);
8383             if (!p || !p2)
8384                 ret = -TARGET_EFAULT;
8385             else
8386                 ret = get_errno(symlinkat(p, arg2, p2));
8387             unlock_user(p2, arg3, 0);
8388             unlock_user(p, arg1, 0);
8389         }
8390         return ret;
8391 #endif
8392 #ifdef TARGET_NR_readlink
8393     case TARGET_NR_readlink:
8394         {
8395             void *p2;
8396             p = lock_user_string(arg1);
8397             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8398             if (!p || !p2) {
8399                 ret = -TARGET_EFAULT;
8400             } else if (!arg3) {
8401                 /* Short circuit this for the magic exe check. */
8402                 ret = -TARGET_EINVAL;
8403             } else if (is_proc_myself((const char *)p, "exe")) {
8404                 char real[PATH_MAX], *temp;
8405                 temp = realpath(exec_path, real);
8406                 /* Return value is # of bytes that we wrote to the buffer. */
8407                 if (temp == NULL) {
8408                     ret = get_errno(-1);
8409                 } else {
8410                     /* Don't worry about sign mismatch as earlier mapping
8411                      * logic would have thrown a bad address error. */
8412                     ret = MIN(strlen(real), arg3);
8413                     /* We cannot NUL terminate the string. */
8414                     memcpy(p2, real, ret);
8415                 }
8416             } else {
8417                 ret = get_errno(readlink(path(p), p2, arg3));
8418             }
8419             unlock_user(p2, arg2, ret);
8420             unlock_user(p, arg1, 0);
8421         }
8422         return ret;
8423 #endif
8424 #if defined(TARGET_NR_readlinkat)
8425     case TARGET_NR_readlinkat:
8426         {
8427             void *p2;
8428             p  = lock_user_string(arg2);
8429             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8430             if (!p || !p2) {
8431                 ret = -TARGET_EFAULT;
8432             } else if (is_proc_myself((const char *)p, "exe")) {
8433                 char real[PATH_MAX], *temp;
8434                 temp = realpath(exec_path, real);
8435                 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8436                 snprintf((char *)p2, arg4, "%s", real);
8437             } else {
8438                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8439             }
8440             unlock_user(p2, arg3, ret);
8441             unlock_user(p, arg2, 0);
8442         }
8443         return ret;
8444 #endif
8445 #ifdef TARGET_NR_swapon
8446     case TARGET_NR_swapon:
8447         if (!(p = lock_user_string(arg1)))
8448             return -TARGET_EFAULT;
8449         ret = get_errno(swapon(p, arg2));
8450         unlock_user(p, arg1, 0);
8451         return ret;
8452 #endif
8453     case TARGET_NR_reboot:
8454         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8455            /* arg4 must be ignored in all other cases */
8456            p = lock_user_string(arg4);
8457            if (!p) {
8458                return -TARGET_EFAULT;
8459            }
8460            ret = get_errno(reboot(arg1, arg2, arg3, p));
8461            unlock_user(p, arg4, 0);
8462         } else {
8463            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8464         }
8465         return ret;
8466 #ifdef TARGET_NR_mmap
8467     case TARGET_NR_mmap:
8468 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8469     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8470     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8471     || defined(TARGET_S390X)
8472         {
8473             abi_ulong *v;
8474             abi_ulong v1, v2, v3, v4, v5, v6;
8475             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8476                 return -TARGET_EFAULT;
8477             v1 = tswapal(v[0]);
8478             v2 = tswapal(v[1]);
8479             v3 = tswapal(v[2]);
8480             v4 = tswapal(v[3]);
8481             v5 = tswapal(v[4]);
8482             v6 = tswapal(v[5]);
8483             unlock_user(v, arg1, 0);
8484             ret = get_errno(target_mmap(v1, v2, v3,
8485                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8486                                         v5, v6));
8487         }
8488 #else
8489         ret = get_errno(target_mmap(arg1, arg2, arg3,
8490                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8491                                     arg5,
8492                                     arg6));
8493 #endif
8494         return ret;
8495 #endif
8496 #ifdef TARGET_NR_mmap2
8497     case TARGET_NR_mmap2:
8498 #ifndef MMAP_SHIFT
8499 #define MMAP_SHIFT 12
8500 #endif
8501         ret = target_mmap(arg1, arg2, arg3,
8502                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8503                           arg5, arg6 << MMAP_SHIFT);
8504         return get_errno(ret);
8505 #endif
8506     case TARGET_NR_munmap:
8507         return get_errno(target_munmap(arg1, arg2));
8508     case TARGET_NR_mprotect:
8509         {
8510             TaskState *ts = cpu->opaque;
8511             /* Special hack to detect libc making the stack executable.  */
8512             if ((arg3 & PROT_GROWSDOWN)
8513                 && arg1 >= ts->info->stack_limit
8514                 && arg1 <= ts->info->start_stack) {
8515                 arg3 &= ~PROT_GROWSDOWN;
8516                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8517                 arg1 = ts->info->stack_limit;
8518             }
8519         }
8520         return get_errno(target_mprotect(arg1, arg2, arg3));
8521 #ifdef TARGET_NR_mremap
8522     case TARGET_NR_mremap:
8523         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8524 #endif
8525         /* ??? msync/mlock/munlock are broken for softmmu.  */
8526 #ifdef TARGET_NR_msync
8527     case TARGET_NR_msync:
8528         return get_errno(msync(g2h(arg1), arg2, arg3));
8529 #endif
8530 #ifdef TARGET_NR_mlock
8531     case TARGET_NR_mlock:
8532         return get_errno(mlock(g2h(arg1), arg2));
8533 #endif
8534 #ifdef TARGET_NR_munlock
8535     case TARGET_NR_munlock:
8536         return get_errno(munlock(g2h(arg1), arg2));
8537 #endif
8538 #ifdef TARGET_NR_mlockall
8539     case TARGET_NR_mlockall:
8540         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8541 #endif
8542 #ifdef TARGET_NR_munlockall
8543     case TARGET_NR_munlockall:
8544         return get_errno(munlockall());
8545 #endif
8546 #ifdef TARGET_NR_truncate
8547     case TARGET_NR_truncate:
8548         if (!(p = lock_user_string(arg1)))
8549             return -TARGET_EFAULT;
8550         ret = get_errno(truncate(p, arg2));
8551         unlock_user(p, arg1, 0);
8552         return ret;
8553 #endif
8554 #ifdef TARGET_NR_ftruncate
8555     case TARGET_NR_ftruncate:
8556         return get_errno(ftruncate(arg1, arg2));
8557 #endif
8558     case TARGET_NR_fchmod:
8559         return get_errno(fchmod(arg1, arg2));
8560 #if defined(TARGET_NR_fchmodat)
8561     case TARGET_NR_fchmodat:
8562         if (!(p = lock_user_string(arg2)))
8563             return -TARGET_EFAULT;
8564         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8565         unlock_user(p, arg2, 0);
8566         return ret;
8567 #endif
8568     case TARGET_NR_getpriority:
8569         /* Note that negative values are valid for getpriority, so we must
8570            differentiate based on errno settings.  */
8571         errno = 0;
8572         ret = getpriority(arg1, arg2);
8573         if (ret == -1 && errno != 0) {
8574             return -host_to_target_errno(errno);
8575         }
8576 #ifdef TARGET_ALPHA
8577         /* Return value is the unbiased priority.  Signal no error.  */
8578         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8579 #else
8580         /* Return value is a biased priority to avoid negative numbers.  */
8581         ret = 20 - ret;
8582 #endif
8583         return ret;
8584     case TARGET_NR_setpriority:
8585         return get_errno(setpriority(arg1, arg2, arg3));
8586 #ifdef TARGET_NR_statfs
8587     case TARGET_NR_statfs:
8588         if (!(p = lock_user_string(arg1))) {
8589             return -TARGET_EFAULT;
8590         }
8591         ret = get_errno(statfs(path(p), &stfs));
8592         unlock_user(p, arg1, 0);
8593     convert_statfs:
8594         if (!is_error(ret)) {
8595             struct target_statfs *target_stfs;
8596 
8597             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8598                 return -TARGET_EFAULT;
8599             __put_user(stfs.f_type, &target_stfs->f_type);
8600             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8601             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8602             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8603             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8604             __put_user(stfs.f_files, &target_stfs->f_files);
8605             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8606             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8607             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8608             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8609             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8610 #ifdef _STATFS_F_FLAGS
8611             __put_user(stfs.f_flags, &target_stfs->f_flags);
8612 #else
8613             __put_user(0, &target_stfs->f_flags);
8614 #endif
8615             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8616             unlock_user_struct(target_stfs, arg2, 1);
8617         }
8618         return ret;
8619 #endif
8620 #ifdef TARGET_NR_fstatfs
8621     case TARGET_NR_fstatfs:
8622         ret = get_errno(fstatfs(arg1, &stfs));
8623         goto convert_statfs;
8624 #endif
8625 #ifdef TARGET_NR_statfs64
8626     case TARGET_NR_statfs64:
8627         if (!(p = lock_user_string(arg1))) {
8628             return -TARGET_EFAULT;
8629         }
8630         ret = get_errno(statfs(path(p), &stfs));
8631         unlock_user(p, arg1, 0);
8632     convert_statfs64:
8633         if (!is_error(ret)) {
8634             struct target_statfs64 *target_stfs;
8635 
8636             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8637                 return -TARGET_EFAULT;
8638             __put_user(stfs.f_type, &target_stfs->f_type);
8639             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8640             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8641             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8642             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8643             __put_user(stfs.f_files, &target_stfs->f_files);
8644             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8645             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8646             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8647             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8648             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8649             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8650             unlock_user_struct(target_stfs, arg3, 1);
8651         }
8652         return ret;
8653     case TARGET_NR_fstatfs64:
8654         ret = get_errno(fstatfs(arg1, &stfs));
8655         goto convert_statfs64;
8656 #endif
8657 #ifdef TARGET_NR_socketcall
8658     case TARGET_NR_socketcall:
8659         return do_socketcall(arg1, arg2);
8660 #endif
8661 #ifdef TARGET_NR_accept
8662     case TARGET_NR_accept:
8663         return do_accept4(arg1, arg2, arg3, 0);
8664 #endif
8665 #ifdef TARGET_NR_accept4
8666     case TARGET_NR_accept4:
8667         return do_accept4(arg1, arg2, arg3, arg4);
8668 #endif
8669 #ifdef TARGET_NR_bind
8670     case TARGET_NR_bind:
8671         return do_bind(arg1, arg2, arg3);
8672 #endif
8673 #ifdef TARGET_NR_connect
8674     case TARGET_NR_connect:
8675         return do_connect(arg1, arg2, arg3);
8676 #endif
8677 #ifdef TARGET_NR_getpeername
8678     case TARGET_NR_getpeername:
8679         return do_getpeername(arg1, arg2, arg3);
8680 #endif
8681 #ifdef TARGET_NR_getsockname
8682     case TARGET_NR_getsockname:
8683         return do_getsockname(arg1, arg2, arg3);
8684 #endif
8685 #ifdef TARGET_NR_getsockopt
8686     case TARGET_NR_getsockopt:
8687         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8688 #endif
8689 #ifdef TARGET_NR_listen
8690     case TARGET_NR_listen:
8691         return get_errno(listen(arg1, arg2));
8692 #endif
8693 #ifdef TARGET_NR_recv
8694     case TARGET_NR_recv:
8695         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8696 #endif
8697 #ifdef TARGET_NR_recvfrom
8698     case TARGET_NR_recvfrom:
8699         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8700 #endif
8701 #ifdef TARGET_NR_recvmsg
8702     case TARGET_NR_recvmsg:
8703         return do_sendrecvmsg(arg1, arg2, arg3, 0);
8704 #endif
8705 #ifdef TARGET_NR_send
8706     case TARGET_NR_send:
8707         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8708 #endif
8709 #ifdef TARGET_NR_sendmsg
8710     case TARGET_NR_sendmsg:
8711         return do_sendrecvmsg(arg1, arg2, arg3, 1);
8712 #endif
8713 #ifdef TARGET_NR_sendmmsg
8714     case TARGET_NR_sendmmsg:
8715         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8716     case TARGET_NR_recvmmsg:
8717         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8718 #endif
8719 #ifdef TARGET_NR_sendto
8720     case TARGET_NR_sendto:
8721         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8722 #endif
8723 #ifdef TARGET_NR_shutdown
8724     case TARGET_NR_shutdown:
8725         return get_errno(shutdown(arg1, arg2));
8726 #endif
8727 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8728     case TARGET_NR_getrandom:
8729         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8730         if (!p) {
8731             return -TARGET_EFAULT;
8732         }
8733         ret = get_errno(getrandom(p, arg2, arg3));
8734         unlock_user(p, arg1, ret);
8735         return ret;
8736 #endif
8737 #ifdef TARGET_NR_socket
8738     case TARGET_NR_socket:
8739         return do_socket(arg1, arg2, arg3);
8740 #endif
8741 #ifdef TARGET_NR_socketpair
8742     case TARGET_NR_socketpair:
8743         return do_socketpair(arg1, arg2, arg3, arg4);
8744 #endif
8745 #ifdef TARGET_NR_setsockopt
8746     case TARGET_NR_setsockopt:
8747         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8748 #endif
8749 #if defined(TARGET_NR_syslog)
8750     case TARGET_NR_syslog:
8751         {
8752             int len = arg2;
8753 
8754             switch (arg1) {
8755             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
8756             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
8757             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
8758             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
8759             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
8760             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8761             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
8762             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
8763                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8764             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
8765             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
8766             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
8767                 {
8768                     if (len < 0) {
8769                         return -TARGET_EINVAL;
8770                     }
8771                     if (len == 0) {
8772                         return 0;
8773                     }
8774                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8775                     if (!p) {
8776                         return -TARGET_EFAULT;
8777                     }
8778                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8779                     unlock_user(p, arg2, arg3);
8780                 }
8781                 return ret;
8782             default:
8783                 return -TARGET_EINVAL;
8784             }
8785         }
8786         break;
8787 #endif
8788     case TARGET_NR_setitimer:
8789         {
8790             struct itimerval value, ovalue, *pvalue;
8791 
8792             if (arg2) {
8793                 pvalue = &value;
8794                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8795                     || copy_from_user_timeval(&pvalue->it_value,
8796                                               arg2 + sizeof(struct target_timeval)))
8797                     return -TARGET_EFAULT;
8798             } else {
8799                 pvalue = NULL;
8800             }
8801             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8802             if (!is_error(ret) && arg3) {
8803                 if (copy_to_user_timeval(arg3,
8804                                          &ovalue.it_interval)
8805                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8806                                             &ovalue.it_value))
8807                     return -TARGET_EFAULT;
8808             }
8809         }
8810         return ret;
8811     case TARGET_NR_getitimer:
8812         {
8813             struct itimerval value;
8814 
8815             ret = get_errno(getitimer(arg1, &value));
8816             if (!is_error(ret) && arg2) {
8817                 if (copy_to_user_timeval(arg2,
8818                                          &value.it_interval)
8819                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8820                                             &value.it_value))
8821                     return -TARGET_EFAULT;
8822             }
8823         }
8824         return ret;
8825 #ifdef TARGET_NR_stat
8826     case TARGET_NR_stat:
8827         if (!(p = lock_user_string(arg1))) {
8828             return -TARGET_EFAULT;
8829         }
8830         ret = get_errno(stat(path(p), &st));
8831         unlock_user(p, arg1, 0);
8832         goto do_stat;
8833 #endif
8834 #ifdef TARGET_NR_lstat
8835     case TARGET_NR_lstat:
8836         if (!(p = lock_user_string(arg1))) {
8837             return -TARGET_EFAULT;
8838         }
8839         ret = get_errno(lstat(path(p), &st));
8840         unlock_user(p, arg1, 0);
8841         goto do_stat;
8842 #endif
8843 #ifdef TARGET_NR_fstat
8844     case TARGET_NR_fstat:
8845         {
8846             ret = get_errno(fstat(arg1, &st));
8847 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8848         do_stat:
8849 #endif
8850             if (!is_error(ret)) {
8851                 struct target_stat *target_st;
8852 
8853                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8854                     return -TARGET_EFAULT;
8855                 memset(target_st, 0, sizeof(*target_st));
8856                 __put_user(st.st_dev, &target_st->st_dev);
8857                 __put_user(st.st_ino, &target_st->st_ino);
8858                 __put_user(st.st_mode, &target_st->st_mode);
8859                 __put_user(st.st_uid, &target_st->st_uid);
8860                 __put_user(st.st_gid, &target_st->st_gid);
8861                 __put_user(st.st_nlink, &target_st->st_nlink);
8862                 __put_user(st.st_rdev, &target_st->st_rdev);
8863                 __put_user(st.st_size, &target_st->st_size);
8864                 __put_user(st.st_blksize, &target_st->st_blksize);
8865                 __put_user(st.st_blocks, &target_st->st_blocks);
8866                 __put_user(st.st_atime, &target_st->target_st_atime);
8867                 __put_user(st.st_mtime, &target_st->target_st_mtime);
8868                 __put_user(st.st_ctime, &target_st->target_st_ctime);
8869                 unlock_user_struct(target_st, arg2, 1);
8870             }
8871         }
8872         return ret;
8873 #endif
8874     case TARGET_NR_vhangup:
8875         return get_errno(vhangup());
8876 #ifdef TARGET_NR_syscall
8877     case TARGET_NR_syscall:
8878         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8879                           arg6, arg7, arg8, 0);
8880 #endif
8881     case TARGET_NR_wait4:
8882         {
8883             int status;
8884             abi_long status_ptr = arg2;
8885             struct rusage rusage, *rusage_ptr;
8886             abi_ulong target_rusage = arg4;
8887             abi_long rusage_err;
8888             if (target_rusage)
8889                 rusage_ptr = &rusage;
8890             else
8891                 rusage_ptr = NULL;
8892             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8893             if (!is_error(ret)) {
8894                 if (status_ptr && ret) {
8895                     status = host_to_target_waitstatus(status);
8896                     if (put_user_s32(status, status_ptr))
8897                         return -TARGET_EFAULT;
8898                 }
8899                 if (target_rusage) {
8900                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
8901                     if (rusage_err) {
8902                         ret = rusage_err;
8903                     }
8904                 }
8905             }
8906         }
8907         return ret;
8908 #ifdef TARGET_NR_swapoff
8909     case TARGET_NR_swapoff:
8910         if (!(p = lock_user_string(arg1)))
8911             return -TARGET_EFAULT;
8912         ret = get_errno(swapoff(p));
8913         unlock_user(p, arg1, 0);
8914         return ret;
8915 #endif
8916     case TARGET_NR_sysinfo:
8917         {
8918             struct target_sysinfo *target_value;
8919             struct sysinfo value;
8920             ret = get_errno(sysinfo(&value));
8921             if (!is_error(ret) && arg1)
8922             {
8923                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8924                     return -TARGET_EFAULT;
8925                 __put_user(value.uptime, &target_value->uptime);
8926                 __put_user(value.loads[0], &target_value->loads[0]);
8927                 __put_user(value.loads[1], &target_value->loads[1]);
8928                 __put_user(value.loads[2], &target_value->loads[2]);
8929                 __put_user(value.totalram, &target_value->totalram);
8930                 __put_user(value.freeram, &target_value->freeram);
8931                 __put_user(value.sharedram, &target_value->sharedram);
8932                 __put_user(value.bufferram, &target_value->bufferram);
8933                 __put_user(value.totalswap, &target_value->totalswap);
8934                 __put_user(value.freeswap, &target_value->freeswap);
8935                 __put_user(value.procs, &target_value->procs);
8936                 __put_user(value.totalhigh, &target_value->totalhigh);
8937                 __put_user(value.freehigh, &target_value->freehigh);
8938                 __put_user(value.mem_unit, &target_value->mem_unit);
8939                 unlock_user_struct(target_value, arg1, 1);
8940             }
8941         }
8942         return ret;
8943 #ifdef TARGET_NR_ipc
8944     case TARGET_NR_ipc:
8945         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
8946 #endif
8947 #ifdef TARGET_NR_semget
8948     case TARGET_NR_semget:
8949         return get_errno(semget(arg1, arg2, arg3));
8950 #endif
8951 #ifdef TARGET_NR_semop
8952     case TARGET_NR_semop:
8953         return do_semop(arg1, arg2, arg3);
8954 #endif
8955 #ifdef TARGET_NR_semctl
8956     case TARGET_NR_semctl:
8957         return do_semctl(arg1, arg2, arg3, arg4);
8958 #endif
8959 #ifdef TARGET_NR_msgctl
8960     case TARGET_NR_msgctl:
8961         return do_msgctl(arg1, arg2, arg3);
8962 #endif
8963 #ifdef TARGET_NR_msgget
8964     case TARGET_NR_msgget:
8965         return get_errno(msgget(arg1, arg2));
8966 #endif
8967 #ifdef TARGET_NR_msgrcv
8968     case TARGET_NR_msgrcv:
8969         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8970 #endif
8971 #ifdef TARGET_NR_msgsnd
8972     case TARGET_NR_msgsnd:
8973         return do_msgsnd(arg1, arg2, arg3, arg4);
8974 #endif
8975 #ifdef TARGET_NR_shmget
8976     case TARGET_NR_shmget:
8977         return get_errno(shmget(arg1, arg2, arg3));
8978 #endif
8979 #ifdef TARGET_NR_shmctl
8980     case TARGET_NR_shmctl:
8981         return do_shmctl(arg1, arg2, arg3);
8982 #endif
8983 #ifdef TARGET_NR_shmat
8984     case TARGET_NR_shmat:
8985         return do_shmat(cpu_env, arg1, arg2, arg3);
8986 #endif
8987 #ifdef TARGET_NR_shmdt
8988     case TARGET_NR_shmdt:
8989         return do_shmdt(arg1);
8990 #endif
8991     case TARGET_NR_fsync:
8992         return get_errno(fsync(arg1));
8993     case TARGET_NR_clone:
8994         /* Linux manages to have three different orderings for its
8995          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8996          * match the kernel's CONFIG_CLONE_* settings.
8997          * Microblaze is further special in that it uses a sixth
8998          * implicit argument to clone for the TLS pointer.
8999          */
9000 #if defined(TARGET_MICROBLAZE)
9001         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9002 #elif defined(TARGET_CLONE_BACKWARDS)
9003         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9004 #elif defined(TARGET_CLONE_BACKWARDS2)
9005         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9006 #else
9007         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9008 #endif
9009         return ret;
9010 #ifdef __NR_exit_group
9011         /* new thread calls */
9012     case TARGET_NR_exit_group:
9013         preexit_cleanup(cpu_env, arg1);
9014         return get_errno(exit_group(arg1));
9015 #endif
9016     case TARGET_NR_setdomainname:
9017         if (!(p = lock_user_string(arg1)))
9018             return -TARGET_EFAULT;
9019         ret = get_errno(setdomainname(p, arg2));
9020         unlock_user(p, arg1, 0);
9021         return ret;
9022     case TARGET_NR_uname:
9023         /* no need to transcode because we use the linux syscall */
9024         {
9025             struct new_utsname * buf;
9026 
9027             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9028                 return -TARGET_EFAULT;
9029             ret = get_errno(sys_uname(buf));
9030             if (!is_error(ret)) {
9031                 /* Overwrite the native machine name with whatever is being
9032                    emulated. */
9033                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9034                           sizeof(buf->machine));
9035                 /* Allow the user to override the reported release.  */
9036                 if (qemu_uname_release && *qemu_uname_release) {
9037                     g_strlcpy(buf->release, qemu_uname_release,
9038                               sizeof(buf->release));
9039                 }
9040             }
9041             unlock_user_struct(buf, arg1, 1);
9042         }
9043         return ret;
9044 #ifdef TARGET_I386
9045     case TARGET_NR_modify_ldt:
9046         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9047 #if !defined(TARGET_X86_64)
9048     case TARGET_NR_vm86:
9049         return do_vm86(cpu_env, arg1, arg2);
9050 #endif
9051 #endif
9052     case TARGET_NR_adjtimex:
9053         {
9054             struct timex host_buf;
9055 
9056             if (target_to_host_timex(&host_buf, arg1) != 0) {
9057                 return -TARGET_EFAULT;
9058             }
9059             ret = get_errno(adjtimex(&host_buf));
9060             if (!is_error(ret)) {
9061                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9062                     return -TARGET_EFAULT;
9063                 }
9064             }
9065         }
9066         return ret;
9067 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9068     case TARGET_NR_clock_adjtime:
9069         {
9070             struct timex htx, *phtx = &htx;
9071 
9072             if (target_to_host_timex(phtx, arg2) != 0) {
9073                 return -TARGET_EFAULT;
9074             }
9075             ret = get_errno(clock_adjtime(arg1, phtx));
9076             if (!is_error(ret) && phtx) {
9077                 if (host_to_target_timex(arg2, phtx) != 0) {
9078                     return -TARGET_EFAULT;
9079                 }
9080             }
9081         }
9082         return ret;
9083 #endif
9084     case TARGET_NR_getpgid:
9085         return get_errno(getpgid(arg1));
9086     case TARGET_NR_fchdir:
9087         return get_errno(fchdir(arg1));
9088     case TARGET_NR_personality:
9089         return get_errno(personality(arg1));
9090 #ifdef TARGET_NR__llseek /* Not on alpha */
9091     case TARGET_NR__llseek:
9092         {
9093             int64_t res;
9094 #if !defined(__NR_llseek)
9095             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9096             if (res == -1) {
9097                 ret = get_errno(res);
9098             } else {
9099                 ret = 0;
9100             }
9101 #else
9102             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9103 #endif
9104             if ((ret == 0) && put_user_s64(res, arg4)) {
9105                 return -TARGET_EFAULT;
9106             }
9107         }
9108         return ret;
9109 #endif
9110 #ifdef TARGET_NR_getdents
9111     case TARGET_NR_getdents:
9112 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9113 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9114         {
9115             struct target_dirent *target_dirp;
9116             struct linux_dirent *dirp;
9117             abi_long count = arg3;
9118 
9119             dirp = g_try_malloc(count);
9120             if (!dirp) {
9121                 return -TARGET_ENOMEM;
9122             }
9123 
9124             ret = get_errno(sys_getdents(arg1, dirp, count));
9125             if (!is_error(ret)) {
9126                 struct linux_dirent *de;
9127 		struct target_dirent *tde;
9128                 int len = ret;
9129                 int reclen, treclen;
9130 		int count1, tnamelen;
9131 
9132 		count1 = 0;
9133                 de = dirp;
9134                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9135                     return -TARGET_EFAULT;
9136 		tde = target_dirp;
9137                 while (len > 0) {
9138                     reclen = de->d_reclen;
9139                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9140                     assert(tnamelen >= 0);
9141                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9142                     assert(count1 + treclen <= count);
9143                     tde->d_reclen = tswap16(treclen);
9144                     tde->d_ino = tswapal(de->d_ino);
9145                     tde->d_off = tswapal(de->d_off);
9146                     memcpy(tde->d_name, de->d_name, tnamelen);
9147                     de = (struct linux_dirent *)((char *)de + reclen);
9148                     len -= reclen;
9149                     tde = (struct target_dirent *)((char *)tde + treclen);
9150 		    count1 += treclen;
9151                 }
9152 		ret = count1;
9153                 unlock_user(target_dirp, arg2, ret);
9154             }
9155             g_free(dirp);
9156         }
9157 #else
9158         {
9159             struct linux_dirent *dirp;
9160             abi_long count = arg3;
9161 
9162             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9163                 return -TARGET_EFAULT;
9164             ret = get_errno(sys_getdents(arg1, dirp, count));
9165             if (!is_error(ret)) {
9166                 struct linux_dirent *de;
9167                 int len = ret;
9168                 int reclen;
9169                 de = dirp;
9170                 while (len > 0) {
9171                     reclen = de->d_reclen;
9172                     if (reclen > len)
9173                         break;
9174                     de->d_reclen = tswap16(reclen);
9175                     tswapls(&de->d_ino);
9176                     tswapls(&de->d_off);
9177                     de = (struct linux_dirent *)((char *)de + reclen);
9178                     len -= reclen;
9179                 }
9180             }
9181             unlock_user(dirp, arg2, ret);
9182         }
9183 #endif
9184 #else
9185         /* Implement getdents in terms of getdents64 */
9186         {
9187             struct linux_dirent64 *dirp;
9188             abi_long count = arg3;
9189 
9190             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9191             if (!dirp) {
9192                 return -TARGET_EFAULT;
9193             }
9194             ret = get_errno(sys_getdents64(arg1, dirp, count));
9195             if (!is_error(ret)) {
9196                 /* Convert the dirent64 structs to target dirent.  We do this
9197                  * in-place, since we can guarantee that a target_dirent is no
9198                  * larger than a dirent64; however this means we have to be
9199                  * careful to read everything before writing in the new format.
9200                  */
9201                 struct linux_dirent64 *de;
9202                 struct target_dirent *tde;
9203                 int len = ret;
9204                 int tlen = 0;
9205 
9206                 de = dirp;
9207                 tde = (struct target_dirent *)dirp;
9208                 while (len > 0) {
9209                     int namelen, treclen;
9210                     int reclen = de->d_reclen;
9211                     uint64_t ino = de->d_ino;
9212                     int64_t off = de->d_off;
9213                     uint8_t type = de->d_type;
9214 
9215                     namelen = strlen(de->d_name);
9216                     treclen = offsetof(struct target_dirent, d_name)
9217                         + namelen + 2;
9218                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9219 
9220                     memmove(tde->d_name, de->d_name, namelen + 1);
9221                     tde->d_ino = tswapal(ino);
9222                     tde->d_off = tswapal(off);
9223                     tde->d_reclen = tswap16(treclen);
9224                     /* The target_dirent type is in what was formerly a padding
9225                      * byte at the end of the structure:
9226                      */
9227                     *(((char *)tde) + treclen - 1) = type;
9228 
9229                     de = (struct linux_dirent64 *)((char *)de + reclen);
9230                     tde = (struct target_dirent *)((char *)tde + treclen);
9231                     len -= reclen;
9232                     tlen += treclen;
9233                 }
9234                 ret = tlen;
9235             }
9236             unlock_user(dirp, arg2, ret);
9237         }
9238 #endif
9239         return ret;
9240 #endif /* TARGET_NR_getdents */
9241 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9242     case TARGET_NR_getdents64:
9243         {
9244             struct linux_dirent64 *dirp;
9245             abi_long count = arg3;
9246             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9247                 return -TARGET_EFAULT;
9248             ret = get_errno(sys_getdents64(arg1, dirp, count));
9249             if (!is_error(ret)) {
9250                 struct linux_dirent64 *de;
9251                 int len = ret;
9252                 int reclen;
9253                 de = dirp;
9254                 while (len > 0) {
9255                     reclen = de->d_reclen;
9256                     if (reclen > len)
9257                         break;
9258                     de->d_reclen = tswap16(reclen);
9259                     tswap64s((uint64_t *)&de->d_ino);
9260                     tswap64s((uint64_t *)&de->d_off);
9261                     de = (struct linux_dirent64 *)((char *)de + reclen);
9262                     len -= reclen;
9263                 }
9264             }
9265             unlock_user(dirp, arg2, ret);
9266         }
9267         return ret;
9268 #endif /* TARGET_NR_getdents64 */
9269 #if defined(TARGET_NR__newselect)
9270     case TARGET_NR__newselect:
9271         return do_select(arg1, arg2, arg3, arg4, arg5);
9272 #endif
9273 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9274 # ifdef TARGET_NR_poll
9275     case TARGET_NR_poll:
9276 # endif
9277 # ifdef TARGET_NR_ppoll
9278     case TARGET_NR_ppoll:
9279 # endif
9280         {
9281             struct target_pollfd *target_pfd;
9282             unsigned int nfds = arg2;
9283             struct pollfd *pfd;
9284             unsigned int i;
9285 
9286             pfd = NULL;
9287             target_pfd = NULL;
9288             if (nfds) {
9289                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9290                     return -TARGET_EINVAL;
9291                 }
9292 
9293                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9294                                        sizeof(struct target_pollfd) * nfds, 1);
9295                 if (!target_pfd) {
9296                     return -TARGET_EFAULT;
9297                 }
9298 
9299                 pfd = alloca(sizeof(struct pollfd) * nfds);
9300                 for (i = 0; i < nfds; i++) {
9301                     pfd[i].fd = tswap32(target_pfd[i].fd);
9302                     pfd[i].events = tswap16(target_pfd[i].events);
9303                 }
9304             }
9305 
9306             switch (num) {
9307 # ifdef TARGET_NR_ppoll
9308             case TARGET_NR_ppoll:
9309             {
9310                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9311                 target_sigset_t *target_set;
9312                 sigset_t _set, *set = &_set;
9313 
9314                 if (arg3) {
9315                     if (target_to_host_timespec(timeout_ts, arg3)) {
9316                         unlock_user(target_pfd, arg1, 0);
9317                         return -TARGET_EFAULT;
9318                     }
9319                 } else {
9320                     timeout_ts = NULL;
9321                 }
9322 
9323                 if (arg4) {
9324                     if (arg5 != sizeof(target_sigset_t)) {
9325                         unlock_user(target_pfd, arg1, 0);
9326                         return -TARGET_EINVAL;
9327                     }
9328 
9329                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9330                     if (!target_set) {
9331                         unlock_user(target_pfd, arg1, 0);
9332                         return -TARGET_EFAULT;
9333                     }
9334                     target_to_host_sigset(set, target_set);
9335                 } else {
9336                     set = NULL;
9337                 }
9338 
9339                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9340                                            set, SIGSET_T_SIZE));
9341 
9342                 if (!is_error(ret) && arg3) {
9343                     host_to_target_timespec(arg3, timeout_ts);
9344                 }
9345                 if (arg4) {
9346                     unlock_user(target_set, arg4, 0);
9347                 }
9348                 break;
9349             }
9350 # endif
9351 # ifdef TARGET_NR_poll
9352             case TARGET_NR_poll:
9353             {
9354                 struct timespec ts, *pts;
9355 
9356                 if (arg3 >= 0) {
9357                     /* Convert ms to secs, ns */
9358                     ts.tv_sec = arg3 / 1000;
9359                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9360                     pts = &ts;
9361                 } else {
9362                     /* -ve poll() timeout means "infinite" */
9363                     pts = NULL;
9364                 }
9365                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9366                 break;
9367             }
9368 # endif
9369             default:
9370                 g_assert_not_reached();
9371             }
9372 
9373             if (!is_error(ret)) {
9374                 for(i = 0; i < nfds; i++) {
9375                     target_pfd[i].revents = tswap16(pfd[i].revents);
9376                 }
9377             }
9378             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9379         }
9380         return ret;
9381 #endif
9382     case TARGET_NR_flock:
9383         /* NOTE: the flock constant seems to be the same for every
9384            Linux platform */
9385         return get_errno(safe_flock(arg1, arg2));
9386     case TARGET_NR_readv:
9387         {
9388             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9389             if (vec != NULL) {
9390                 ret = get_errno(safe_readv(arg1, vec, arg3));
9391                 unlock_iovec(vec, arg2, arg3, 1);
9392             } else {
9393                 ret = -host_to_target_errno(errno);
9394             }
9395         }
9396         return ret;
9397     case TARGET_NR_writev:
9398         {
9399             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9400             if (vec != NULL) {
9401                 ret = get_errno(safe_writev(arg1, vec, arg3));
9402                 unlock_iovec(vec, arg2, arg3, 0);
9403             } else {
9404                 ret = -host_to_target_errno(errno);
9405             }
9406         }
9407         return ret;
9408 #if defined(TARGET_NR_preadv)
9409     case TARGET_NR_preadv:
9410         {
9411             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9412             if (vec != NULL) {
9413                 unsigned long low, high;
9414 
9415                 target_to_host_low_high(arg4, arg5, &low, &high);
9416                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9417                 unlock_iovec(vec, arg2, arg3, 1);
9418             } else {
9419                 ret = -host_to_target_errno(errno);
9420            }
9421         }
9422         return ret;
9423 #endif
9424 #if defined(TARGET_NR_pwritev)
9425     case TARGET_NR_pwritev:
9426         {
9427             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9428             if (vec != NULL) {
9429                 unsigned long low, high;
9430 
9431                 target_to_host_low_high(arg4, arg5, &low, &high);
9432                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9433                 unlock_iovec(vec, arg2, arg3, 0);
9434             } else {
9435                 ret = -host_to_target_errno(errno);
9436            }
9437         }
9438         return ret;
9439 #endif
9440     case TARGET_NR_getsid:
9441         return get_errno(getsid(arg1));
9442 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9443     case TARGET_NR_fdatasync:
9444         return get_errno(fdatasync(arg1));
9445 #endif
9446 #ifdef TARGET_NR__sysctl
9447     case TARGET_NR__sysctl:
9448         /* We don't implement this, but ENOTDIR is always a safe
9449            return value. */
9450         return -TARGET_ENOTDIR;
9451 #endif
9452     case TARGET_NR_sched_getaffinity:
9453         {
9454             unsigned int mask_size;
9455             unsigned long *mask;
9456 
9457             /*
9458              * sched_getaffinity needs multiples of ulong, so need to take
9459              * care of mismatches between target ulong and host ulong sizes.
9460              */
9461             if (arg2 & (sizeof(abi_ulong) - 1)) {
9462                 return -TARGET_EINVAL;
9463             }
9464             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9465 
9466             mask = alloca(mask_size);
9467             memset(mask, 0, mask_size);
9468             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9469 
9470             if (!is_error(ret)) {
9471                 if (ret > arg2) {
9472                     /* More data returned than the caller's buffer will fit.
9473                      * This only happens if sizeof(abi_long) < sizeof(long)
9474                      * and the caller passed us a buffer holding an odd number
9475                      * of abi_longs. If the host kernel is actually using the
9476                      * extra 4 bytes then fail EINVAL; otherwise we can just
9477                      * ignore them and only copy the interesting part.
9478                      */
9479                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9480                     if (numcpus > arg2 * 8) {
9481                         return -TARGET_EINVAL;
9482                     }
9483                     ret = arg2;
9484                 }
9485 
9486                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9487                     return -TARGET_EFAULT;
9488                 }
9489             }
9490         }
9491         return ret;
9492     case TARGET_NR_sched_setaffinity:
9493         {
9494             unsigned int mask_size;
9495             unsigned long *mask;
9496 
9497             /*
9498              * sched_setaffinity needs multiples of ulong, so need to take
9499              * care of mismatches between target ulong and host ulong sizes.
9500              */
9501             if (arg2 & (sizeof(abi_ulong) - 1)) {
9502                 return -TARGET_EINVAL;
9503             }
9504             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9505             mask = alloca(mask_size);
9506 
9507             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9508             if (ret) {
9509                 return ret;
9510             }
9511 
9512             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9513         }
9514     case TARGET_NR_getcpu:
9515         {
9516             unsigned cpu, node;
9517             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9518                                        arg2 ? &node : NULL,
9519                                        NULL));
9520             if (is_error(ret)) {
9521                 return ret;
9522             }
9523             if (arg1 && put_user_u32(cpu, arg1)) {
9524                 return -TARGET_EFAULT;
9525             }
9526             if (arg2 && put_user_u32(node, arg2)) {
9527                 return -TARGET_EFAULT;
9528             }
9529         }
9530         return ret;
9531     case TARGET_NR_sched_setparam:
9532         {
9533             struct sched_param *target_schp;
9534             struct sched_param schp;
9535 
9536             if (arg2 == 0) {
9537                 return -TARGET_EINVAL;
9538             }
9539             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9540                 return -TARGET_EFAULT;
9541             schp.sched_priority = tswap32(target_schp->sched_priority);
9542             unlock_user_struct(target_schp, arg2, 0);
9543             return get_errno(sched_setparam(arg1, &schp));
9544         }
9545     case TARGET_NR_sched_getparam:
9546         {
9547             struct sched_param *target_schp;
9548             struct sched_param schp;
9549 
9550             if (arg2 == 0) {
9551                 return -TARGET_EINVAL;
9552             }
9553             ret = get_errno(sched_getparam(arg1, &schp));
9554             if (!is_error(ret)) {
9555                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9556                     return -TARGET_EFAULT;
9557                 target_schp->sched_priority = tswap32(schp.sched_priority);
9558                 unlock_user_struct(target_schp, arg2, 1);
9559             }
9560         }
9561         return ret;
9562     case TARGET_NR_sched_setscheduler:
9563         {
9564             struct sched_param *target_schp;
9565             struct sched_param schp;
9566             if (arg3 == 0) {
9567                 return -TARGET_EINVAL;
9568             }
9569             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9570                 return -TARGET_EFAULT;
9571             schp.sched_priority = tswap32(target_schp->sched_priority);
9572             unlock_user_struct(target_schp, arg3, 0);
9573             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9574         }
9575     case TARGET_NR_sched_getscheduler:
9576         return get_errno(sched_getscheduler(arg1));
9577     case TARGET_NR_sched_yield:
9578         return get_errno(sched_yield());
9579     case TARGET_NR_sched_get_priority_max:
9580         return get_errno(sched_get_priority_max(arg1));
9581     case TARGET_NR_sched_get_priority_min:
9582         return get_errno(sched_get_priority_min(arg1));
9583     case TARGET_NR_sched_rr_get_interval:
9584         {
9585             struct timespec ts;
9586             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9587             if (!is_error(ret)) {
9588                 ret = host_to_target_timespec(arg2, &ts);
9589             }
9590         }
9591         return ret;
9592     case TARGET_NR_nanosleep:
9593         {
9594             struct timespec req, rem;
9595             target_to_host_timespec(&req, arg1);
9596             ret = get_errno(safe_nanosleep(&req, &rem));
9597             if (is_error(ret) && arg2) {
9598                 host_to_target_timespec(arg2, &rem);
9599             }
9600         }
9601         return ret;
9602     case TARGET_NR_prctl:
9603         switch (arg1) {
9604         case PR_GET_PDEATHSIG:
9605         {
9606             int deathsig;
9607             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9608             if (!is_error(ret) && arg2
9609                 && put_user_ual(deathsig, arg2)) {
9610                 return -TARGET_EFAULT;
9611             }
9612             return ret;
9613         }
9614 #ifdef PR_GET_NAME
9615         case PR_GET_NAME:
9616         {
9617             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9618             if (!name) {
9619                 return -TARGET_EFAULT;
9620             }
9621             ret = get_errno(prctl(arg1, (unsigned long)name,
9622                                   arg3, arg4, arg5));
9623             unlock_user(name, arg2, 16);
9624             return ret;
9625         }
9626         case PR_SET_NAME:
9627         {
9628             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9629             if (!name) {
9630                 return -TARGET_EFAULT;
9631             }
9632             ret = get_errno(prctl(arg1, (unsigned long)name,
9633                                   arg3, arg4, arg5));
9634             unlock_user(name, arg2, 0);
9635             return ret;
9636         }
9637 #endif
9638 #ifdef TARGET_MIPS
9639         case TARGET_PR_GET_FP_MODE:
9640         {
9641             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9642             ret = 0;
9643             if (env->CP0_Status & (1 << CP0St_FR)) {
9644                 ret |= TARGET_PR_FP_MODE_FR;
9645             }
9646             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9647                 ret |= TARGET_PR_FP_MODE_FRE;
9648             }
9649             return ret;
9650         }
9651         case TARGET_PR_SET_FP_MODE:
9652         {
9653             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9654             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9655             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
9656             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9657             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9658 
9659             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
9660                                             TARGET_PR_FP_MODE_FRE;
9661 
9662             /* If nothing to change, return right away, successfully.  */
9663             if (old_fr == new_fr && old_fre == new_fre) {
9664                 return 0;
9665             }
9666             /* Check the value is valid */
9667             if (arg2 & ~known_bits) {
9668                 return -TARGET_EOPNOTSUPP;
9669             }
9670             /* Setting FRE without FR is not supported.  */
9671             if (new_fre && !new_fr) {
9672                 return -TARGET_EOPNOTSUPP;
9673             }
9674             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9675                 /* FR1 is not supported */
9676                 return -TARGET_EOPNOTSUPP;
9677             }
9678             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9679                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9680                 /* cannot set FR=0 */
9681                 return -TARGET_EOPNOTSUPP;
9682             }
9683             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9684                 /* Cannot set FRE=1 */
9685                 return -TARGET_EOPNOTSUPP;
9686             }
9687 
9688             int i;
9689             fpr_t *fpr = env->active_fpu.fpr;
9690             for (i = 0; i < 32 ; i += 2) {
9691                 if (!old_fr && new_fr) {
9692                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9693                 } else if (old_fr && !new_fr) {
9694                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
9695                 }
9696             }
9697 
9698             if (new_fr) {
9699                 env->CP0_Status |= (1 << CP0St_FR);
9700                 env->hflags |= MIPS_HFLAG_F64;
9701             } else {
9702                 env->CP0_Status &= ~(1 << CP0St_FR);
9703                 env->hflags &= ~MIPS_HFLAG_F64;
9704             }
9705             if (new_fre) {
9706                 env->CP0_Config5 |= (1 << CP0C5_FRE);
9707                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
9708                     env->hflags |= MIPS_HFLAG_FRE;
9709                 }
9710             } else {
9711                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
9712                 env->hflags &= ~MIPS_HFLAG_FRE;
9713             }
9714 
9715             return 0;
9716         }
9717 #endif /* MIPS */
9718 #ifdef TARGET_AARCH64
9719         case TARGET_PR_SVE_SET_VL:
9720             /*
9721              * We cannot support either PR_SVE_SET_VL_ONEXEC or
9722              * PR_SVE_VL_INHERIT.  Note the kernel definition
9723              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9724              * even though the current architectural maximum is VQ=16.
9725              */
9726             ret = -TARGET_EINVAL;
9727             if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env))
9728                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9729                 CPUARMState *env = cpu_env;
9730                 ARMCPU *cpu = arm_env_get_cpu(env);
9731                 uint32_t vq, old_vq;
9732 
9733                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9734                 vq = MAX(arg2 / 16, 1);
9735                 vq = MIN(vq, cpu->sve_max_vq);
9736 
9737                 if (vq < old_vq) {
9738                     aarch64_sve_narrow_vq(env, vq);
9739                 }
9740                 env->vfp.zcr_el[1] = vq - 1;
9741                 ret = vq * 16;
9742             }
9743             return ret;
9744         case TARGET_PR_SVE_GET_VL:
9745             ret = -TARGET_EINVAL;
9746             {
9747                 ARMCPU *cpu = arm_env_get_cpu(cpu_env);
9748                 if (cpu_isar_feature(aa64_sve, cpu)) {
9749                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9750                 }
9751             }
9752             return ret;
9753         case TARGET_PR_PAC_RESET_KEYS:
9754             {
9755                 CPUARMState *env = cpu_env;
9756                 ARMCPU *cpu = arm_env_get_cpu(env);
9757 
9758                 if (arg3 || arg4 || arg5) {
9759                     return -TARGET_EINVAL;
9760                 }
9761                 if (cpu_isar_feature(aa64_pauth, cpu)) {
9762                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
9763                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
9764                                TARGET_PR_PAC_APGAKEY);
9765                     if (arg2 == 0) {
9766                         arg2 = all;
9767                     } else if (arg2 & ~all) {
9768                         return -TARGET_EINVAL;
9769                     }
9770                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
9771                         arm_init_pauth_key(&env->apia_key);
9772                     }
9773                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
9774                         arm_init_pauth_key(&env->apib_key);
9775                     }
9776                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
9777                         arm_init_pauth_key(&env->apda_key);
9778                     }
9779                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
9780                         arm_init_pauth_key(&env->apdb_key);
9781                     }
9782                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
9783                         arm_init_pauth_key(&env->apga_key);
9784                     }
9785                     return 0;
9786                 }
9787             }
9788             return -TARGET_EINVAL;
9789 #endif /* AARCH64 */
9790         case PR_GET_SECCOMP:
9791         case PR_SET_SECCOMP:
9792             /* Disable seccomp to prevent the target disabling syscalls we
9793              * need. */
9794             return -TARGET_EINVAL;
9795         default:
9796             /* Most prctl options have no pointer arguments */
9797             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9798         }
9799         break;
9800 #ifdef TARGET_NR_arch_prctl
9801     case TARGET_NR_arch_prctl:
9802 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9803         return do_arch_prctl(cpu_env, arg1, arg2);
9804 #else
9805 #error unreachable
9806 #endif
9807 #endif
9808 #ifdef TARGET_NR_pread64
9809     case TARGET_NR_pread64:
9810         if (regpairs_aligned(cpu_env, num)) {
9811             arg4 = arg5;
9812             arg5 = arg6;
9813         }
9814         if (arg2 == 0 && arg3 == 0) {
9815             /* Special-case NULL buffer and zero length, which should succeed */
9816             p = 0;
9817         } else {
9818             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9819             if (!p) {
9820                 return -TARGET_EFAULT;
9821             }
9822         }
9823         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9824         unlock_user(p, arg2, ret);
9825         return ret;
9826     case TARGET_NR_pwrite64:
9827         if (regpairs_aligned(cpu_env, num)) {
9828             arg4 = arg5;
9829             arg5 = arg6;
9830         }
9831         if (arg2 == 0 && arg3 == 0) {
9832             /* Special-case NULL buffer and zero length, which should succeed */
9833             p = 0;
9834         } else {
9835             p = lock_user(VERIFY_READ, arg2, arg3, 1);
9836             if (!p) {
9837                 return -TARGET_EFAULT;
9838             }
9839         }
9840         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9841         unlock_user(p, arg2, 0);
9842         return ret;
9843 #endif
9844     case TARGET_NR_getcwd:
9845         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9846             return -TARGET_EFAULT;
9847         ret = get_errno(sys_getcwd1(p, arg2));
9848         unlock_user(p, arg1, ret);
9849         return ret;
9850     case TARGET_NR_capget:
9851     case TARGET_NR_capset:
9852     {
9853         struct target_user_cap_header *target_header;
9854         struct target_user_cap_data *target_data = NULL;
9855         struct __user_cap_header_struct header;
9856         struct __user_cap_data_struct data[2];
9857         struct __user_cap_data_struct *dataptr = NULL;
9858         int i, target_datalen;
9859         int data_items = 1;
9860 
9861         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9862             return -TARGET_EFAULT;
9863         }
9864         header.version = tswap32(target_header->version);
9865         header.pid = tswap32(target_header->pid);
9866 
9867         if (header.version != _LINUX_CAPABILITY_VERSION) {
9868             /* Version 2 and up takes pointer to two user_data structs */
9869             data_items = 2;
9870         }
9871 
9872         target_datalen = sizeof(*target_data) * data_items;
9873 
9874         if (arg2) {
9875             if (num == TARGET_NR_capget) {
9876                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9877             } else {
9878                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9879             }
9880             if (!target_data) {
9881                 unlock_user_struct(target_header, arg1, 0);
9882                 return -TARGET_EFAULT;
9883             }
9884 
9885             if (num == TARGET_NR_capset) {
9886                 for (i = 0; i < data_items; i++) {
9887                     data[i].effective = tswap32(target_data[i].effective);
9888                     data[i].permitted = tswap32(target_data[i].permitted);
9889                     data[i].inheritable = tswap32(target_data[i].inheritable);
9890                 }
9891             }
9892 
9893             dataptr = data;
9894         }
9895 
9896         if (num == TARGET_NR_capget) {
9897             ret = get_errno(capget(&header, dataptr));
9898         } else {
9899             ret = get_errno(capset(&header, dataptr));
9900         }
9901 
9902         /* The kernel always updates version for both capget and capset */
9903         target_header->version = tswap32(header.version);
9904         unlock_user_struct(target_header, arg1, 1);
9905 
9906         if (arg2) {
9907             if (num == TARGET_NR_capget) {
9908                 for (i = 0; i < data_items; i++) {
9909                     target_data[i].effective = tswap32(data[i].effective);
9910                     target_data[i].permitted = tswap32(data[i].permitted);
9911                     target_data[i].inheritable = tswap32(data[i].inheritable);
9912                 }
9913                 unlock_user(target_data, arg2, target_datalen);
9914             } else {
9915                 unlock_user(target_data, arg2, 0);
9916             }
9917         }
9918         return ret;
9919     }
9920     case TARGET_NR_sigaltstack:
9921         return do_sigaltstack(arg1, arg2,
9922                               get_sp_from_cpustate((CPUArchState *)cpu_env));
9923 
9924 #ifdef CONFIG_SENDFILE
9925 #ifdef TARGET_NR_sendfile
9926     case TARGET_NR_sendfile:
9927     {
9928         off_t *offp = NULL;
9929         off_t off;
9930         if (arg3) {
9931             ret = get_user_sal(off, arg3);
9932             if (is_error(ret)) {
9933                 return ret;
9934             }
9935             offp = &off;
9936         }
9937         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9938         if (!is_error(ret) && arg3) {
9939             abi_long ret2 = put_user_sal(off, arg3);
9940             if (is_error(ret2)) {
9941                 ret = ret2;
9942             }
9943         }
9944         return ret;
9945     }
9946 #endif
9947 #ifdef TARGET_NR_sendfile64
9948     case TARGET_NR_sendfile64:
9949     {
9950         off_t *offp = NULL;
9951         off_t off;
9952         if (arg3) {
9953             ret = get_user_s64(off, arg3);
9954             if (is_error(ret)) {
9955                 return ret;
9956             }
9957             offp = &off;
9958         }
9959         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9960         if (!is_error(ret) && arg3) {
9961             abi_long ret2 = put_user_s64(off, arg3);
9962             if (is_error(ret2)) {
9963                 ret = ret2;
9964             }
9965         }
9966         return ret;
9967     }
9968 #endif
9969 #endif
9970 #ifdef TARGET_NR_vfork
9971     case TARGET_NR_vfork:
9972         return get_errno(do_fork(cpu_env,
9973                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
9974                          0, 0, 0, 0));
9975 #endif
9976 #ifdef TARGET_NR_ugetrlimit
9977     case TARGET_NR_ugetrlimit:
9978     {
9979 	struct rlimit rlim;
9980 	int resource = target_to_host_resource(arg1);
9981 	ret = get_errno(getrlimit(resource, &rlim));
9982 	if (!is_error(ret)) {
9983 	    struct target_rlimit *target_rlim;
9984             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9985                 return -TARGET_EFAULT;
9986 	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9987 	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9988             unlock_user_struct(target_rlim, arg2, 1);
9989 	}
9990         return ret;
9991     }
9992 #endif
9993 #ifdef TARGET_NR_truncate64
9994     case TARGET_NR_truncate64:
9995         if (!(p = lock_user_string(arg1)))
9996             return -TARGET_EFAULT;
9997 	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
9998         unlock_user(p, arg1, 0);
9999         return ret;
10000 #endif
10001 #ifdef TARGET_NR_ftruncate64
10002     case TARGET_NR_ftruncate64:
10003         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10004 #endif
10005 #ifdef TARGET_NR_stat64
10006     case TARGET_NR_stat64:
10007         if (!(p = lock_user_string(arg1))) {
10008             return -TARGET_EFAULT;
10009         }
10010         ret = get_errno(stat(path(p), &st));
10011         unlock_user(p, arg1, 0);
10012         if (!is_error(ret))
10013             ret = host_to_target_stat64(cpu_env, arg2, &st);
10014         return ret;
10015 #endif
10016 #ifdef TARGET_NR_lstat64
10017     case TARGET_NR_lstat64:
10018         if (!(p = lock_user_string(arg1))) {
10019             return -TARGET_EFAULT;
10020         }
10021         ret = get_errno(lstat(path(p), &st));
10022         unlock_user(p, arg1, 0);
10023         if (!is_error(ret))
10024             ret = host_to_target_stat64(cpu_env, arg2, &st);
10025         return ret;
10026 #endif
10027 #ifdef TARGET_NR_fstat64
10028     case TARGET_NR_fstat64:
10029         ret = get_errno(fstat(arg1, &st));
10030         if (!is_error(ret))
10031             ret = host_to_target_stat64(cpu_env, arg2, &st);
10032         return ret;
10033 #endif
10034 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10035 #ifdef TARGET_NR_fstatat64
10036     case TARGET_NR_fstatat64:
10037 #endif
10038 #ifdef TARGET_NR_newfstatat
10039     case TARGET_NR_newfstatat:
10040 #endif
10041         if (!(p = lock_user_string(arg2))) {
10042             return -TARGET_EFAULT;
10043         }
10044         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10045         unlock_user(p, arg2, 0);
10046         if (!is_error(ret))
10047             ret = host_to_target_stat64(cpu_env, arg3, &st);
10048         return ret;
10049 #endif
10050 #ifdef TARGET_NR_lchown
10051     case TARGET_NR_lchown:
10052         if (!(p = lock_user_string(arg1)))
10053             return -TARGET_EFAULT;
10054         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10055         unlock_user(p, arg1, 0);
10056         return ret;
10057 #endif
10058 #ifdef TARGET_NR_getuid
10059     case TARGET_NR_getuid:
10060         return get_errno(high2lowuid(getuid()));
10061 #endif
10062 #ifdef TARGET_NR_getgid
10063     case TARGET_NR_getgid:
10064         return get_errno(high2lowgid(getgid()));
10065 #endif
10066 #ifdef TARGET_NR_geteuid
10067     case TARGET_NR_geteuid:
10068         return get_errno(high2lowuid(geteuid()));
10069 #endif
10070 #ifdef TARGET_NR_getegid
10071     case TARGET_NR_getegid:
10072         return get_errno(high2lowgid(getegid()));
10073 #endif
10074     case TARGET_NR_setreuid:
10075         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10076     case TARGET_NR_setregid:
10077         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10078     case TARGET_NR_getgroups:
10079         {
10080             int gidsetsize = arg1;
10081             target_id *target_grouplist;
10082             gid_t *grouplist;
10083             int i;
10084 
10085             grouplist = alloca(gidsetsize * sizeof(gid_t));
10086             ret = get_errno(getgroups(gidsetsize, grouplist));
10087             if (gidsetsize == 0)
10088                 return ret;
10089             if (!is_error(ret)) {
10090                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10091                 if (!target_grouplist)
10092                     return -TARGET_EFAULT;
10093                 for(i = 0;i < ret; i++)
10094                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10095                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10096             }
10097         }
10098         return ret;
10099     case TARGET_NR_setgroups:
10100         {
10101             int gidsetsize = arg1;
10102             target_id *target_grouplist;
10103             gid_t *grouplist = NULL;
10104             int i;
10105             if (gidsetsize) {
10106                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10107                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10108                 if (!target_grouplist) {
10109                     return -TARGET_EFAULT;
10110                 }
10111                 for (i = 0; i < gidsetsize; i++) {
10112                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10113                 }
10114                 unlock_user(target_grouplist, arg2, 0);
10115             }
10116             return get_errno(setgroups(gidsetsize, grouplist));
10117         }
10118     case TARGET_NR_fchown:
10119         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10120 #if defined(TARGET_NR_fchownat)
10121     case TARGET_NR_fchownat:
10122         if (!(p = lock_user_string(arg2)))
10123             return -TARGET_EFAULT;
10124         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10125                                  low2highgid(arg4), arg5));
10126         unlock_user(p, arg2, 0);
10127         return ret;
10128 #endif
10129 #ifdef TARGET_NR_setresuid
10130     case TARGET_NR_setresuid:
10131         return get_errno(sys_setresuid(low2highuid(arg1),
10132                                        low2highuid(arg2),
10133                                        low2highuid(arg3)));
10134 #endif
10135 #ifdef TARGET_NR_getresuid
10136     case TARGET_NR_getresuid:
10137         {
10138             uid_t ruid, euid, suid;
10139             ret = get_errno(getresuid(&ruid, &euid, &suid));
10140             if (!is_error(ret)) {
10141                 if (put_user_id(high2lowuid(ruid), arg1)
10142                     || put_user_id(high2lowuid(euid), arg2)
10143                     || put_user_id(high2lowuid(suid), arg3))
10144                     return -TARGET_EFAULT;
10145             }
10146         }
10147         return ret;
10148 #endif
10149 #ifdef TARGET_NR_getresgid
10150     case TARGET_NR_setresgid:
10151         return get_errno(sys_setresgid(low2highgid(arg1),
10152                                        low2highgid(arg2),
10153                                        low2highgid(arg3)));
10154 #endif
10155 #ifdef TARGET_NR_getresgid
10156     case TARGET_NR_getresgid:
10157         {
10158             gid_t rgid, egid, sgid;
10159             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10160             if (!is_error(ret)) {
10161                 if (put_user_id(high2lowgid(rgid), arg1)
10162                     || put_user_id(high2lowgid(egid), arg2)
10163                     || put_user_id(high2lowgid(sgid), arg3))
10164                     return -TARGET_EFAULT;
10165             }
10166         }
10167         return ret;
10168 #endif
10169 #ifdef TARGET_NR_chown
10170     case TARGET_NR_chown:
10171         if (!(p = lock_user_string(arg1)))
10172             return -TARGET_EFAULT;
10173         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10174         unlock_user(p, arg1, 0);
10175         return ret;
10176 #endif
10177     case TARGET_NR_setuid:
10178         return get_errno(sys_setuid(low2highuid(arg1)));
10179     case TARGET_NR_setgid:
10180         return get_errno(sys_setgid(low2highgid(arg1)));
10181     case TARGET_NR_setfsuid:
10182         return get_errno(setfsuid(arg1));
10183     case TARGET_NR_setfsgid:
10184         return get_errno(setfsgid(arg1));
10185 
10186 #ifdef TARGET_NR_lchown32
10187     case TARGET_NR_lchown32:
10188         if (!(p = lock_user_string(arg1)))
10189             return -TARGET_EFAULT;
10190         ret = get_errno(lchown(p, arg2, arg3));
10191         unlock_user(p, arg1, 0);
10192         return ret;
10193 #endif
10194 #ifdef TARGET_NR_getuid32
10195     case TARGET_NR_getuid32:
10196         return get_errno(getuid());
10197 #endif
10198 
10199 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10200    /* Alpha specific */
10201     case TARGET_NR_getxuid:
10202          {
10203             uid_t euid;
10204             euid=geteuid();
10205             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
10206          }
10207         return get_errno(getuid());
10208 #endif
10209 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10210    /* Alpha specific */
10211     case TARGET_NR_getxgid:
10212          {
10213             uid_t egid;
10214             egid=getegid();
10215             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
10216          }
10217         return get_errno(getgid());
10218 #endif
10219 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10220     /* Alpha specific */
10221     case TARGET_NR_osf_getsysinfo:
10222         ret = -TARGET_EOPNOTSUPP;
10223         switch (arg1) {
10224           case TARGET_GSI_IEEE_FP_CONTROL:
10225             {
10226                 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
10227 
10228                 /* Copied from linux ieee_fpcr_to_swcr.  */
10229                 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10230                 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10231                 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10232                                         | SWCR_TRAP_ENABLE_DZE
10233                                         | SWCR_TRAP_ENABLE_OVF);
10234                 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10235                                         | SWCR_TRAP_ENABLE_INE);
10236                 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10237                 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10238 
10239                 if (put_user_u64 (swcr, arg2))
10240                         return -TARGET_EFAULT;
10241                 ret = 0;
10242             }
10243             break;
10244 
10245           /* case GSI_IEEE_STATE_AT_SIGNAL:
10246              -- Not implemented in linux kernel.
10247              case GSI_UACPROC:
10248              -- Retrieves current unaligned access state; not much used.
10249              case GSI_PROC_TYPE:
10250              -- Retrieves implver information; surely not used.
10251              case GSI_GET_HWRPB:
10252              -- Grabs a copy of the HWRPB; surely not used.
10253           */
10254         }
10255         return ret;
10256 #endif
10257 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10258     /* Alpha specific */
10259     case TARGET_NR_osf_setsysinfo:
10260         ret = -TARGET_EOPNOTSUPP;
10261         switch (arg1) {
10262           case TARGET_SSI_IEEE_FP_CONTROL:
10263             {
10264                 uint64_t swcr, fpcr, orig_fpcr;
10265 
10266                 if (get_user_u64 (swcr, arg2)) {
10267                     return -TARGET_EFAULT;
10268                 }
10269                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10270                 fpcr = orig_fpcr & FPCR_DYN_MASK;
10271 
10272                 /* Copied from linux ieee_swcr_to_fpcr.  */
10273                 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10274                 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10275                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10276                                   | SWCR_TRAP_ENABLE_DZE
10277                                   | SWCR_TRAP_ENABLE_OVF)) << 48;
10278                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10279                                   | SWCR_TRAP_ENABLE_INE)) << 57;
10280                 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10281                 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10282 
10283                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10284                 ret = 0;
10285             }
10286             break;
10287 
10288           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10289             {
10290                 uint64_t exc, fpcr, orig_fpcr;
10291                 int si_code;
10292 
10293                 if (get_user_u64(exc, arg2)) {
10294                     return -TARGET_EFAULT;
10295                 }
10296 
10297                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10298 
10299                 /* We only add to the exception status here.  */
10300                 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
10301 
10302                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10303                 ret = 0;
10304 
10305                 /* Old exceptions are not signaled.  */
10306                 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
10307 
10308                 /* If any exceptions set by this call,
10309                    and are unmasked, send a signal.  */
10310                 si_code = 0;
10311                 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
10312                     si_code = TARGET_FPE_FLTRES;
10313                 }
10314                 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
10315                     si_code = TARGET_FPE_FLTUND;
10316                 }
10317                 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
10318                     si_code = TARGET_FPE_FLTOVF;
10319                 }
10320                 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
10321                     si_code = TARGET_FPE_FLTDIV;
10322                 }
10323                 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
10324                     si_code = TARGET_FPE_FLTINV;
10325                 }
10326                 if (si_code != 0) {
10327                     target_siginfo_t info;
10328                     info.si_signo = SIGFPE;
10329                     info.si_errno = 0;
10330                     info.si_code = si_code;
10331                     info._sifields._sigfault._addr
10332                         = ((CPUArchState *)cpu_env)->pc;
10333                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10334                                  QEMU_SI_FAULT, &info);
10335                 }
10336             }
10337             break;
10338 
10339           /* case SSI_NVPAIRS:
10340              -- Used with SSIN_UACPROC to enable unaligned accesses.
10341              case SSI_IEEE_STATE_AT_SIGNAL:
10342              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10343              -- Not implemented in linux kernel
10344           */
10345         }
10346         return ret;
10347 #endif
10348 #ifdef TARGET_NR_osf_sigprocmask
10349     /* Alpha specific.  */
10350     case TARGET_NR_osf_sigprocmask:
10351         {
10352             abi_ulong mask;
10353             int how;
10354             sigset_t set, oldset;
10355 
10356             switch(arg1) {
10357             case TARGET_SIG_BLOCK:
10358                 how = SIG_BLOCK;
10359                 break;
10360             case TARGET_SIG_UNBLOCK:
10361                 how = SIG_UNBLOCK;
10362                 break;
10363             case TARGET_SIG_SETMASK:
10364                 how = SIG_SETMASK;
10365                 break;
10366             default:
10367                 return -TARGET_EINVAL;
10368             }
10369             mask = arg2;
10370             target_to_host_old_sigset(&set, &mask);
10371             ret = do_sigprocmask(how, &set, &oldset);
10372             if (!ret) {
10373                 host_to_target_old_sigset(&mask, &oldset);
10374                 ret = mask;
10375             }
10376         }
10377         return ret;
10378 #endif
10379 
10380 #ifdef TARGET_NR_getgid32
10381     case TARGET_NR_getgid32:
10382         return get_errno(getgid());
10383 #endif
10384 #ifdef TARGET_NR_geteuid32
10385     case TARGET_NR_geteuid32:
10386         return get_errno(geteuid());
10387 #endif
10388 #ifdef TARGET_NR_getegid32
10389     case TARGET_NR_getegid32:
10390         return get_errno(getegid());
10391 #endif
10392 #ifdef TARGET_NR_setreuid32
10393     case TARGET_NR_setreuid32:
10394         return get_errno(setreuid(arg1, arg2));
10395 #endif
10396 #ifdef TARGET_NR_setregid32
10397     case TARGET_NR_setregid32:
10398         return get_errno(setregid(arg1, arg2));
10399 #endif
10400 #ifdef TARGET_NR_getgroups32
10401     case TARGET_NR_getgroups32:
10402         {
10403             int gidsetsize = arg1;
10404             uint32_t *target_grouplist;
10405             gid_t *grouplist;
10406             int i;
10407 
10408             grouplist = alloca(gidsetsize * sizeof(gid_t));
10409             ret = get_errno(getgroups(gidsetsize, grouplist));
10410             if (gidsetsize == 0)
10411                 return ret;
10412             if (!is_error(ret)) {
10413                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10414                 if (!target_grouplist) {
10415                     return -TARGET_EFAULT;
10416                 }
10417                 for(i = 0;i < ret; i++)
10418                     target_grouplist[i] = tswap32(grouplist[i]);
10419                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10420             }
10421         }
10422         return ret;
10423 #endif
10424 #ifdef TARGET_NR_setgroups32
10425     case TARGET_NR_setgroups32:
10426         {
10427             int gidsetsize = arg1;
10428             uint32_t *target_grouplist;
10429             gid_t *grouplist;
10430             int i;
10431 
10432             grouplist = alloca(gidsetsize * sizeof(gid_t));
10433             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10434             if (!target_grouplist) {
10435                 return -TARGET_EFAULT;
10436             }
10437             for(i = 0;i < gidsetsize; i++)
10438                 grouplist[i] = tswap32(target_grouplist[i]);
10439             unlock_user(target_grouplist, arg2, 0);
10440             return get_errno(setgroups(gidsetsize, grouplist));
10441         }
10442 #endif
10443 #ifdef TARGET_NR_fchown32
10444     case TARGET_NR_fchown32:
10445         return get_errno(fchown(arg1, arg2, arg3));
10446 #endif
10447 #ifdef TARGET_NR_setresuid32
10448     case TARGET_NR_setresuid32:
10449         return get_errno(sys_setresuid(arg1, arg2, arg3));
10450 #endif
10451 #ifdef TARGET_NR_getresuid32
10452     case TARGET_NR_getresuid32:
10453         {
10454             uid_t ruid, euid, suid;
10455             ret = get_errno(getresuid(&ruid, &euid, &suid));
10456             if (!is_error(ret)) {
10457                 if (put_user_u32(ruid, arg1)
10458                     || put_user_u32(euid, arg2)
10459                     || put_user_u32(suid, arg3))
10460                     return -TARGET_EFAULT;
10461             }
10462         }
10463         return ret;
10464 #endif
10465 #ifdef TARGET_NR_setresgid32
10466     case TARGET_NR_setresgid32:
10467         return get_errno(sys_setresgid(arg1, arg2, arg3));
10468 #endif
10469 #ifdef TARGET_NR_getresgid32
10470     case TARGET_NR_getresgid32:
10471         {
10472             gid_t rgid, egid, sgid;
10473             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10474             if (!is_error(ret)) {
10475                 if (put_user_u32(rgid, arg1)
10476                     || put_user_u32(egid, arg2)
10477                     || put_user_u32(sgid, arg3))
10478                     return -TARGET_EFAULT;
10479             }
10480         }
10481         return ret;
10482 #endif
10483 #ifdef TARGET_NR_chown32
10484     case TARGET_NR_chown32:
10485         if (!(p = lock_user_string(arg1)))
10486             return -TARGET_EFAULT;
10487         ret = get_errno(chown(p, arg2, arg3));
10488         unlock_user(p, arg1, 0);
10489         return ret;
10490 #endif
10491 #ifdef TARGET_NR_setuid32
10492     case TARGET_NR_setuid32:
10493         return get_errno(sys_setuid(arg1));
10494 #endif
10495 #ifdef TARGET_NR_setgid32
10496     case TARGET_NR_setgid32:
10497         return get_errno(sys_setgid(arg1));
10498 #endif
10499 #ifdef TARGET_NR_setfsuid32
10500     case TARGET_NR_setfsuid32:
10501         return get_errno(setfsuid(arg1));
10502 #endif
10503 #ifdef TARGET_NR_setfsgid32
10504     case TARGET_NR_setfsgid32:
10505         return get_errno(setfsgid(arg1));
10506 #endif
10507 #ifdef TARGET_NR_mincore
10508     case TARGET_NR_mincore:
10509         {
10510             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10511             if (!a) {
10512                 return -TARGET_ENOMEM;
10513             }
10514             p = lock_user_string(arg3);
10515             if (!p) {
10516                 ret = -TARGET_EFAULT;
10517             } else {
10518                 ret = get_errno(mincore(a, arg2, p));
10519                 unlock_user(p, arg3, ret);
10520             }
10521             unlock_user(a, arg1, 0);
10522         }
10523         return ret;
10524 #endif
10525 #ifdef TARGET_NR_arm_fadvise64_64
10526     case TARGET_NR_arm_fadvise64_64:
10527         /* arm_fadvise64_64 looks like fadvise64_64 but
10528          * with different argument order: fd, advice, offset, len
10529          * rather than the usual fd, offset, len, advice.
10530          * Note that offset and len are both 64-bit so appear as
10531          * pairs of 32-bit registers.
10532          */
10533         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10534                             target_offset64(arg5, arg6), arg2);
10535         return -host_to_target_errno(ret);
10536 #endif
10537 
10538 #if TARGET_ABI_BITS == 32
10539 
10540 #ifdef TARGET_NR_fadvise64_64
10541     case TARGET_NR_fadvise64_64:
10542 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10543         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10544         ret = arg2;
10545         arg2 = arg3;
10546         arg3 = arg4;
10547         arg4 = arg5;
10548         arg5 = arg6;
10549         arg6 = ret;
10550 #else
10551         /* 6 args: fd, offset (high, low), len (high, low), advice */
10552         if (regpairs_aligned(cpu_env, num)) {
10553             /* offset is in (3,4), len in (5,6) and advice in 7 */
10554             arg2 = arg3;
10555             arg3 = arg4;
10556             arg4 = arg5;
10557             arg5 = arg6;
10558             arg6 = arg7;
10559         }
10560 #endif
10561         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10562                             target_offset64(arg4, arg5), arg6);
10563         return -host_to_target_errno(ret);
10564 #endif
10565 
10566 #ifdef TARGET_NR_fadvise64
10567     case TARGET_NR_fadvise64:
10568         /* 5 args: fd, offset (high, low), len, advice */
10569         if (regpairs_aligned(cpu_env, num)) {
10570             /* offset is in (3,4), len in 5 and advice in 6 */
10571             arg2 = arg3;
10572             arg3 = arg4;
10573             arg4 = arg5;
10574             arg5 = arg6;
10575         }
10576         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10577         return -host_to_target_errno(ret);
10578 #endif
10579 
10580 #else /* not a 32-bit ABI */
10581 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10582 #ifdef TARGET_NR_fadvise64_64
10583     case TARGET_NR_fadvise64_64:
10584 #endif
10585 #ifdef TARGET_NR_fadvise64
10586     case TARGET_NR_fadvise64:
10587 #endif
10588 #ifdef TARGET_S390X
10589         switch (arg4) {
10590         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10591         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10592         case 6: arg4 = POSIX_FADV_DONTNEED; break;
10593         case 7: arg4 = POSIX_FADV_NOREUSE; break;
10594         default: break;
10595         }
10596 #endif
10597         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10598 #endif
10599 #endif /* end of 64-bit ABI fadvise handling */
10600 
10601 #ifdef TARGET_NR_madvise
10602     case TARGET_NR_madvise:
10603         /* A straight passthrough may not be safe because qemu sometimes
10604            turns private file-backed mappings into anonymous mappings.
10605            This will break MADV_DONTNEED.
10606            This is a hint, so ignoring and returning success is ok.  */
10607         return 0;
10608 #endif
10609 #if TARGET_ABI_BITS == 32
10610     case TARGET_NR_fcntl64:
10611     {
10612 	int cmd;
10613 	struct flock64 fl;
10614         from_flock64_fn *copyfrom = copy_from_user_flock64;
10615         to_flock64_fn *copyto = copy_to_user_flock64;
10616 
10617 #ifdef TARGET_ARM
10618         if (!((CPUARMState *)cpu_env)->eabi) {
10619             copyfrom = copy_from_user_oabi_flock64;
10620             copyto = copy_to_user_oabi_flock64;
10621         }
10622 #endif
10623 
10624 	cmd = target_to_host_fcntl_cmd(arg2);
10625         if (cmd == -TARGET_EINVAL) {
10626             return cmd;
10627         }
10628 
10629         switch(arg2) {
10630         case TARGET_F_GETLK64:
10631             ret = copyfrom(&fl, arg3);
10632             if (ret) {
10633                 break;
10634             }
10635             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10636             if (ret == 0) {
10637                 ret = copyto(arg3, &fl);
10638             }
10639 	    break;
10640 
10641         case TARGET_F_SETLK64:
10642         case TARGET_F_SETLKW64:
10643             ret = copyfrom(&fl, arg3);
10644             if (ret) {
10645                 break;
10646             }
10647             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10648 	    break;
10649         default:
10650             ret = do_fcntl(arg1, arg2, arg3);
10651             break;
10652         }
10653         return ret;
10654     }
10655 #endif
10656 #ifdef TARGET_NR_cacheflush
10657     case TARGET_NR_cacheflush:
10658         /* self-modifying code is handled automatically, so nothing needed */
10659         return 0;
10660 #endif
10661 #ifdef TARGET_NR_getpagesize
10662     case TARGET_NR_getpagesize:
10663         return TARGET_PAGE_SIZE;
10664 #endif
10665     case TARGET_NR_gettid:
10666         return get_errno(sys_gettid());
10667 #ifdef TARGET_NR_readahead
10668     case TARGET_NR_readahead:
10669 #if TARGET_ABI_BITS == 32
10670         if (regpairs_aligned(cpu_env, num)) {
10671             arg2 = arg3;
10672             arg3 = arg4;
10673             arg4 = arg5;
10674         }
10675         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
10676 #else
10677         ret = get_errno(readahead(arg1, arg2, arg3));
10678 #endif
10679         return ret;
10680 #endif
10681 #ifdef CONFIG_ATTR
10682 #ifdef TARGET_NR_setxattr
10683     case TARGET_NR_listxattr:
10684     case TARGET_NR_llistxattr:
10685     {
10686         void *p, *b = 0;
10687         if (arg2) {
10688             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10689             if (!b) {
10690                 return -TARGET_EFAULT;
10691             }
10692         }
10693         p = lock_user_string(arg1);
10694         if (p) {
10695             if (num == TARGET_NR_listxattr) {
10696                 ret = get_errno(listxattr(p, b, arg3));
10697             } else {
10698                 ret = get_errno(llistxattr(p, b, arg3));
10699             }
10700         } else {
10701             ret = -TARGET_EFAULT;
10702         }
10703         unlock_user(p, arg1, 0);
10704         unlock_user(b, arg2, arg3);
10705         return ret;
10706     }
10707     case TARGET_NR_flistxattr:
10708     {
10709         void *b = 0;
10710         if (arg2) {
10711             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10712             if (!b) {
10713                 return -TARGET_EFAULT;
10714             }
10715         }
10716         ret = get_errno(flistxattr(arg1, b, arg3));
10717         unlock_user(b, arg2, arg3);
10718         return ret;
10719     }
10720     case TARGET_NR_setxattr:
10721     case TARGET_NR_lsetxattr:
10722         {
10723             void *p, *n, *v = 0;
10724             if (arg3) {
10725                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10726                 if (!v) {
10727                     return -TARGET_EFAULT;
10728                 }
10729             }
10730             p = lock_user_string(arg1);
10731             n = lock_user_string(arg2);
10732             if (p && n) {
10733                 if (num == TARGET_NR_setxattr) {
10734                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
10735                 } else {
10736                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10737                 }
10738             } else {
10739                 ret = -TARGET_EFAULT;
10740             }
10741             unlock_user(p, arg1, 0);
10742             unlock_user(n, arg2, 0);
10743             unlock_user(v, arg3, 0);
10744         }
10745         return ret;
10746     case TARGET_NR_fsetxattr:
10747         {
10748             void *n, *v = 0;
10749             if (arg3) {
10750                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10751                 if (!v) {
10752                     return -TARGET_EFAULT;
10753                 }
10754             }
10755             n = lock_user_string(arg2);
10756             if (n) {
10757                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10758             } else {
10759                 ret = -TARGET_EFAULT;
10760             }
10761             unlock_user(n, arg2, 0);
10762             unlock_user(v, arg3, 0);
10763         }
10764         return ret;
10765     case TARGET_NR_getxattr:
10766     case TARGET_NR_lgetxattr:
10767         {
10768             void *p, *n, *v = 0;
10769             if (arg3) {
10770                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10771                 if (!v) {
10772                     return -TARGET_EFAULT;
10773                 }
10774             }
10775             p = lock_user_string(arg1);
10776             n = lock_user_string(arg2);
10777             if (p && n) {
10778                 if (num == TARGET_NR_getxattr) {
10779                     ret = get_errno(getxattr(p, n, v, arg4));
10780                 } else {
10781                     ret = get_errno(lgetxattr(p, n, v, arg4));
10782                 }
10783             } else {
10784                 ret = -TARGET_EFAULT;
10785             }
10786             unlock_user(p, arg1, 0);
10787             unlock_user(n, arg2, 0);
10788             unlock_user(v, arg3, arg4);
10789         }
10790         return ret;
10791     case TARGET_NR_fgetxattr:
10792         {
10793             void *n, *v = 0;
10794             if (arg3) {
10795                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10796                 if (!v) {
10797                     return -TARGET_EFAULT;
10798                 }
10799             }
10800             n = lock_user_string(arg2);
10801             if (n) {
10802                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10803             } else {
10804                 ret = -TARGET_EFAULT;
10805             }
10806             unlock_user(n, arg2, 0);
10807             unlock_user(v, arg3, arg4);
10808         }
10809         return ret;
10810     case TARGET_NR_removexattr:
10811     case TARGET_NR_lremovexattr:
10812         {
10813             void *p, *n;
10814             p = lock_user_string(arg1);
10815             n = lock_user_string(arg2);
10816             if (p && n) {
10817                 if (num == TARGET_NR_removexattr) {
10818                     ret = get_errno(removexattr(p, n));
10819                 } else {
10820                     ret = get_errno(lremovexattr(p, n));
10821                 }
10822             } else {
10823                 ret = -TARGET_EFAULT;
10824             }
10825             unlock_user(p, arg1, 0);
10826             unlock_user(n, arg2, 0);
10827         }
10828         return ret;
10829     case TARGET_NR_fremovexattr:
10830         {
10831             void *n;
10832             n = lock_user_string(arg2);
10833             if (n) {
10834                 ret = get_errno(fremovexattr(arg1, n));
10835             } else {
10836                 ret = -TARGET_EFAULT;
10837             }
10838             unlock_user(n, arg2, 0);
10839         }
10840         return ret;
10841 #endif
10842 #endif /* CONFIG_ATTR */
10843 #ifdef TARGET_NR_set_thread_area
10844     case TARGET_NR_set_thread_area:
10845 #if defined(TARGET_MIPS)
10846       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10847       return 0;
10848 #elif defined(TARGET_CRIS)
10849       if (arg1 & 0xff)
10850           ret = -TARGET_EINVAL;
10851       else {
10852           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10853           ret = 0;
10854       }
10855       return ret;
10856 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10857       return do_set_thread_area(cpu_env, arg1);
10858 #elif defined(TARGET_M68K)
10859       {
10860           TaskState *ts = cpu->opaque;
10861           ts->tp_value = arg1;
10862           return 0;
10863       }
10864 #else
10865       return -TARGET_ENOSYS;
10866 #endif
10867 #endif
10868 #ifdef TARGET_NR_get_thread_area
10869     case TARGET_NR_get_thread_area:
10870 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10871         return do_get_thread_area(cpu_env, arg1);
10872 #elif defined(TARGET_M68K)
10873         {
10874             TaskState *ts = cpu->opaque;
10875             return ts->tp_value;
10876         }
10877 #else
10878         return -TARGET_ENOSYS;
10879 #endif
10880 #endif
10881 #ifdef TARGET_NR_getdomainname
10882     case TARGET_NR_getdomainname:
10883         return -TARGET_ENOSYS;
10884 #endif
10885 
10886 #ifdef TARGET_NR_clock_settime
10887     case TARGET_NR_clock_settime:
10888     {
10889         struct timespec ts;
10890 
10891         ret = target_to_host_timespec(&ts, arg2);
10892         if (!is_error(ret)) {
10893             ret = get_errno(clock_settime(arg1, &ts));
10894         }
10895         return ret;
10896     }
10897 #endif
10898 #ifdef TARGET_NR_clock_gettime
10899     case TARGET_NR_clock_gettime:
10900     {
10901         struct timespec ts;
10902         ret = get_errno(clock_gettime(arg1, &ts));
10903         if (!is_error(ret)) {
10904             ret = host_to_target_timespec(arg2, &ts);
10905         }
10906         return ret;
10907     }
10908 #endif
10909 #ifdef TARGET_NR_clock_getres
10910     case TARGET_NR_clock_getres:
10911     {
10912         struct timespec ts;
10913         ret = get_errno(clock_getres(arg1, &ts));
10914         if (!is_error(ret)) {
10915             host_to_target_timespec(arg2, &ts);
10916         }
10917         return ret;
10918     }
10919 #endif
10920 #ifdef TARGET_NR_clock_nanosleep
10921     case TARGET_NR_clock_nanosleep:
10922     {
10923         struct timespec ts;
10924         target_to_host_timespec(&ts, arg3);
10925         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
10926                                              &ts, arg4 ? &ts : NULL));
10927         if (arg4)
10928             host_to_target_timespec(arg4, &ts);
10929 
10930 #if defined(TARGET_PPC)
10931         /* clock_nanosleep is odd in that it returns positive errno values.
10932          * On PPC, CR0 bit 3 should be set in such a situation. */
10933         if (ret && ret != -TARGET_ERESTARTSYS) {
10934             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
10935         }
10936 #endif
10937         return ret;
10938     }
10939 #endif
10940 
10941 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10942     case TARGET_NR_set_tid_address:
10943         return get_errno(set_tid_address((int *)g2h(arg1)));
10944 #endif
10945 
10946     case TARGET_NR_tkill:
10947         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10948 
10949     case TARGET_NR_tgkill:
10950         return get_errno(safe_tgkill((int)arg1, (int)arg2,
10951                          target_to_host_signal(arg3)));
10952 
10953 #ifdef TARGET_NR_set_robust_list
10954     case TARGET_NR_set_robust_list:
10955     case TARGET_NR_get_robust_list:
10956         /* The ABI for supporting robust futexes has userspace pass
10957          * the kernel a pointer to a linked list which is updated by
10958          * userspace after the syscall; the list is walked by the kernel
10959          * when the thread exits. Since the linked list in QEMU guest
10960          * memory isn't a valid linked list for the host and we have
10961          * no way to reliably intercept the thread-death event, we can't
10962          * support these. Silently return ENOSYS so that guest userspace
10963          * falls back to a non-robust futex implementation (which should
10964          * be OK except in the corner case of the guest crashing while
10965          * holding a mutex that is shared with another process via
10966          * shared memory).
10967          */
10968         return -TARGET_ENOSYS;
10969 #endif
10970 
10971 #if defined(TARGET_NR_utimensat)
10972     case TARGET_NR_utimensat:
10973         {
10974             struct timespec *tsp, ts[2];
10975             if (!arg3) {
10976                 tsp = NULL;
10977             } else {
10978                 target_to_host_timespec(ts, arg3);
10979                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
10980                 tsp = ts;
10981             }
10982             if (!arg2)
10983                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
10984             else {
10985                 if (!(p = lock_user_string(arg2))) {
10986                     return -TARGET_EFAULT;
10987                 }
10988                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
10989                 unlock_user(p, arg2, 0);
10990             }
10991         }
10992         return ret;
10993 #endif
10994     case TARGET_NR_futex:
10995         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
10996 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10997     case TARGET_NR_inotify_init:
10998         ret = get_errno(sys_inotify_init());
10999         if (ret >= 0) {
11000             fd_trans_register(ret, &target_inotify_trans);
11001         }
11002         return ret;
11003 #endif
11004 #ifdef CONFIG_INOTIFY1
11005 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11006     case TARGET_NR_inotify_init1:
11007         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11008                                           fcntl_flags_tbl)));
11009         if (ret >= 0) {
11010             fd_trans_register(ret, &target_inotify_trans);
11011         }
11012         return ret;
11013 #endif
11014 #endif
11015 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11016     case TARGET_NR_inotify_add_watch:
11017         p = lock_user_string(arg2);
11018         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11019         unlock_user(p, arg2, 0);
11020         return ret;
11021 #endif
11022 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11023     case TARGET_NR_inotify_rm_watch:
11024         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11025 #endif
11026 
11027 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11028     case TARGET_NR_mq_open:
11029         {
11030             struct mq_attr posix_mq_attr;
11031             struct mq_attr *pposix_mq_attr;
11032             int host_flags;
11033 
11034             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11035             pposix_mq_attr = NULL;
11036             if (arg4) {
11037                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11038                     return -TARGET_EFAULT;
11039                 }
11040                 pposix_mq_attr = &posix_mq_attr;
11041             }
11042             p = lock_user_string(arg1 - 1);
11043             if (!p) {
11044                 return -TARGET_EFAULT;
11045             }
11046             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11047             unlock_user (p, arg1, 0);
11048         }
11049         return ret;
11050 
11051     case TARGET_NR_mq_unlink:
11052         p = lock_user_string(arg1 - 1);
11053         if (!p) {
11054             return -TARGET_EFAULT;
11055         }
11056         ret = get_errno(mq_unlink(p));
11057         unlock_user (p, arg1, 0);
11058         return ret;
11059 
11060     case TARGET_NR_mq_timedsend:
11061         {
11062             struct timespec ts;
11063 
11064             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11065             if (arg5 != 0) {
11066                 target_to_host_timespec(&ts, arg5);
11067                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11068                 host_to_target_timespec(arg5, &ts);
11069             } else {
11070                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11071             }
11072             unlock_user (p, arg2, arg3);
11073         }
11074         return ret;
11075 
11076     case TARGET_NR_mq_timedreceive:
11077         {
11078             struct timespec ts;
11079             unsigned int prio;
11080 
11081             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11082             if (arg5 != 0) {
11083                 target_to_host_timespec(&ts, arg5);
11084                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11085                                                      &prio, &ts));
11086                 host_to_target_timespec(arg5, &ts);
11087             } else {
11088                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11089                                                      &prio, NULL));
11090             }
11091             unlock_user (p, arg2, arg3);
11092             if (arg4 != 0)
11093                 put_user_u32(prio, arg4);
11094         }
11095         return ret;
11096 
11097     /* Not implemented for now... */
11098 /*     case TARGET_NR_mq_notify: */
11099 /*         break; */
11100 
11101     case TARGET_NR_mq_getsetattr:
11102         {
11103             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11104             ret = 0;
11105             if (arg2 != 0) {
11106                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11107                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11108                                            &posix_mq_attr_out));
11109             } else if (arg3 != 0) {
11110                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11111             }
11112             if (ret == 0 && arg3 != 0) {
11113                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11114             }
11115         }
11116         return ret;
11117 #endif
11118 
11119 #ifdef CONFIG_SPLICE
11120 #ifdef TARGET_NR_tee
11121     case TARGET_NR_tee:
11122         {
11123             ret = get_errno(tee(arg1,arg2,arg3,arg4));
11124         }
11125         return ret;
11126 #endif
11127 #ifdef TARGET_NR_splice
11128     case TARGET_NR_splice:
11129         {
11130             loff_t loff_in, loff_out;
11131             loff_t *ploff_in = NULL, *ploff_out = NULL;
11132             if (arg2) {
11133                 if (get_user_u64(loff_in, arg2)) {
11134                     return -TARGET_EFAULT;
11135                 }
11136                 ploff_in = &loff_in;
11137             }
11138             if (arg4) {
11139                 if (get_user_u64(loff_out, arg4)) {
11140                     return -TARGET_EFAULT;
11141                 }
11142                 ploff_out = &loff_out;
11143             }
11144             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11145             if (arg2) {
11146                 if (put_user_u64(loff_in, arg2)) {
11147                     return -TARGET_EFAULT;
11148                 }
11149             }
11150             if (arg4) {
11151                 if (put_user_u64(loff_out, arg4)) {
11152                     return -TARGET_EFAULT;
11153                 }
11154             }
11155         }
11156         return ret;
11157 #endif
11158 #ifdef TARGET_NR_vmsplice
11159 	case TARGET_NR_vmsplice:
11160         {
11161             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11162             if (vec != NULL) {
11163                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11164                 unlock_iovec(vec, arg2, arg3, 0);
11165             } else {
11166                 ret = -host_to_target_errno(errno);
11167             }
11168         }
11169         return ret;
11170 #endif
11171 #endif /* CONFIG_SPLICE */
11172 #ifdef CONFIG_EVENTFD
11173 #if defined(TARGET_NR_eventfd)
11174     case TARGET_NR_eventfd:
11175         ret = get_errno(eventfd(arg1, 0));
11176         if (ret >= 0) {
11177             fd_trans_register(ret, &target_eventfd_trans);
11178         }
11179         return ret;
11180 #endif
11181 #if defined(TARGET_NR_eventfd2)
11182     case TARGET_NR_eventfd2:
11183     {
11184         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11185         if (arg2 & TARGET_O_NONBLOCK) {
11186             host_flags |= O_NONBLOCK;
11187         }
11188         if (arg2 & TARGET_O_CLOEXEC) {
11189             host_flags |= O_CLOEXEC;
11190         }
11191         ret = get_errno(eventfd(arg1, host_flags));
11192         if (ret >= 0) {
11193             fd_trans_register(ret, &target_eventfd_trans);
11194         }
11195         return ret;
11196     }
11197 #endif
11198 #endif /* CONFIG_EVENTFD  */
11199 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11200     case TARGET_NR_fallocate:
11201 #if TARGET_ABI_BITS == 32
11202         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11203                                   target_offset64(arg5, arg6)));
11204 #else
11205         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11206 #endif
11207         return ret;
11208 #endif
11209 #if defined(CONFIG_SYNC_FILE_RANGE)
11210 #if defined(TARGET_NR_sync_file_range)
11211     case TARGET_NR_sync_file_range:
11212 #if TARGET_ABI_BITS == 32
11213 #if defined(TARGET_MIPS)
11214         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11215                                         target_offset64(arg5, arg6), arg7));
11216 #else
11217         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11218                                         target_offset64(arg4, arg5), arg6));
11219 #endif /* !TARGET_MIPS */
11220 #else
11221         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11222 #endif
11223         return ret;
11224 #endif
11225 #if defined(TARGET_NR_sync_file_range2)
11226     case TARGET_NR_sync_file_range2:
11227         /* This is like sync_file_range but the arguments are reordered */
11228 #if TARGET_ABI_BITS == 32
11229         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11230                                         target_offset64(arg5, arg6), arg2));
11231 #else
11232         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11233 #endif
11234         return ret;
11235 #endif
11236 #endif
11237 #if defined(TARGET_NR_signalfd4)
11238     case TARGET_NR_signalfd4:
11239         return do_signalfd4(arg1, arg2, arg4);
11240 #endif
11241 #if defined(TARGET_NR_signalfd)
11242     case TARGET_NR_signalfd:
11243         return do_signalfd4(arg1, arg2, 0);
11244 #endif
11245 #if defined(CONFIG_EPOLL)
11246 #if defined(TARGET_NR_epoll_create)
11247     case TARGET_NR_epoll_create:
11248         return get_errno(epoll_create(arg1));
11249 #endif
11250 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11251     case TARGET_NR_epoll_create1:
11252         return get_errno(epoll_create1(arg1));
11253 #endif
11254 #if defined(TARGET_NR_epoll_ctl)
11255     case TARGET_NR_epoll_ctl:
11256     {
11257         struct epoll_event ep;
11258         struct epoll_event *epp = 0;
11259         if (arg4) {
11260             struct target_epoll_event *target_ep;
11261             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11262                 return -TARGET_EFAULT;
11263             }
11264             ep.events = tswap32(target_ep->events);
11265             /* The epoll_data_t union is just opaque data to the kernel,
11266              * so we transfer all 64 bits across and need not worry what
11267              * actual data type it is.
11268              */
11269             ep.data.u64 = tswap64(target_ep->data.u64);
11270             unlock_user_struct(target_ep, arg4, 0);
11271             epp = &ep;
11272         }
11273         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11274     }
11275 #endif
11276 
11277 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11278 #if defined(TARGET_NR_epoll_wait)
11279     case TARGET_NR_epoll_wait:
11280 #endif
11281 #if defined(TARGET_NR_epoll_pwait)
11282     case TARGET_NR_epoll_pwait:
11283 #endif
11284     {
11285         struct target_epoll_event *target_ep;
11286         struct epoll_event *ep;
11287         int epfd = arg1;
11288         int maxevents = arg3;
11289         int timeout = arg4;
11290 
11291         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11292             return -TARGET_EINVAL;
11293         }
11294 
11295         target_ep = lock_user(VERIFY_WRITE, arg2,
11296                               maxevents * sizeof(struct target_epoll_event), 1);
11297         if (!target_ep) {
11298             return -TARGET_EFAULT;
11299         }
11300 
11301         ep = g_try_new(struct epoll_event, maxevents);
11302         if (!ep) {
11303             unlock_user(target_ep, arg2, 0);
11304             return -TARGET_ENOMEM;
11305         }
11306 
11307         switch (num) {
11308 #if defined(TARGET_NR_epoll_pwait)
11309         case TARGET_NR_epoll_pwait:
11310         {
11311             target_sigset_t *target_set;
11312             sigset_t _set, *set = &_set;
11313 
11314             if (arg5) {
11315                 if (arg6 != sizeof(target_sigset_t)) {
11316                     ret = -TARGET_EINVAL;
11317                     break;
11318                 }
11319 
11320                 target_set = lock_user(VERIFY_READ, arg5,
11321                                        sizeof(target_sigset_t), 1);
11322                 if (!target_set) {
11323                     ret = -TARGET_EFAULT;
11324                     break;
11325                 }
11326                 target_to_host_sigset(set, target_set);
11327                 unlock_user(target_set, arg5, 0);
11328             } else {
11329                 set = NULL;
11330             }
11331 
11332             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11333                                              set, SIGSET_T_SIZE));
11334             break;
11335         }
11336 #endif
11337 #if defined(TARGET_NR_epoll_wait)
11338         case TARGET_NR_epoll_wait:
11339             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11340                                              NULL, 0));
11341             break;
11342 #endif
11343         default:
11344             ret = -TARGET_ENOSYS;
11345         }
11346         if (!is_error(ret)) {
11347             int i;
11348             for (i = 0; i < ret; i++) {
11349                 target_ep[i].events = tswap32(ep[i].events);
11350                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11351             }
11352             unlock_user(target_ep, arg2,
11353                         ret * sizeof(struct target_epoll_event));
11354         } else {
11355             unlock_user(target_ep, arg2, 0);
11356         }
11357         g_free(ep);
11358         return ret;
11359     }
11360 #endif
11361 #endif
11362 #ifdef TARGET_NR_prlimit64
11363     case TARGET_NR_prlimit64:
11364     {
11365         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11366         struct target_rlimit64 *target_rnew, *target_rold;
11367         struct host_rlimit64 rnew, rold, *rnewp = 0;
11368         int resource = target_to_host_resource(arg2);
11369         if (arg3) {
11370             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11371                 return -TARGET_EFAULT;
11372             }
11373             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11374             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11375             unlock_user_struct(target_rnew, arg3, 0);
11376             rnewp = &rnew;
11377         }
11378 
11379         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11380         if (!is_error(ret) && arg4) {
11381             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11382                 return -TARGET_EFAULT;
11383             }
11384             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11385             target_rold->rlim_max = tswap64(rold.rlim_max);
11386             unlock_user_struct(target_rold, arg4, 1);
11387         }
11388         return ret;
11389     }
11390 #endif
11391 #ifdef TARGET_NR_gethostname
11392     case TARGET_NR_gethostname:
11393     {
11394         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11395         if (name) {
11396             ret = get_errno(gethostname(name, arg2));
11397             unlock_user(name, arg1, arg2);
11398         } else {
11399             ret = -TARGET_EFAULT;
11400         }
11401         return ret;
11402     }
11403 #endif
11404 #ifdef TARGET_NR_atomic_cmpxchg_32
11405     case TARGET_NR_atomic_cmpxchg_32:
11406     {
11407         /* should use start_exclusive from main.c */
11408         abi_ulong mem_value;
11409         if (get_user_u32(mem_value, arg6)) {
11410             target_siginfo_t info;
11411             info.si_signo = SIGSEGV;
11412             info.si_errno = 0;
11413             info.si_code = TARGET_SEGV_MAPERR;
11414             info._sifields._sigfault._addr = arg6;
11415             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11416                          QEMU_SI_FAULT, &info);
11417             ret = 0xdeadbeef;
11418 
11419         }
11420         if (mem_value == arg2)
11421             put_user_u32(arg1, arg6);
11422         return mem_value;
11423     }
11424 #endif
11425 #ifdef TARGET_NR_atomic_barrier
11426     case TARGET_NR_atomic_barrier:
11427         /* Like the kernel implementation and the
11428            qemu arm barrier, no-op this? */
11429         return 0;
11430 #endif
11431 
11432 #ifdef TARGET_NR_timer_create
11433     case TARGET_NR_timer_create:
11434     {
11435         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11436 
11437         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11438 
11439         int clkid = arg1;
11440         int timer_index = next_free_host_timer();
11441 
11442         if (timer_index < 0) {
11443             ret = -TARGET_EAGAIN;
11444         } else {
11445             timer_t *phtimer = g_posix_timers  + timer_index;
11446 
11447             if (arg2) {
11448                 phost_sevp = &host_sevp;
11449                 ret = target_to_host_sigevent(phost_sevp, arg2);
11450                 if (ret != 0) {
11451                     return ret;
11452                 }
11453             }
11454 
11455             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11456             if (ret) {
11457                 phtimer = NULL;
11458             } else {
11459                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11460                     return -TARGET_EFAULT;
11461                 }
11462             }
11463         }
11464         return ret;
11465     }
11466 #endif
11467 
11468 #ifdef TARGET_NR_timer_settime
11469     case TARGET_NR_timer_settime:
11470     {
11471         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11472          * struct itimerspec * old_value */
11473         target_timer_t timerid = get_timer_id(arg1);
11474 
11475         if (timerid < 0) {
11476             ret = timerid;
11477         } else if (arg3 == 0) {
11478             ret = -TARGET_EINVAL;
11479         } else {
11480             timer_t htimer = g_posix_timers[timerid];
11481             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11482 
11483             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11484                 return -TARGET_EFAULT;
11485             }
11486             ret = get_errno(
11487                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11488             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11489                 return -TARGET_EFAULT;
11490             }
11491         }
11492         return ret;
11493     }
11494 #endif
11495 
11496 #ifdef TARGET_NR_timer_gettime
11497     case TARGET_NR_timer_gettime:
11498     {
11499         /* args: timer_t timerid, struct itimerspec *curr_value */
11500         target_timer_t timerid = get_timer_id(arg1);
11501 
11502         if (timerid < 0) {
11503             ret = timerid;
11504         } else if (!arg2) {
11505             ret = -TARGET_EFAULT;
11506         } else {
11507             timer_t htimer = g_posix_timers[timerid];
11508             struct itimerspec hspec;
11509             ret = get_errno(timer_gettime(htimer, &hspec));
11510 
11511             if (host_to_target_itimerspec(arg2, &hspec)) {
11512                 ret = -TARGET_EFAULT;
11513             }
11514         }
11515         return ret;
11516     }
11517 #endif
11518 
11519 #ifdef TARGET_NR_timer_getoverrun
11520     case TARGET_NR_timer_getoverrun:
11521     {
11522         /* args: timer_t timerid */
11523         target_timer_t timerid = get_timer_id(arg1);
11524 
11525         if (timerid < 0) {
11526             ret = timerid;
11527         } else {
11528             timer_t htimer = g_posix_timers[timerid];
11529             ret = get_errno(timer_getoverrun(htimer));
11530         }
11531         fd_trans_unregister(ret);
11532         return ret;
11533     }
11534 #endif
11535 
11536 #ifdef TARGET_NR_timer_delete
11537     case TARGET_NR_timer_delete:
11538     {
11539         /* args: timer_t timerid */
11540         target_timer_t timerid = get_timer_id(arg1);
11541 
11542         if (timerid < 0) {
11543             ret = timerid;
11544         } else {
11545             timer_t htimer = g_posix_timers[timerid];
11546             ret = get_errno(timer_delete(htimer));
11547             g_posix_timers[timerid] = 0;
11548         }
11549         return ret;
11550     }
11551 #endif
11552 
11553 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11554     case TARGET_NR_timerfd_create:
11555         return get_errno(timerfd_create(arg1,
11556                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11557 #endif
11558 
11559 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11560     case TARGET_NR_timerfd_gettime:
11561         {
11562             struct itimerspec its_curr;
11563 
11564             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11565 
11566             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11567                 return -TARGET_EFAULT;
11568             }
11569         }
11570         return ret;
11571 #endif
11572 
11573 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11574     case TARGET_NR_timerfd_settime:
11575         {
11576             struct itimerspec its_new, its_old, *p_new;
11577 
11578             if (arg3) {
11579                 if (target_to_host_itimerspec(&its_new, arg3)) {
11580                     return -TARGET_EFAULT;
11581                 }
11582                 p_new = &its_new;
11583             } else {
11584                 p_new = NULL;
11585             }
11586 
11587             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11588 
11589             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11590                 return -TARGET_EFAULT;
11591             }
11592         }
11593         return ret;
11594 #endif
11595 
11596 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11597     case TARGET_NR_ioprio_get:
11598         return get_errno(ioprio_get(arg1, arg2));
11599 #endif
11600 
11601 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11602     case TARGET_NR_ioprio_set:
11603         return get_errno(ioprio_set(arg1, arg2, arg3));
11604 #endif
11605 
11606 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11607     case TARGET_NR_setns:
11608         return get_errno(setns(arg1, arg2));
11609 #endif
11610 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11611     case TARGET_NR_unshare:
11612         return get_errno(unshare(arg1));
11613 #endif
11614 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11615     case TARGET_NR_kcmp:
11616         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11617 #endif
11618 #ifdef TARGET_NR_swapcontext
11619     case TARGET_NR_swapcontext:
11620         /* PowerPC specific.  */
11621         return do_swapcontext(cpu_env, arg1, arg2, arg3);
11622 #endif
11623 
11624     default:
11625         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11626         return -TARGET_ENOSYS;
11627     }
11628     return ret;
11629 }
11630 
11631 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11632                     abi_long arg2, abi_long arg3, abi_long arg4,
11633                     abi_long arg5, abi_long arg6, abi_long arg7,
11634                     abi_long arg8)
11635 {
11636     CPUState *cpu = ENV_GET_CPU(cpu_env);
11637     abi_long ret;
11638 
11639 #ifdef DEBUG_ERESTARTSYS
11640     /* Debug-only code for exercising the syscall-restart code paths
11641      * in the per-architecture cpu main loops: restart every syscall
11642      * the guest makes once before letting it through.
11643      */
11644     {
11645         static bool flag;
11646         flag = !flag;
11647         if (flag) {
11648             return -TARGET_ERESTARTSYS;
11649         }
11650     }
11651 #endif
11652 
11653     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11654                              arg5, arg6, arg7, arg8);
11655 
11656     if (unlikely(do_strace)) {
11657         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11658         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11659                           arg5, arg6, arg7, arg8);
11660         print_syscall_ret(num, ret);
11661     } else {
11662         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11663                           arg5, arg6, arg7, arg8);
11664     }
11665 
11666     trace_guest_user_syscall_ret(cpu, num, ret);
11667     return ret;
11668 }
11669