xref: /openbmc/qemu/linux-user/syscall.c (revision 5f992db6)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef CONFIG_EVENTFD
63 #include <sys/eventfd.h>
64 #endif
65 #ifdef CONFIG_EPOLL
66 #include <sys/epoll.h>
67 #endif
68 #ifdef CONFIG_ATTR
69 #include "qemu/xattr.h"
70 #endif
71 #ifdef CONFIG_SENDFILE
72 #include <sys/sendfile.h>
73 #endif
74 
75 #define termios host_termios
76 #define winsize host_winsize
77 #define termio host_termio
78 #define sgttyb host_sgttyb /* same as target */
79 #define tchars host_tchars /* same as target */
80 #define ltchars host_ltchars /* same as target */
81 
82 #include <linux/termios.h>
83 #include <linux/unistd.h>
84 #include <linux/cdrom.h>
85 #include <linux/hdreg.h>
86 #include <linux/soundcard.h>
87 #include <linux/kd.h>
88 #include <linux/mtio.h>
89 #include <linux/fs.h>
90 #if defined(CONFIG_FIEMAP)
91 #include <linux/fiemap.h>
92 #endif
93 #include <linux/fb.h>
94 #if defined(CONFIG_USBFS)
95 #include <linux/usbdevice_fs.h>
96 #include <linux/usb/ch9.h>
97 #endif
98 #include <linux/vt.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <netpacket/packet.h>
105 #include <linux/netlink.h>
106 #include "linux_loop.h"
107 #include "uname.h"
108 
109 #include "qemu.h"
110 #include "fd-trans.h"
111 
112 #ifndef CLONE_IO
113 #define CLONE_IO                0x80000000      /* Clone io context */
114 #endif
115 
116 /* We can't directly call the host clone syscall, because this will
117  * badly confuse libc (breaking mutexes, for example). So we must
118  * divide clone flags into:
119  *  * flag combinations that look like pthread_create()
120  *  * flag combinations that look like fork()
121  *  * flags we can implement within QEMU itself
122  *  * flags we can't support and will return an error for
123  */
124 /* For thread creation, all these flags must be present; for
125  * fork, none must be present.
126  */
127 #define CLONE_THREAD_FLAGS                              \
128     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
129      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
130 
131 /* These flags are ignored:
132  * CLONE_DETACHED is now ignored by the kernel;
133  * CLONE_IO is just an optimisation hint to the I/O scheduler
134  */
135 #define CLONE_IGNORED_FLAGS                     \
136     (CLONE_DETACHED | CLONE_IO)
137 
138 /* Flags for fork which we can implement within QEMU itself */
139 #define CLONE_OPTIONAL_FORK_FLAGS               \
140     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
141      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
142 
143 /* Flags for thread creation which we can implement within QEMU itself */
144 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
145     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
146      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
147 
148 #define CLONE_INVALID_FORK_FLAGS                                        \
149     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
150 
151 #define CLONE_INVALID_THREAD_FLAGS                                      \
152     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
153        CLONE_IGNORED_FLAGS))
154 
155 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
156  * have almost all been allocated. We cannot support any of
157  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
158  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
159  * The checks against the invalid thread masks above will catch these.
160  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
161  */
162 
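/*
 * Illustrative sketch (not part of the original source): how a guest
 * clone() flags value could be classified with the masks above.  The
 * function name is hypothetical; the real checks live in do_fork() and
 * may differ in detail.
 */
#if 0
static int classify_clone_flags(unsigned int flags)
{
    if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
        /* Looks like pthread_create(): all thread flags are present. */
        return (flags & CLONE_INVALID_THREAD_FLAGS) ? -TARGET_EINVAL : 1;
    }
    if ((flags & CLONE_THREAD_FLAGS) == 0) {
        /* Looks like fork(): none of the thread flags are present. */
        return (flags & CLONE_INVALID_FORK_FLAGS) ? -TARGET_EINVAL : 0;
    }
    /* A mixture of thread and fork flags is not supported. */
    return -TARGET_EINVAL;
}
#endif
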
163 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
164  * once. This exercises the codepaths for restart.
165  */
166 //#define DEBUG_ERESTARTSYS
167 
168 //#include <linux/msdos_fs.h>
169 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
170 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
171 
172 #undef _syscall0
173 #undef _syscall1
174 #undef _syscall2
175 #undef _syscall3
176 #undef _syscall4
177 #undef _syscall5
178 #undef _syscall6
179 
180 #define _syscall0(type,name)		\
181 static type name (void)			\
182 {					\
183 	return syscall(__NR_##name);	\
184 }
185 
186 #define _syscall1(type,name,type1,arg1)		\
187 static type name (type1 arg1)			\
188 {						\
189 	return syscall(__NR_##name, arg1);	\
190 }
191 
192 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
193 static type name (type1 arg1,type2 arg2)		\
194 {							\
195 	return syscall(__NR_##name, arg1, arg2);	\
196 }
197 
198 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
199 static type name (type1 arg1,type2 arg2,type3 arg3)		\
200 {								\
201 	return syscall(__NR_##name, arg1, arg2, arg3);		\
202 }
203 
204 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
205 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
206 {										\
207 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
208 }
209 
210 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
211 		  type5,arg5)							\
212 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
213 {										\
214 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
215 }
216 
217 
218 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
219 		  type5,arg5,type6,arg6)					\
220 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
221                   type6 arg6)							\
222 {										\
223 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
224 }
225 
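/*
 * For illustration (not part of the original source): the _syscallN
 * macros above generate thin wrappers around syscall(2).  For example,
 * the later "_syscall0(int, sys_gettid)" expands to roughly:
 *
 *   static int sys_gettid(void)
 *   {
 *       return syscall(__NR_sys_gettid);
 *   }
 *
 * where __NR_sys_gettid has been #defined to __NR_gettid just before the
 * macro is used.
 */
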
226 
227 #define __NR_sys_uname __NR_uname
228 #define __NR_sys_getcwd1 __NR_getcwd
229 #define __NR_sys_getdents __NR_getdents
230 #define __NR_sys_getdents64 __NR_getdents64
231 #define __NR_sys_getpriority __NR_getpriority
232 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
233 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
234 #define __NR_sys_syslog __NR_syslog
235 #define __NR_sys_futex __NR_futex
236 #define __NR_sys_inotify_init __NR_inotify_init
237 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
238 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
239 
240 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
241 #define __NR__llseek __NR_lseek
242 #endif
243 
244 /* Newer kernel ports have llseek() instead of _llseek() */
245 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
246 #define TARGET_NR__llseek TARGET_NR_llseek
247 #endif
248 
249 #define __NR_sys_gettid __NR_gettid
250 _syscall0(int, sys_gettid)
251 
252 /* For the 64-bit guest on 32-bit host case we must emulate
253  * getdents using getdents64, because otherwise the host
254  * might hand us back more dirent records than we can fit
255  * into the guest buffer after structure format conversion.
256  * Otherwise we emulate the guest's getdents with the host's getdents, if available.
257  */
258 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
259 #define EMULATE_GETDENTS_WITH_GETDENTS
260 #endif
261 
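/*
 * Worked example (illustrative) of the conversion-overflow problem noted
 * above: a 32-bit host's struct linux_dirent has 4-byte d_ino/d_off
 * fields, while a 64-bit guest expects 8-byte fields, so every record
 * grows when converted.  A host getdents() call that exactly filled the
 * guest-sized buffer could therefore overflow it after conversion, which
 * is why the guard above only takes the direct getdents path when
 * HOST_LONG_BITS >= TARGET_ABI_BITS and falls back to getdents64 otherwise.
 */
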
262 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
263 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
264 #endif
265 #if (defined(TARGET_NR_getdents) && \
266       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
267     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
268 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
269 #endif
270 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
271 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
272           loff_t *, res, uint, wh);
273 #endif
274 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
275 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
276           siginfo_t *, uinfo)
277 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
278 #ifdef __NR_exit_group
279 _syscall1(int,exit_group,int,error_code)
280 #endif
281 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
282 _syscall1(int,set_tid_address,int *,tidptr)
283 #endif
284 #if defined(TARGET_NR_futex) && defined(__NR_futex)
285 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
286           const struct timespec *,timeout,int *,uaddr2,int,val3)
287 #endif
288 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
289 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
290           unsigned long *, user_mask_ptr);
291 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
292 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
293           unsigned long *, user_mask_ptr);
294 #define __NR_sys_getcpu __NR_getcpu
295 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
296 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
297           void *, arg);
298 _syscall2(int, capget, struct __user_cap_header_struct *, header,
299           struct __user_cap_data_struct *, data);
300 _syscall2(int, capset, struct __user_cap_header_struct *, header,
301           struct __user_cap_data_struct *, data);
302 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
303 _syscall2(int, ioprio_get, int, which, int, who)
304 #endif
305 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
306 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
307 #endif
308 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
309 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
310 #endif
311 
312 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
313 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
314           unsigned long, idx1, unsigned long, idx2)
315 #endif
316 
317 static bitmask_transtbl fcntl_flags_tbl[] = {
318   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
319   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
320   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
321   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
322   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
323   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
324   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
325   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
326   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
327   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
328   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
329   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
330   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
331 #if defined(O_DIRECT)
332   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
333 #endif
334 #if defined(O_NOATIME)
335   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
336 #endif
337 #if defined(O_CLOEXEC)
338   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
339 #endif
340 #if defined(O_PATH)
341   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
342 #endif
343 #if defined(O_TMPFILE)
344   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
345 #endif
346   /* Don't terminate the list prematurely on 64-bit host+guest.  */
347 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
348   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
349 #endif
350   { 0, 0, 0, 0 }
351 };
352 
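/*
 * Illustrative sketch (not part of the original source): the table above
 * is consumed by the generic bitmask translation helpers used elsewhere
 * in this file (target_to_host_bitmask() and its inverse), e.g. when the
 * guest passes open() flags.  Roughly:
 *
 *   int host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *   ...
 *   int target_flags = host_to_target_bitmask(host_flags, fcntl_flags_tbl);
 *
 * Each row maps the bits selected by the target mask to the corresponding
 * host bits (and back), so a flag like TARGET_O_NONBLOCK ends up as the
 * host's O_NONBLOCK even when the numeric values differ between ABIs.
 */
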
353 static int sys_getcwd1(char *buf, size_t size)
354 {
355   if (getcwd(buf, size) == NULL) {
356       /* getcwd() sets errno */
357       return (-1);
358   }
359   return strlen(buf)+1;
360 }
361 
362 #ifdef TARGET_NR_utimensat
363 #if defined(__NR_utimensat)
364 #define __NR_sys_utimensat __NR_utimensat
365 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
366           const struct timespec *,tsp,int,flags)
367 #else
368 static int sys_utimensat(int dirfd, const char *pathname,
369                          const struct timespec times[2], int flags)
370 {
371     errno = ENOSYS;
372     return -1;
373 }
374 #endif
375 #endif /* TARGET_NR_utimensat */
376 
377 #ifdef TARGET_NR_renameat2
378 #if defined(__NR_renameat2)
379 #define __NR_sys_renameat2 __NR_renameat2
380 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
381           const char *, new, unsigned int, flags)
382 #else
383 static int sys_renameat2(int oldfd, const char *old,
384                          int newfd, const char *new, int flags)
385 {
386     if (flags == 0) {
387         return renameat(oldfd, old, newfd, new);
388     }
389     errno = ENOSYS;
390     return -1;
391 }
392 #endif
393 #endif /* TARGET_NR_renameat2 */
394 
395 #ifdef CONFIG_INOTIFY
396 #include <sys/inotify.h>
397 
398 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
399 static int sys_inotify_init(void)
400 {
401   return (inotify_init());
402 }
403 #endif
404 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
405 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
406 {
407   return (inotify_add_watch(fd, pathname, mask));
408 }
409 #endif
410 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
411 static int sys_inotify_rm_watch(int fd, int32_t wd)
412 {
413   return (inotify_rm_watch(fd, wd));
414 }
415 #endif
416 #ifdef CONFIG_INOTIFY1
417 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
418 static int sys_inotify_init1(int flags)
419 {
420   return (inotify_init1(flags));
421 }
422 #endif
423 #endif
424 #else
425 /* Userspace can usually survive runtime without inotify */
426 #undef TARGET_NR_inotify_init
427 #undef TARGET_NR_inotify_init1
428 #undef TARGET_NR_inotify_add_watch
429 #undef TARGET_NR_inotify_rm_watch
430 #endif /* CONFIG_INOTIFY  */
431 
432 #if defined(TARGET_NR_prlimit64)
433 #ifndef __NR_prlimit64
434 # define __NR_prlimit64 -1
435 #endif
436 #define __NR_sys_prlimit64 __NR_prlimit64
437 /* The glibc rlimit structure may not be the one used by the underlying syscall */
438 struct host_rlimit64 {
439     uint64_t rlim_cur;
440     uint64_t rlim_max;
441 };
442 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
443           const struct host_rlimit64 *, new_limit,
444           struct host_rlimit64 *, old_limit)
445 #endif
446 
447 
448 #if defined(TARGET_NR_timer_create)
449 /* Maximum of 32 active POSIX timers allowed at any one time. */
450 static timer_t g_posix_timers[32] = { 0, } ;
451 
452 static inline int next_free_host_timer(void)
453 {
454     int k ;
455     /* FIXME: Does finding the next free slot require a lock? */
456     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
457         if (g_posix_timers[k] == 0) {
458             g_posix_timers[k] = (timer_t) 1;
459             return k;
460         }
461     }
462     return -1;
463 }
464 #endif
465 
466 /* ARM EABI and MIPS expect 64-bit types to be aligned on even pairs of registers */
467 #ifdef TARGET_ARM
468 static inline int regpairs_aligned(void *cpu_env, int num)
469 {
470     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
471 }
472 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
473 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
474 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
475 /* SysV ABI for PPC32 expects 64-bit parameters to be passed on odd/even pairs
476  * of registers, which translates to the same rule as ARM/MIPS, because we start
477  * with r3 as arg1 */
478 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
479 #elif defined(TARGET_SH4)
480 /* SH4 doesn't align register pairs, except for p{read,write}64 */
481 static inline int regpairs_aligned(void *cpu_env, int num)
482 {
483     switch (num) {
484     case TARGET_NR_pread64:
485     case TARGET_NR_pwrite64:
486         return 1;
487 
488     default:
489         return 0;
490     }
491 }
492 #elif defined(TARGET_XTENSA)
493 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
494 #else
495 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
496 #endif
497 
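/*
 * Illustrative sketch (not part of the original source): when
 * regpairs_aligned() reports 1, a 64-bit syscall argument starts on an
 * even/odd register pair, so the preceding 32-bit slot is padding and
 * must be skipped before the two halves are recombined.  Schematically,
 * for a call like truncate64(path, pad, len_lo, len_hi):
 *
 *   if (regpairs_aligned(cpu_env, num)) {
 *       arg2 = arg3;        // skip the alignment padding slot
 *       arg3 = arg4;
 *   }
 *   len = target_offset64(arg2, arg3);   // combine the two 32-bit halves
 *
 * target_offset64() stands here for the helper that merges the halves;
 * the exact argument order depends on the target's endianness and ABI.
 */
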
498 #define ERRNO_TABLE_SIZE 1200
499 
500 /* target_to_host_errno_table[] is initialized from
501  * host_to_target_errno_table[] in syscall_init(). */
502 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
503 };
504 
505 /*
506  * This list is the union of errno values overridden in asm-<arch>/errno.h
507  * minus the errnos that are not actually generic to all archs.
508  */
509 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
510     [EAGAIN]		= TARGET_EAGAIN,
511     [EIDRM]		= TARGET_EIDRM,
512     [ECHRNG]		= TARGET_ECHRNG,
513     [EL2NSYNC]		= TARGET_EL2NSYNC,
514     [EL3HLT]		= TARGET_EL3HLT,
515     [EL3RST]		= TARGET_EL3RST,
516     [ELNRNG]		= TARGET_ELNRNG,
517     [EUNATCH]		= TARGET_EUNATCH,
518     [ENOCSI]		= TARGET_ENOCSI,
519     [EL2HLT]		= TARGET_EL2HLT,
520     [EDEADLK]		= TARGET_EDEADLK,
521     [ENOLCK]		= TARGET_ENOLCK,
522     [EBADE]		= TARGET_EBADE,
523     [EBADR]		= TARGET_EBADR,
524     [EXFULL]		= TARGET_EXFULL,
525     [ENOANO]		= TARGET_ENOANO,
526     [EBADRQC]		= TARGET_EBADRQC,
527     [EBADSLT]		= TARGET_EBADSLT,
528     [EBFONT]		= TARGET_EBFONT,
529     [ENOSTR]		= TARGET_ENOSTR,
530     [ENODATA]		= TARGET_ENODATA,
531     [ETIME]		= TARGET_ETIME,
532     [ENOSR]		= TARGET_ENOSR,
533     [ENONET]		= TARGET_ENONET,
534     [ENOPKG]		= TARGET_ENOPKG,
535     [EREMOTE]		= TARGET_EREMOTE,
536     [ENOLINK]		= TARGET_ENOLINK,
537     [EADV]		= TARGET_EADV,
538     [ESRMNT]		= TARGET_ESRMNT,
539     [ECOMM]		= TARGET_ECOMM,
540     [EPROTO]		= TARGET_EPROTO,
541     [EDOTDOT]		= TARGET_EDOTDOT,
542     [EMULTIHOP]		= TARGET_EMULTIHOP,
543     [EBADMSG]		= TARGET_EBADMSG,
544     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
545     [EOVERFLOW]		= TARGET_EOVERFLOW,
546     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
547     [EBADFD]		= TARGET_EBADFD,
548     [EREMCHG]		= TARGET_EREMCHG,
549     [ELIBACC]		= TARGET_ELIBACC,
550     [ELIBBAD]		= TARGET_ELIBBAD,
551     [ELIBSCN]		= TARGET_ELIBSCN,
552     [ELIBMAX]		= TARGET_ELIBMAX,
553     [ELIBEXEC]		= TARGET_ELIBEXEC,
554     [EILSEQ]		= TARGET_EILSEQ,
555     [ENOSYS]		= TARGET_ENOSYS,
556     [ELOOP]		= TARGET_ELOOP,
557     [ERESTART]		= TARGET_ERESTART,
558     [ESTRPIPE]		= TARGET_ESTRPIPE,
559     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
560     [EUSERS]		= TARGET_EUSERS,
561     [ENOTSOCK]		= TARGET_ENOTSOCK,
562     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
563     [EMSGSIZE]		= TARGET_EMSGSIZE,
564     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
565     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
566     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
567     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
568     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
569     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
570     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
571     [EADDRINUSE]	= TARGET_EADDRINUSE,
572     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
573     [ENETDOWN]		= TARGET_ENETDOWN,
574     [ENETUNREACH]	= TARGET_ENETUNREACH,
575     [ENETRESET]		= TARGET_ENETRESET,
576     [ECONNABORTED]	= TARGET_ECONNABORTED,
577     [ECONNRESET]	= TARGET_ECONNRESET,
578     [ENOBUFS]		= TARGET_ENOBUFS,
579     [EISCONN]		= TARGET_EISCONN,
580     [ENOTCONN]		= TARGET_ENOTCONN,
581     [EUCLEAN]		= TARGET_EUCLEAN,
582     [ENOTNAM]		= TARGET_ENOTNAM,
583     [ENAVAIL]		= TARGET_ENAVAIL,
584     [EISNAM]		= TARGET_EISNAM,
585     [EREMOTEIO]		= TARGET_EREMOTEIO,
586     [EDQUOT]            = TARGET_EDQUOT,
587     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
588     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
589     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
590     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
591     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
592     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
593     [EALREADY]		= TARGET_EALREADY,
594     [EINPROGRESS]	= TARGET_EINPROGRESS,
595     [ESTALE]		= TARGET_ESTALE,
596     [ECANCELED]		= TARGET_ECANCELED,
597     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
598     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
599 #ifdef ENOKEY
600     [ENOKEY]		= TARGET_ENOKEY,
601 #endif
602 #ifdef EKEYEXPIRED
603     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
604 #endif
605 #ifdef EKEYREVOKED
606     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
607 #endif
608 #ifdef EKEYREJECTED
609     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
610 #endif
611 #ifdef EOWNERDEAD
612     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
613 #endif
614 #ifdef ENOTRECOVERABLE
615     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
616 #endif
617 #ifdef ENOMSG
618     [ENOMSG]            = TARGET_ENOMSG,
619 #endif
620 #ifdef ERFKILL
621     [ERFKILL]           = TARGET_ERFKILL,
622 #endif
623 #ifdef EHWPOISON
624     [EHWPOISON]         = TARGET_EHWPOISON,
625 #endif
626 };
627 
628 static inline int host_to_target_errno(int err)
629 {
630     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
631         host_to_target_errno_table[err]) {
632         return host_to_target_errno_table[err];
633     }
634     return err;
635 }
636 
637 static inline int target_to_host_errno(int err)
638 {
639     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
640         target_to_host_errno_table[err]) {
641         return target_to_host_errno_table[err];
642     }
643     return err;
644 }
645 
646 static inline abi_long get_errno(abi_long ret)
647 {
648     if (ret == -1)
649         return -host_to_target_errno(errno);
650     else
651         return ret;
652 }
653 
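/*
 * Usage example (illustrative, not part of the original source): host
 * syscall results are funnelled through get_errno() so that failures are
 * reported to the guest with *target* errno values, e.g.
 *
 *   ret = get_errno(close(fd));
 *
 * If close() fails with errno == EIO, ret becomes -TARGET_EIO; on success
 * the non-negative result is passed through unchanged.
 */
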
654 const char *target_strerror(int err)
655 {
656     if (err == TARGET_ERESTARTSYS) {
657         return "To be restarted";
658     }
659     if (err == TARGET_QEMU_ESIGRETURN) {
660         return "Successful exit from sigreturn";
661     }
662 
663     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
664         return NULL;
665     }
666     return strerror(target_to_host_errno(err));
667 }
668 
669 #define safe_syscall0(type, name) \
670 static type safe_##name(void) \
671 { \
672     return safe_syscall(__NR_##name); \
673 }
674 
675 #define safe_syscall1(type, name, type1, arg1) \
676 static type safe_##name(type1 arg1) \
677 { \
678     return safe_syscall(__NR_##name, arg1); \
679 }
680 
681 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
682 static type safe_##name(type1 arg1, type2 arg2) \
683 { \
684     return safe_syscall(__NR_##name, arg1, arg2); \
685 }
686 
687 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
688 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
689 { \
690     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
691 }
692 
693 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
694     type4, arg4) \
695 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
696 { \
697     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
698 }
699 
700 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
701     type4, arg4, type5, arg5) \
702 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
703     type5 arg5) \
704 { \
705     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
706 }
707 
708 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
709     type4, arg4, type5, arg5, type6, arg6) \
710 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
711     type5 arg5, type6 arg6) \
712 { \
713     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
714 }
715 
716 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
717 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
718 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
719               int, flags, mode_t, mode)
720 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
721               struct rusage *, rusage)
722 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
723               int, options, struct rusage *, rusage)
724 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
725 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
726               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
727 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
728               struct timespec *, tsp, const sigset_t *, sigmask,
729               size_t, sigsetsize)
730 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
731               int, maxevents, int, timeout, const sigset_t *, sigmask,
732               size_t, sigsetsize)
733 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
734               const struct timespec *,timeout,int *,uaddr2,int,val3)
735 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
736 safe_syscall2(int, kill, pid_t, pid, int, sig)
737 safe_syscall2(int, tkill, int, tid, int, sig)
738 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
739 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
740 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
741 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
742               unsigned long, pos_l, unsigned long, pos_h)
743 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
744               unsigned long, pos_l, unsigned long, pos_h)
745 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
746               socklen_t, addrlen)
747 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
748               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
749 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
750               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
751 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
752 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
753 safe_syscall2(int, flock, int, fd, int, operation)
754 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
755               const struct timespec *, uts, size_t, sigsetsize)
756 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
757               int, flags)
758 safe_syscall2(int, nanosleep, const struct timespec *, req,
759               struct timespec *, rem)
760 #ifdef TARGET_NR_clock_nanosleep
761 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
762               const struct timespec *, req, struct timespec *, rem)
763 #endif
764 #ifdef __NR_msgsnd
765 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
766               int, flags)
767 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
768               long, msgtype, int, flags)
769 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
770               unsigned, nsops, const struct timespec *, timeout)
771 #else
772 /* This host kernel architecture uses a single ipc syscall; fake up
773  * wrappers for the sub-operations to hide this implementation detail.
774  * Annoyingly we can't include linux/ipc.h to get the constant definitions
775  * for the call parameter because some structs in there conflict with the
776  * sys/ipc.h ones. So we just define them here, and rely on them being
777  * the same for all host architectures.
778  */
779 #define Q_SEMTIMEDOP 4
780 #define Q_MSGSND 11
781 #define Q_MSGRCV 12
782 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
783 
784 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
785               void *, ptr, long, fifth)
786 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
787 {
788     return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
789 }
790 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
791 {
792     return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
793 }
794 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
795                            const struct timespec *timeout)
796 {
797     return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
798                     (long)timeout);
799 }
800 #endif
801 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
802 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
803               size_t, len, unsigned, prio, const struct timespec *, timeout)
804 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
805               size_t, len, unsigned *, prio, const struct timespec *, timeout)
806 #endif
807 /* We do ioctl like this rather than via safe_syscall3 to preserve the
808  * "third argument might be integer or pointer or not present" behaviour of
809  * the libc function.
810  */
811 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
812 /* Similarly for fcntl. Note that callers must always:
813  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
814  *  use the flock64 struct rather than unsuffixed flock
815  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
816  */
817 #ifdef __NR_fcntl64
818 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
819 #else
820 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
821 #endif
822 
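/*
 * Illustrative sketch (not part of the original source): a caller probing
 * a lock through the wrapper above always uses the 64-bit variants, so the
 * same code works whether __NR_fcntl64 or plain __NR_fcntl was selected:
 *
 *   struct flock64 fl64 = { .l_type = F_RDLCK, .l_whence = SEEK_SET };
 *   ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * Passing the unsuffixed F_GETLK or a plain struct flock here would break
 * on 32-bit hosts, where the raw fcntl64 syscall expects the 64-bit layout.
 */
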
823 static inline int host_to_target_sock_type(int host_type)
824 {
825     int target_type;
826 
827     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
828     case SOCK_DGRAM:
829         target_type = TARGET_SOCK_DGRAM;
830         break;
831     case SOCK_STREAM:
832         target_type = TARGET_SOCK_STREAM;
833         break;
834     default:
835         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
836         break;
837     }
838 
839 #if defined(SOCK_CLOEXEC)
840     if (host_type & SOCK_CLOEXEC) {
841         target_type |= TARGET_SOCK_CLOEXEC;
842     }
843 #endif
844 
845 #if defined(SOCK_NONBLOCK)
846     if (host_type & SOCK_NONBLOCK) {
847         target_type |= TARGET_SOCK_NONBLOCK;
848     }
849 #endif
850 
851     return target_type;
852 }
853 
854 static abi_ulong target_brk;
855 static abi_ulong target_original_brk;
856 static abi_ulong brk_page;
857 
858 void target_set_brk(abi_ulong new_brk)
859 {
860     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
861     brk_page = HOST_PAGE_ALIGN(target_brk);
862 }
863 
864 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
865 #define DEBUGF_BRK(message, args...)
866 
867 /* do_brk() must return target values and target errnos. */
868 abi_long do_brk(abi_ulong new_brk)
869 {
870     abi_long mapped_addr;
871     abi_ulong new_alloc_size;
872 
873     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
874 
875     if (!new_brk) {
876         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
877         return target_brk;
878     }
879     if (new_brk < target_original_brk) {
880         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
881                    target_brk);
882         return target_brk;
883     }
884 
885     /* If the new brk is less than the highest page reserved to the
886      * target heap allocation, set it and we're almost done...  */
887     if (new_brk <= brk_page) {
888         /* Heap contents are initialized to zero, as for anonymous
889          * mapped pages.  */
890         if (new_brk > target_brk) {
891             memset(g2h(target_brk), 0, new_brk - target_brk);
892         }
893 	target_brk = new_brk;
894         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
895 	return target_brk;
896     }
897 
898     /* We need to allocate more memory after the brk... Note that
899      * we don't use MAP_FIXED because that will map over the top of
900      * any existing mapping (like the one with the host libc or qemu
901      * itself); instead we treat "mapped but at wrong address" as
902      * a failure and unmap again.
903      */
904     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
905     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
906                                         PROT_READ|PROT_WRITE,
907                                         MAP_ANON|MAP_PRIVATE, 0, 0));
908 
909     if (mapped_addr == brk_page) {
910         /* Heap contents are initialized to zero, as for anonymous
911          * mapped pages.  Technically the new pages are already
912          * initialized to zero since they *are* anonymous mapped
913          * pages, however we have to take care with the contents that
914          * come from the remaining part of the previous page: it may
915          * contain garbage data due to a previous heap usage (grown
916          * then shrunk).  */
917         memset(g2h(target_brk), 0, brk_page - target_brk);
918 
919         target_brk = new_brk;
920         brk_page = HOST_PAGE_ALIGN(target_brk);
921         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
922             target_brk);
923         return target_brk;
924     } else if (mapped_addr != -1) {
925         /* Mapped but at wrong address, meaning there wasn't actually
926          * enough space for this brk.
927          */
928         target_munmap(mapped_addr, new_alloc_size);
929         mapped_addr = -1;
930         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
931     }
932     else {
933         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
934     }
935 
936 #if defined(TARGET_ALPHA)
937     /* We (partially) emulate OSF/1 on Alpha, which requires we
938        return a proper errno, not an unchanged brk value.  */
939     return -TARGET_ENOMEM;
940 #endif
941     /* For everything else, return the previous break. */
942     return target_brk;
943 }
944 
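/*
 * Worked example (illustrative): a guest loaded with its initial break at
 * 0x10000 might issue
 *
 *   brk(0)        -> returns the current break (0x10000), nothing mapped
 *   brk(0x11000)  -> grows the heap; new pages come from target_mmap()
 *                    at brk_page and are zero-filled
 *   brk(0x0f000)  -> below the original break, so the request is ignored
 *                    and the current break is returned unchanged
 *
 * Note that, matching Linux, failure is reported by returning the old
 * break rather than an errno (except on Alpha, as noted above).
 */
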
945 static inline abi_long copy_from_user_fdset(fd_set *fds,
946                                             abi_ulong target_fds_addr,
947                                             int n)
948 {
949     int i, nw, j, k;
950     abi_ulong b, *target_fds;
951 
952     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
953     if (!(target_fds = lock_user(VERIFY_READ,
954                                  target_fds_addr,
955                                  sizeof(abi_ulong) * nw,
956                                  1)))
957         return -TARGET_EFAULT;
958 
959     FD_ZERO(fds);
960     k = 0;
961     for (i = 0; i < nw; i++) {
962         /* grab the abi_ulong */
963         __get_user(b, &target_fds[i]);
964         for (j = 0; j < TARGET_ABI_BITS; j++) {
965             /* check the bit inside the abi_ulong */
966             if ((b >> j) & 1)
967                 FD_SET(k, fds);
968             k++;
969         }
970     }
971 
972     unlock_user(target_fds, target_fds_addr, 0);
973 
974     return 0;
975 }
976 
977 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
978                                                  abi_ulong target_fds_addr,
979                                                  int n)
980 {
981     if (target_fds_addr) {
982         if (copy_from_user_fdset(fds, target_fds_addr, n))
983             return -TARGET_EFAULT;
984         *fds_ptr = fds;
985     } else {
986         *fds_ptr = NULL;
987     }
988     return 0;
989 }
990 
991 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
992                                           const fd_set *fds,
993                                           int n)
994 {
995     int i, nw, j, k;
996     abi_long v;
997     abi_ulong *target_fds;
998 
999     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1000     if (!(target_fds = lock_user(VERIFY_WRITE,
1001                                  target_fds_addr,
1002                                  sizeof(abi_ulong) * nw,
1003                                  0)))
1004         return -TARGET_EFAULT;
1005 
1006     k = 0;
1007     for (i = 0; i < nw; i++) {
1008         v = 0;
1009         for (j = 0; j < TARGET_ABI_BITS; j++) {
1010             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1011             k++;
1012         }
1013         __put_user(v, &target_fds[i]);
1014     }
1015 
1016     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1017 
1018     return 0;
1019 }
1020 
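/*
 * Worked example (illustrative): with TARGET_ABI_BITS == 32, guest fd 33
 * lives in abi_ulong word 1, bit 1 of the target fd_set.  The copy loops
 * above walk word by word (i) and bit by bit (j) while tracking the
 * absolute fd number in k, so FD_SET(33, fds) on the host side corresponds
 * to (target_fds[1] >> 1) & 1 on the guest side, independent of how the
 * host libc lays out its own fd_set internally.
 */
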
1021 #if defined(__alpha__)
1022 #define HOST_HZ 1024
1023 #else
1024 #define HOST_HZ 100
1025 #endif
1026 
1027 static inline abi_long host_to_target_clock_t(long ticks)
1028 {
1029 #if HOST_HZ == TARGET_HZ
1030     return ticks;
1031 #else
1032     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1033 #endif
1034 }
1035 
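/*
 * Worked example (illustrative): on an Alpha host (HOST_HZ == 1024)
 * emulating a 100 Hz target, 2048 host ticks scale to
 * 2048 * 100 / 1024 = 200 target ticks; the intermediate product is done
 * in 64 bits so large uptimes do not overflow.
 */
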
1036 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1037                                              const struct rusage *rusage)
1038 {
1039     struct target_rusage *target_rusage;
1040 
1041     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1042         return -TARGET_EFAULT;
1043     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1044     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1045     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1046     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1047     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1048     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1049     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1050     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1051     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1052     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1053     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1054     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1055     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1056     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1057     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1058     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1059     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1060     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1061     unlock_user_struct(target_rusage, target_addr, 1);
1062 
1063     return 0;
1064 }
1065 
1066 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1067 {
1068     abi_ulong target_rlim_swap;
1069     rlim_t result;
1070 
1071     target_rlim_swap = tswapal(target_rlim);
1072     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1073         return RLIM_INFINITY;
1074 
1075     result = target_rlim_swap;
1076     if (target_rlim_swap != (rlim_t)result)
1077         return RLIM_INFINITY;
1078 
1079     return result;
1080 }
1081 
1082 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1083 {
1084     abi_ulong target_rlim_swap;
1085     abi_ulong result;
1086 
1087     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1088         target_rlim_swap = TARGET_RLIM_INFINITY;
1089     else
1090         target_rlim_swap = rlim;
1091     result = tswapal(target_rlim_swap);
1092 
1093     return result;
1094 }
1095 
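/*
 * Worked example (illustrative): if a 32-bit guest passes
 * TARGET_RLIM_INFINITY, the host sees RLIM_INFINITY; conversely a host
 * limit that does not fit in the guest's abi_ulong (e.g. a 64-bit
 * RLIMIT_FSIZE of 2^40 reported to a 32-bit guest) is clamped to
 * TARGET_RLIM_INFINITY rather than being silently truncated.
 */
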
1096 static inline int target_to_host_resource(int code)
1097 {
1098     switch (code) {
1099     case TARGET_RLIMIT_AS:
1100         return RLIMIT_AS;
1101     case TARGET_RLIMIT_CORE:
1102         return RLIMIT_CORE;
1103     case TARGET_RLIMIT_CPU:
1104         return RLIMIT_CPU;
1105     case TARGET_RLIMIT_DATA:
1106         return RLIMIT_DATA;
1107     case TARGET_RLIMIT_FSIZE:
1108         return RLIMIT_FSIZE;
1109     case TARGET_RLIMIT_LOCKS:
1110         return RLIMIT_LOCKS;
1111     case TARGET_RLIMIT_MEMLOCK:
1112         return RLIMIT_MEMLOCK;
1113     case TARGET_RLIMIT_MSGQUEUE:
1114         return RLIMIT_MSGQUEUE;
1115     case TARGET_RLIMIT_NICE:
1116         return RLIMIT_NICE;
1117     case TARGET_RLIMIT_NOFILE:
1118         return RLIMIT_NOFILE;
1119     case TARGET_RLIMIT_NPROC:
1120         return RLIMIT_NPROC;
1121     case TARGET_RLIMIT_RSS:
1122         return RLIMIT_RSS;
1123     case TARGET_RLIMIT_RTPRIO:
1124         return RLIMIT_RTPRIO;
1125     case TARGET_RLIMIT_SIGPENDING:
1126         return RLIMIT_SIGPENDING;
1127     case TARGET_RLIMIT_STACK:
1128         return RLIMIT_STACK;
1129     default:
1130         return code;
1131     }
1132 }
1133 
1134 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1135                                               abi_ulong target_tv_addr)
1136 {
1137     struct target_timeval *target_tv;
1138 
1139     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1140         return -TARGET_EFAULT;
1141 
1142     __get_user(tv->tv_sec, &target_tv->tv_sec);
1143     __get_user(tv->tv_usec, &target_tv->tv_usec);
1144 
1145     unlock_user_struct(target_tv, target_tv_addr, 0);
1146 
1147     return 0;
1148 }
1149 
1150 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1151                                             const struct timeval *tv)
1152 {
1153     struct target_timeval *target_tv;
1154 
1155     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1156         return -TARGET_EFAULT;
1157 
1158     __put_user(tv->tv_sec, &target_tv->tv_sec);
1159     __put_user(tv->tv_usec, &target_tv->tv_usec);
1160 
1161     unlock_user_struct(target_tv, target_tv_addr, 1);
1162 
1163     return 0;
1164 }
1165 
1166 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1167                                                abi_ulong target_tz_addr)
1168 {
1169     struct target_timezone *target_tz;
1170 
1171     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1172         return -TARGET_EFAULT;
1173     }
1174 
1175     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1176     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1177 
1178     unlock_user_struct(target_tz, target_tz_addr, 0);
1179 
1180     return 0;
1181 }
1182 
1183 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1184 #include <mqueue.h>
1185 
1186 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1187                                               abi_ulong target_mq_attr_addr)
1188 {
1189     struct target_mq_attr *target_mq_attr;
1190 
1191     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1192                           target_mq_attr_addr, 1))
1193         return -TARGET_EFAULT;
1194 
1195     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1196     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1197     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1198     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1199 
1200     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1201 
1202     return 0;
1203 }
1204 
1205 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1206                                             const struct mq_attr *attr)
1207 {
1208     struct target_mq_attr *target_mq_attr;
1209 
1210     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1211                           target_mq_attr_addr, 0))
1212         return -TARGET_EFAULT;
1213 
1214     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1215     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1216     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1217     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1218 
1219     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1220 
1221     return 0;
1222 }
1223 #endif
1224 
1225 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1226 /* do_select() must return target values and target errnos. */
1227 static abi_long do_select(int n,
1228                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1229                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1230 {
1231     fd_set rfds, wfds, efds;
1232     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1233     struct timeval tv;
1234     struct timespec ts, *ts_ptr;
1235     abi_long ret;
1236 
1237     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1238     if (ret) {
1239         return ret;
1240     }
1241     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1242     if (ret) {
1243         return ret;
1244     }
1245     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1246     if (ret) {
1247         return ret;
1248     }
1249 
1250     if (target_tv_addr) {
1251         if (copy_from_user_timeval(&tv, target_tv_addr))
1252             return -TARGET_EFAULT;
1253         ts.tv_sec = tv.tv_sec;
1254         ts.tv_nsec = tv.tv_usec * 1000;
1255         ts_ptr = &ts;
1256     } else {
1257         ts_ptr = NULL;
1258     }
1259 
1260     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1261                                   ts_ptr, NULL));
1262 
1263     if (!is_error(ret)) {
1264         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1265             return -TARGET_EFAULT;
1266         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1267             return -TARGET_EFAULT;
1268         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1269             return -TARGET_EFAULT;
1270 
1271         if (target_tv_addr) {
1272             tv.tv_sec = ts.tv_sec;
1273             tv.tv_usec = ts.tv_nsec / 1000;
1274             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1275                 return -TARGET_EFAULT;
1276             }
1277         }
1278     }
1279 
1280     return ret;
1281 }
1282 
1283 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1284 static abi_long do_old_select(abi_ulong arg1)
1285 {
1286     struct target_sel_arg_struct *sel;
1287     abi_ulong inp, outp, exp, tvp;
1288     long nsel;
1289 
1290     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1291         return -TARGET_EFAULT;
1292     }
1293 
1294     nsel = tswapal(sel->n);
1295     inp = tswapal(sel->inp);
1296     outp = tswapal(sel->outp);
1297     exp = tswapal(sel->exp);
1298     tvp = tswapal(sel->tvp);
1299 
1300     unlock_user_struct(sel, arg1, 0);
1301 
1302     return do_select(nsel, inp, outp, exp, tvp);
1303 }
1304 #endif
1305 #endif
1306 
1307 static abi_long do_pipe2(int host_pipe[], int flags)
1308 {
1309 #ifdef CONFIG_PIPE2
1310     return pipe2(host_pipe, flags);
1311 #else
1312     return -ENOSYS;
1313 #endif
1314 }
1315 
1316 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1317                         int flags, int is_pipe2)
1318 {
1319     int host_pipe[2];
1320     abi_long ret;
1321     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1322 
1323     if (is_error(ret))
1324         return get_errno(ret);
1325 
1326     /* Several targets have special calling conventions for the original
1327        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1328     if (!is_pipe2) {
1329 #if defined(TARGET_ALPHA)
1330         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1331         return host_pipe[0];
1332 #elif defined(TARGET_MIPS)
1333         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1334         return host_pipe[0];
1335 #elif defined(TARGET_SH4)
1336         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1337         return host_pipe[0];
1338 #elif defined(TARGET_SPARC)
1339         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1340         return host_pipe[0];
1341 #endif
1342     }
1343 
1344     if (put_user_s32(host_pipe[0], pipedes)
1345         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1346         return -TARGET_EFAULT;
1347     return get_errno(ret);
1348 }
1349 
1350 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1351                                               abi_ulong target_addr,
1352                                               socklen_t len)
1353 {
1354     struct target_ip_mreqn *target_smreqn;
1355 
1356     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1357     if (!target_smreqn)
1358         return -TARGET_EFAULT;
1359     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1360     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1361     if (len == sizeof(struct target_ip_mreqn))
1362         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1363     unlock_user(target_smreqn, target_addr, 0);
1364 
1365     return 0;
1366 }
1367 
1368 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1369                                                abi_ulong target_addr,
1370                                                socklen_t len)
1371 {
1372     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1373     sa_family_t sa_family;
1374     struct target_sockaddr *target_saddr;
1375 
1376     if (fd_trans_target_to_host_addr(fd)) {
1377         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1378     }
1379 
1380     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1381     if (!target_saddr)
1382         return -TARGET_EFAULT;
1383 
1384     sa_family = tswap16(target_saddr->sa_family);
1385 
1386     /* Oops. The caller might send an incomplete sun_path; sun_path
1387      * must be terminated by \0 (see the manual page), but
1388      * unfortunately it is quite common to specify sockaddr_un
1389      * length as "strlen(x->sun_path)" while it should be
1390      * "strlen(...) + 1". We'll fix that here if needed.
1391      * The Linux kernel has a similar feature.
1392      */
1393 
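    /*
     * Worked example (illustrative): a guest that calls
     * connect(fd, addr, offsetof(struct sockaddr_un, sun_path) + strlen(path))
     * omits the terminating NUL; the check below notices that the last
     * supplied byte is non-zero while the following byte is zero, and
     * extends len by one so the host kernel sees a properly terminated path.
     */
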
1394     if (sa_family == AF_UNIX) {
1395         if (len < unix_maxlen && len > 0) {
1396             char *cp = (char*)target_saddr;
1397 
1398             if ( cp[len-1] && !cp[len] )
1399                 len++;
1400         }
1401         if (len > unix_maxlen)
1402             len = unix_maxlen;
1403     }
1404 
1405     memcpy(addr, target_saddr, len);
1406     addr->sa_family = sa_family;
1407     if (sa_family == AF_NETLINK) {
1408         struct sockaddr_nl *nladdr;
1409 
1410         nladdr = (struct sockaddr_nl *)addr;
1411         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1412         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1413     } else if (sa_family == AF_PACKET) {
1414 	struct target_sockaddr_ll *lladdr;
1415 
1416 	lladdr = (struct target_sockaddr_ll *)addr;
1417 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1418 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1419     }
1420     unlock_user(target_saddr, target_addr, 0);
1421 
1422     return 0;
1423 }
1424 
1425 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1426                                                struct sockaddr *addr,
1427                                                socklen_t len)
1428 {
1429     struct target_sockaddr *target_saddr;
1430 
1431     if (len == 0) {
1432         return 0;
1433     }
1434     assert(addr);
1435 
1436     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1437     if (!target_saddr)
1438         return -TARGET_EFAULT;
1439     memcpy(target_saddr, addr, len);
1440     if (len >= offsetof(struct target_sockaddr, sa_family) +
1441         sizeof(target_saddr->sa_family)) {
1442         target_saddr->sa_family = tswap16(addr->sa_family);
1443     }
1444     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1445         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1446         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1447         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1448     } else if (addr->sa_family == AF_PACKET) {
1449         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1450         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1451         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1452     } else if (addr->sa_family == AF_INET6 &&
1453                len >= sizeof(struct target_sockaddr_in6)) {
1454         struct target_sockaddr_in6 *target_in6 =
1455                (struct target_sockaddr_in6 *)target_saddr;
1456         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1457     }
1458     unlock_user(target_saddr, target_addr, len);
1459 
1460     return 0;
1461 }
1462 
1463 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1464                                            struct target_msghdr *target_msgh)
1465 {
1466     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1467     abi_long msg_controllen;
1468     abi_ulong target_cmsg_addr;
1469     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1470     socklen_t space = 0;
1471 
1472     msg_controllen = tswapal(target_msgh->msg_controllen);
1473     if (msg_controllen < sizeof (struct target_cmsghdr))
1474         goto the_end;
1475     target_cmsg_addr = tswapal(target_msgh->msg_control);
1476     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1477     target_cmsg_start = target_cmsg;
1478     if (!target_cmsg)
1479         return -TARGET_EFAULT;
1480 
1481     while (cmsg && target_cmsg) {
1482         void *data = CMSG_DATA(cmsg);
1483         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1484 
1485         int len = tswapal(target_cmsg->cmsg_len)
1486             - sizeof(struct target_cmsghdr);
1487 
1488         space += CMSG_SPACE(len);
1489         if (space > msgh->msg_controllen) {
1490             space -= CMSG_SPACE(len);
1491             /* This is a QEMU bug, since we allocated the payload
1492              * area ourselves (unlike overflow in host-to-target
1493              * conversion, which is just the guest giving us a buffer
1494              * that's too small). It can't happen for the payload types
1495              * we currently support; if it becomes an issue in future
1496              * we would need to improve our allocation strategy to
1497              * something more intelligent than "twice the size of the
1498              * target buffer we're reading from".
1499              */
1500             gemu_log("Host cmsg overflow\n");
1501             break;
1502         }
1503 
1504         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1505             cmsg->cmsg_level = SOL_SOCKET;
1506         } else {
1507             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1508         }
1509         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1510         cmsg->cmsg_len = CMSG_LEN(len);
1511 
1512         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1513             int *fd = (int *)data;
1514             int *target_fd = (int *)target_data;
1515             int i, numfds = len / sizeof(int);
1516 
1517             for (i = 0; i < numfds; i++) {
1518                 __get_user(fd[i], target_fd + i);
1519             }
1520         } else if (cmsg->cmsg_level == SOL_SOCKET
1521                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1522             struct ucred *cred = (struct ucred *)data;
1523             struct target_ucred *target_cred =
1524                 (struct target_ucred *)target_data;
1525 
1526             __get_user(cred->pid, &target_cred->pid);
1527             __get_user(cred->uid, &target_cred->uid);
1528             __get_user(cred->gid, &target_cred->gid);
1529         } else {
1530             gemu_log("Unsupported ancillary data: %d/%d\n",
1531                                         cmsg->cmsg_level, cmsg->cmsg_type);
1532             memcpy(data, target_data, len);
1533         }
1534 
1535         cmsg = CMSG_NXTHDR(msgh, cmsg);
1536         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1537                                          target_cmsg_start);
1538     }
1539     unlock_user(target_cmsg, target_cmsg_addr, 0);
1540  the_end:
1541     msgh->msg_controllen = space;
1542     return 0;
1543 }
1544 
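/*
 * The reverse direction: after recvmsg() the host control messages are
 * converted back into the guest layout.  Payload types whose size differs
 * between host and target (SO_TIMESTAMP's struct timeval, for instance)
 * adjust tgt_len; when the guest's control buffer is too small the data
 * is truncated and MSG_CTRUNC is set, mirroring the kernel's put_cmsg()
 * behaviour as the comments below explain.
 */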
1545 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1546                                            struct msghdr *msgh)
1547 {
1548     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1549     abi_long msg_controllen;
1550     abi_ulong target_cmsg_addr;
1551     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1552     socklen_t space = 0;
1553 
1554     msg_controllen = tswapal(target_msgh->msg_controllen);
1555     if (msg_controllen < sizeof (struct target_cmsghdr))
1556         goto the_end;
1557     target_cmsg_addr = tswapal(target_msgh->msg_control);
1558     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1559     target_cmsg_start = target_cmsg;
1560     if (!target_cmsg)
1561         return -TARGET_EFAULT;
1562 
1563     while (cmsg && target_cmsg) {
1564         void *data = CMSG_DATA(cmsg);
1565         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1566 
1567         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1568         int tgt_len, tgt_space;
1569 
1570         /* We never copy a half-header but may copy half-data;
1571          * this is Linux's behaviour in put_cmsg(). Note that
1572          * truncation here is a guest problem (which we report
1573          * to the guest via the CTRUNC bit), unlike truncation
1574          * in target_to_host_cmsg, which is a QEMU bug.
1575          */
1576         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1577             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1578             break;
1579         }
1580 
1581         if (cmsg->cmsg_level == SOL_SOCKET) {
1582             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1583         } else {
1584             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1585         }
1586         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1587 
1588         /* Payload types which need a different size of payload on
1589          * the target must adjust tgt_len here.
1590          */
1591         tgt_len = len;
1592         switch (cmsg->cmsg_level) {
1593         case SOL_SOCKET:
1594             switch (cmsg->cmsg_type) {
1595             case SO_TIMESTAMP:
1596                 tgt_len = sizeof(struct target_timeval);
1597                 break;
1598             default:
1599                 break;
1600             }
1601             break;
1602         default:
1603             break;
1604         }
1605 
1606         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1607             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1608             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1609         }
1610 
1611         /* We must now copy-and-convert len bytes of payload
1612          * into tgt_len bytes of destination space. Bear in mind
1613          * that in both source and destination we may be dealing
1614          * with a truncated value!
1615          */
1616         switch (cmsg->cmsg_level) {
1617         case SOL_SOCKET:
1618             switch (cmsg->cmsg_type) {
1619             case SCM_RIGHTS:
1620             {
1621                 int *fd = (int *)data;
1622                 int *target_fd = (int *)target_data;
1623                 int i, numfds = tgt_len / sizeof(int);
1624 
1625                 for (i = 0; i < numfds; i++) {
1626                     __put_user(fd[i], target_fd + i);
1627                 }
1628                 break;
1629             }
1630             case SO_TIMESTAMP:
1631             {
1632                 struct timeval *tv = (struct timeval *)data;
1633                 struct target_timeval *target_tv =
1634                     (struct target_timeval *)target_data;
1635 
1636                 if (len != sizeof(struct timeval) ||
1637                     tgt_len != sizeof(struct target_timeval)) {
1638                     goto unimplemented;
1639                 }
1640 
1641                 /* copy struct timeval to target */
1642                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1643                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1644                 break;
1645             }
1646             case SCM_CREDENTIALS:
1647             {
1648                 struct ucred *cred = (struct ucred *)data;
1649                 struct target_ucred *target_cred =
1650                     (struct target_ucred *)target_data;
1651 
1652                 __put_user(cred->pid, &target_cred->pid);
1653                 __put_user(cred->uid, &target_cred->uid);
1654                 __put_user(cred->gid, &target_cred->gid);
1655                 break;
1656             }
1657             default:
1658                 goto unimplemented;
1659             }
1660             break;
1661 
1662         case SOL_IP:
1663             switch (cmsg->cmsg_type) {
1664             case IP_TTL:
1665             {
1666                 uint32_t *v = (uint32_t *)data;
1667                 uint32_t *t_int = (uint32_t *)target_data;
1668 
1669                 if (len != sizeof(uint32_t) ||
1670                     tgt_len != sizeof(uint32_t)) {
1671                     goto unimplemented;
1672                 }
1673                 __put_user(*v, t_int);
1674                 break;
1675             }
1676             case IP_RECVERR:
1677             {
1678                 struct errhdr_t {
1679                    struct sock_extended_err ee;
1680                    struct sockaddr_in offender;
1681                 };
1682                 struct errhdr_t *errh = (struct errhdr_t *)data;
1683                 struct errhdr_t *target_errh =
1684                     (struct errhdr_t *)target_data;
1685 
1686                 if (len != sizeof(struct errhdr_t) ||
1687                     tgt_len != sizeof(struct errhdr_t)) {
1688                     goto unimplemented;
1689                 }
1690                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1691                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1692                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1693                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1694                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1695                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1696                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1697                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1698                     (void *) &errh->offender, sizeof(errh->offender));
1699                 break;
1700             }
1701             default:
1702                 goto unimplemented;
1703             }
1704             break;
1705 
1706         case SOL_IPV6:
1707             switch (cmsg->cmsg_type) {
1708             case IPV6_HOPLIMIT:
1709             {
1710                 uint32_t *v = (uint32_t *)data;
1711                 uint32_t *t_int = (uint32_t *)target_data;
1712 
1713                 if (len != sizeof(uint32_t) ||
1714                     tgt_len != sizeof(uint32_t)) {
1715                     goto unimplemented;
1716                 }
1717                 __put_user(*v, t_int);
1718                 break;
1719             }
1720             case IPV6_RECVERR:
1721             {
1722                 struct errhdr6_t {
1723                    struct sock_extended_err ee;
1724                    struct sockaddr_in6 offender;
1725                 };
1726                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1727                 struct errhdr6_t *target_errh =
1728                     (struct errhdr6_t *)target_data;
1729 
1730                 if (len != sizeof(struct errhdr6_t) ||
1731                     tgt_len != sizeof(struct errhdr6_t)) {
1732                     goto unimplemented;
1733                 }
1734                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1735                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1736                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1737                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1738                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1739                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1740                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1741                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1742                     (void *) &errh->offender, sizeof(errh->offender));
1743                 break;
1744             }
1745             default:
1746                 goto unimplemented;
1747             }
1748             break;
1749 
1750         default:
1751         unimplemented:
1752             gemu_log("Unsupported ancillary data: %d/%d\n",
1753                                         cmsg->cmsg_level, cmsg->cmsg_type);
1754             memcpy(target_data, data, MIN(len, tgt_len));
1755             if (tgt_len > len) {
1756                 memset(target_data + len, 0, tgt_len - len);
1757             }
1758         }
1759 
1760         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1761         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1762         if (msg_controllen < tgt_space) {
1763             tgt_space = msg_controllen;
1764         }
1765         msg_controllen -= tgt_space;
1766         space += tgt_space;
1767         cmsg = CMSG_NXTHDR(msgh, cmsg);
1768         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1769                                          target_cmsg_start);
1770     }
1771     unlock_user(target_cmsg, target_cmsg_addr, space);
1772  the_end:
1773     target_msgh->msg_controllen = tswapal(space);
1774     return 0;
1775 }
1776 
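/*
 * The guest hands setsockopt() an opaque (pointer, length) pair, so every
 * supported level/option needs its own conversion: plain integer options
 * are read with get_user_u32(), structured ones (ip_mreqn, icmp6_filter,
 * sock_fprog, linger, timeval) are converted field by field, and
 * SO_BINDTODEVICE copies the interface name and NUL-terminates it.
 * Anything unrecognised falls through to the "unimplemented" label and is
 * reported as -TARGET_ENOPROTOOPT.  For example, a guest
 *
 *     setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
 *
 * (illustrative only) is handled by the generic 'int' path at the end of
 * the TARGET_SOL_SOCKET switch.
 */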
1777 /* do_setsockopt() Must return target values and target errnos. */
1778 static abi_long do_setsockopt(int sockfd, int level, int optname,
1779                               abi_ulong optval_addr, socklen_t optlen)
1780 {
1781     abi_long ret;
1782     int val;
1783     struct ip_mreqn *ip_mreq;
1784     struct ip_mreq_source *ip_mreq_source;
1785 
1786     switch(level) {
1787     case SOL_TCP:
1788         /* TCP options all take an 'int' value.  */
1789         if (optlen < sizeof(uint32_t))
1790             return -TARGET_EINVAL;
1791 
1792         if (get_user_u32(val, optval_addr))
1793             return -TARGET_EFAULT;
1794         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1795         break;
1796     case SOL_IP:
1797         switch(optname) {
1798         case IP_TOS:
1799         case IP_TTL:
1800         case IP_HDRINCL:
1801         case IP_ROUTER_ALERT:
1802         case IP_RECVOPTS:
1803         case IP_RETOPTS:
1804         case IP_PKTINFO:
1805         case IP_MTU_DISCOVER:
1806         case IP_RECVERR:
1807         case IP_RECVTTL:
1808         case IP_RECVTOS:
1809 #ifdef IP_FREEBIND
1810         case IP_FREEBIND:
1811 #endif
1812         case IP_MULTICAST_TTL:
1813         case IP_MULTICAST_LOOP:
1814             val = 0;
1815             if (optlen >= sizeof(uint32_t)) {
1816                 if (get_user_u32(val, optval_addr))
1817                     return -TARGET_EFAULT;
1818             } else if (optlen >= 1) {
1819                 if (get_user_u8(val, optval_addr))
1820                     return -TARGET_EFAULT;
1821             }
1822             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1823             break;
1824         case IP_ADD_MEMBERSHIP:
1825         case IP_DROP_MEMBERSHIP:
1826             if (optlen < sizeof (struct target_ip_mreq) ||
1827                 optlen > sizeof (struct target_ip_mreqn))
1828                 return -TARGET_EINVAL;
1829 
1830             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1831             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1832             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1833             break;
1834 
1835         case IP_BLOCK_SOURCE:
1836         case IP_UNBLOCK_SOURCE:
1837         case IP_ADD_SOURCE_MEMBERSHIP:
1838         case IP_DROP_SOURCE_MEMBERSHIP:
1839             if (optlen != sizeof (struct target_ip_mreq_source))
1840                 return -TARGET_EINVAL;
1841 
1842             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1843             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1844             unlock_user (ip_mreq_source, optval_addr, 0);
1845             break;
1846 
1847         default:
1848             goto unimplemented;
1849         }
1850         break;
1851     case SOL_IPV6:
1852         switch (optname) {
1853         case IPV6_MTU_DISCOVER:
1854         case IPV6_MTU:
1855         case IPV6_V6ONLY:
1856         case IPV6_RECVPKTINFO:
1857         case IPV6_UNICAST_HOPS:
1858         case IPV6_MULTICAST_HOPS:
1859         case IPV6_MULTICAST_LOOP:
1860         case IPV6_RECVERR:
1861         case IPV6_RECVHOPLIMIT:
1862         case IPV6_2292HOPLIMIT:
1863         case IPV6_CHECKSUM:
1864         case IPV6_ADDRFORM:
1865         case IPV6_2292PKTINFO:
1866         case IPV6_RECVTCLASS:
1867         case IPV6_RECVRTHDR:
1868         case IPV6_2292RTHDR:
1869         case IPV6_RECVHOPOPTS:
1870         case IPV6_2292HOPOPTS:
1871         case IPV6_RECVDSTOPTS:
1872         case IPV6_2292DSTOPTS:
1873         case IPV6_TCLASS:
1874 #ifdef IPV6_RECVPATHMTU
1875         case IPV6_RECVPATHMTU:
1876 #endif
1877 #ifdef IPV6_TRANSPARENT
1878         case IPV6_TRANSPARENT:
1879 #endif
1880 #ifdef IPV6_FREEBIND
1881         case IPV6_FREEBIND:
1882 #endif
1883 #ifdef IPV6_RECVORIGDSTADDR
1884         case IPV6_RECVORIGDSTADDR:
1885 #endif
1886             val = 0;
1887             if (optlen < sizeof(uint32_t)) {
1888                 return -TARGET_EINVAL;
1889             }
1890             if (get_user_u32(val, optval_addr)) {
1891                 return -TARGET_EFAULT;
1892             }
1893             ret = get_errno(setsockopt(sockfd, level, optname,
1894                                        &val, sizeof(val)));
1895             break;
1896         case IPV6_PKTINFO:
1897         {
1898             struct in6_pktinfo pki;
1899 
1900             if (optlen < sizeof(pki)) {
1901                 return -TARGET_EINVAL;
1902             }
1903 
1904             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1905                 return -TARGET_EFAULT;
1906             }
1907 
1908             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1909 
1910             ret = get_errno(setsockopt(sockfd, level, optname,
1911                                        &pki, sizeof(pki)));
1912             break;
1913         }
1914         default:
1915             goto unimplemented;
1916         }
1917         break;
1918     case SOL_ICMPV6:
1919         switch (optname) {
1920         case ICMPV6_FILTER:
1921         {
1922             struct icmp6_filter icmp6f;
1923 
1924             if (optlen > sizeof(icmp6f)) {
1925                 optlen = sizeof(icmp6f);
1926             }
1927 
1928             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
1929                 return -TARGET_EFAULT;
1930             }
1931 
1932             for (val = 0; val < 8; val++) {
1933                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
1934             }
1935 
1936             ret = get_errno(setsockopt(sockfd, level, optname,
1937                                        &icmp6f, optlen));
1938             break;
1939         }
1940         default:
1941             goto unimplemented;
1942         }
1943         break;
1944     case SOL_RAW:
1945         switch (optname) {
1946         case ICMP_FILTER:
1947         case IPV6_CHECKSUM:
1948             /* those take a u32 value */
1949             if (optlen < sizeof(uint32_t)) {
1950                 return -TARGET_EINVAL;
1951             }
1952 
1953             if (get_user_u32(val, optval_addr)) {
1954                 return -TARGET_EFAULT;
1955             }
1956             ret = get_errno(setsockopt(sockfd, level, optname,
1957                                        &val, sizeof(val)));
1958             break;
1959 
1960         default:
1961             goto unimplemented;
1962         }
1963         break;
1964     case TARGET_SOL_SOCKET:
1965         switch (optname) {
1966         case TARGET_SO_RCVTIMEO:
1967         {
1968                 struct timeval tv;
1969 
1970                 optname = SO_RCVTIMEO;
1971 
1972 set_timeout:
1973                 if (optlen != sizeof(struct target_timeval)) {
1974                     return -TARGET_EINVAL;
1975                 }
1976 
1977                 if (copy_from_user_timeval(&tv, optval_addr)) {
1978                     return -TARGET_EFAULT;
1979                 }
1980 
1981                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1982                                 &tv, sizeof(tv)));
1983                 return ret;
1984         }
1985         case TARGET_SO_SNDTIMEO:
1986                 optname = SO_SNDTIMEO;
1987                 goto set_timeout;
1988         case TARGET_SO_ATTACH_FILTER:
1989         {
1990                 struct target_sock_fprog *tfprog;
1991                 struct target_sock_filter *tfilter;
1992                 struct sock_fprog fprog;
1993                 struct sock_filter *filter;
1994                 int i;
1995 
1996                 if (optlen != sizeof(*tfprog)) {
1997                     return -TARGET_EINVAL;
1998                 }
1999                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2000                     return -TARGET_EFAULT;
2001                 }
2002                 if (!lock_user_struct(VERIFY_READ, tfilter,
2003                                       tswapal(tfprog->filter), 0)) {
2004                     unlock_user_struct(tfprog, optval_addr, 1);
2005                     return -TARGET_EFAULT;
2006                 }
2007 
2008                 fprog.len = tswap16(tfprog->len);
2009                 filter = g_try_new(struct sock_filter, fprog.len);
2010                 if (filter == NULL) {
2011                     unlock_user_struct(tfilter, tfprog->filter, 1);
2012                     unlock_user_struct(tfprog, optval_addr, 1);
2013                     return -TARGET_ENOMEM;
2014                 }
2015                 for (i = 0; i < fprog.len; i++) {
2016                     filter[i].code = tswap16(tfilter[i].code);
2017                     filter[i].jt = tfilter[i].jt;
2018                     filter[i].jf = tfilter[i].jf;
2019                     filter[i].k = tswap32(tfilter[i].k);
2020                 }
2021                 fprog.filter = filter;
2022 
2023                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2024                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2025                 g_free(filter);
2026 
2027                 unlock_user_struct(tfilter, tfprog->filter, 1);
2028                 unlock_user_struct(tfprog, optval_addr, 1);
2029                 return ret;
2030         }
2031 	case TARGET_SO_BINDTODEVICE:
2032 	{
2033 		char *dev_ifname, *addr_ifname;
2034 
2035 		if (optlen > IFNAMSIZ - 1) {
2036 		    optlen = IFNAMSIZ - 1;
2037 		}
2038 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2039 		if (!dev_ifname) {
2040 		    return -TARGET_EFAULT;
2041 		}
2042 		optname = SO_BINDTODEVICE;
2043 		addr_ifname = alloca(IFNAMSIZ);
2044 		memcpy(addr_ifname, dev_ifname, optlen);
2045 		addr_ifname[optlen] = 0;
2046 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2047                                            addr_ifname, optlen));
2048 		unlock_user (dev_ifname, optval_addr, 0);
2049 		return ret;
2050 	}
2051         case TARGET_SO_LINGER:
2052         {
2053                 struct linger lg;
2054                 struct target_linger *tlg;
2055 
2056                 if (optlen != sizeof(struct target_linger)) {
2057                     return -TARGET_EINVAL;
2058                 }
2059                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2060                     return -TARGET_EFAULT;
2061                 }
2062                 __get_user(lg.l_onoff, &tlg->l_onoff);
2063                 __get_user(lg.l_linger, &tlg->l_linger);
2064                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2065                                 &lg, sizeof(lg)));
2066                 unlock_user_struct(tlg, optval_addr, 0);
2067                 return ret;
2068         }
2069             /* Options with 'int' argument.  */
2070         case TARGET_SO_DEBUG:
2071 		optname = SO_DEBUG;
2072 		break;
2073         case TARGET_SO_REUSEADDR:
2074 		optname = SO_REUSEADDR;
2075 		break;
2076 #ifdef SO_REUSEPORT
2077         case TARGET_SO_REUSEPORT:
2078                 optname = SO_REUSEPORT;
2079                 break;
2080 #endif
2081         case TARGET_SO_TYPE:
2082 		optname = SO_TYPE;
2083 		break;
2084         case TARGET_SO_ERROR:
2085 		optname = SO_ERROR;
2086 		break;
2087         case TARGET_SO_DONTROUTE:
2088 		optname = SO_DONTROUTE;
2089 		break;
2090         case TARGET_SO_BROADCAST:
2091 		optname = SO_BROADCAST;
2092 		break;
2093         case TARGET_SO_SNDBUF:
2094 		optname = SO_SNDBUF;
2095 		break;
2096         case TARGET_SO_SNDBUFFORCE:
2097                 optname = SO_SNDBUFFORCE;
2098                 break;
2099         case TARGET_SO_RCVBUF:
2100 		optname = SO_RCVBUF;
2101 		break;
2102         case TARGET_SO_RCVBUFFORCE:
2103                 optname = SO_RCVBUFFORCE;
2104                 break;
2105         case TARGET_SO_KEEPALIVE:
2106 		optname = SO_KEEPALIVE;
2107 		break;
2108         case TARGET_SO_OOBINLINE:
2109 		optname = SO_OOBINLINE;
2110 		break;
2111         case TARGET_SO_NO_CHECK:
2112 		optname = SO_NO_CHECK;
2113 		break;
2114         case TARGET_SO_PRIORITY:
2115 		optname = SO_PRIORITY;
2116 		break;
2117 #ifdef SO_BSDCOMPAT
2118         case TARGET_SO_BSDCOMPAT:
2119 		optname = SO_BSDCOMPAT;
2120 		break;
2121 #endif
2122         case TARGET_SO_PASSCRED:
2123 		optname = SO_PASSCRED;
2124 		break;
2125         case TARGET_SO_PASSSEC:
2126                 optname = SO_PASSSEC;
2127                 break;
2128         case TARGET_SO_TIMESTAMP:
2129 		optname = SO_TIMESTAMP;
2130 		break;
2131         case TARGET_SO_RCVLOWAT:
2132 		optname = SO_RCVLOWAT;
2133 		break;
2134         default:
2135             goto unimplemented;
2136         }
2137 	if (optlen < sizeof(uint32_t))
2138             return -TARGET_EINVAL;
2139 
2140 	if (get_user_u32(val, optval_addr))
2141             return -TARGET_EFAULT;
2142 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2143         break;
2144     default:
2145     unimplemented:
2146         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2147         ret = -TARGET_ENOPROTOOPT;
2148     }
2149     return ret;
2150 }
2151 
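/*
 * getsockopt() counterpart.  Most options funnel through the "int_case"
 * label: the host value is read into an int and written back either as a
 * u32 or, when the guest asked for less than sizeof(int), as a single
 * byte, with the resulting length stored back through optlen.  Structured
 * results such as SO_PEERCRED and SO_LINGER are converted explicitly.
 */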
2152 /* do_getsockopt() Must return target values and target errnos. */
2153 static abi_long do_getsockopt(int sockfd, int level, int optname,
2154                               abi_ulong optval_addr, abi_ulong optlen)
2155 {
2156     abi_long ret;
2157     int len, val;
2158     socklen_t lv;
2159 
2160     switch(level) {
2161     case TARGET_SOL_SOCKET:
2162         level = SOL_SOCKET;
2163         switch (optname) {
2164         /* These don't just return a single integer */
2165         case TARGET_SO_RCVTIMEO:
2166         case TARGET_SO_SNDTIMEO:
2167         case TARGET_SO_PEERNAME:
2168             goto unimplemented;
2169         case TARGET_SO_PEERCRED: {
2170             struct ucred cr;
2171             socklen_t crlen;
2172             struct target_ucred *tcr;
2173 
2174             if (get_user_u32(len, optlen)) {
2175                 return -TARGET_EFAULT;
2176             }
2177             if (len < 0) {
2178                 return -TARGET_EINVAL;
2179             }
2180 
2181             crlen = sizeof(cr);
2182             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2183                                        &cr, &crlen));
2184             if (ret < 0) {
2185                 return ret;
2186             }
2187             if (len > crlen) {
2188                 len = crlen;
2189             }
2190             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2191                 return -TARGET_EFAULT;
2192             }
2193             __put_user(cr.pid, &tcr->pid);
2194             __put_user(cr.uid, &tcr->uid);
2195             __put_user(cr.gid, &tcr->gid);
2196             unlock_user_struct(tcr, optval_addr, 1);
2197             if (put_user_u32(len, optlen)) {
2198                 return -TARGET_EFAULT;
2199             }
2200             break;
2201         }
2202         case TARGET_SO_LINGER:
2203         {
2204             struct linger lg;
2205             socklen_t lglen;
2206             struct target_linger *tlg;
2207 
2208             if (get_user_u32(len, optlen)) {
2209                 return -TARGET_EFAULT;
2210             }
2211             if (len < 0) {
2212                 return -TARGET_EINVAL;
2213             }
2214 
2215             lglen = sizeof(lg);
2216             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2217                                        &lg, &lglen));
2218             if (ret < 0) {
2219                 return ret;
2220             }
2221             if (len > lglen) {
2222                 len = lglen;
2223             }
2224             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2225                 return -TARGET_EFAULT;
2226             }
2227             __put_user(lg.l_onoff, &tlg->l_onoff);
2228             __put_user(lg.l_linger, &tlg->l_linger);
2229             unlock_user_struct(tlg, optval_addr, 1);
2230             if (put_user_u32(len, optlen)) {
2231                 return -TARGET_EFAULT;
2232             }
2233             break;
2234         }
2235         /* Options with 'int' argument.  */
2236         case TARGET_SO_DEBUG:
2237             optname = SO_DEBUG;
2238             goto int_case;
2239         case TARGET_SO_REUSEADDR:
2240             optname = SO_REUSEADDR;
2241             goto int_case;
2242 #ifdef SO_REUSEPORT
2243         case TARGET_SO_REUSEPORT:
2244             optname = SO_REUSEPORT;
2245             goto int_case;
2246 #endif
2247         case TARGET_SO_TYPE:
2248             optname = SO_TYPE;
2249             goto int_case;
2250         case TARGET_SO_ERROR:
2251             optname = SO_ERROR;
2252             goto int_case;
2253         case TARGET_SO_DONTROUTE:
2254             optname = SO_DONTROUTE;
2255             goto int_case;
2256         case TARGET_SO_BROADCAST:
2257             optname = SO_BROADCAST;
2258             goto int_case;
2259         case TARGET_SO_SNDBUF:
2260             optname = SO_SNDBUF;
2261             goto int_case;
2262         case TARGET_SO_RCVBUF:
2263             optname = SO_RCVBUF;
2264             goto int_case;
2265         case TARGET_SO_KEEPALIVE:
2266             optname = SO_KEEPALIVE;
2267             goto int_case;
2268         case TARGET_SO_OOBINLINE:
2269             optname = SO_OOBINLINE;
2270             goto int_case;
2271         case TARGET_SO_NO_CHECK:
2272             optname = SO_NO_CHECK;
2273             goto int_case;
2274         case TARGET_SO_PRIORITY:
2275             optname = SO_PRIORITY;
2276             goto int_case;
2277 #ifdef SO_BSDCOMPAT
2278         case TARGET_SO_BSDCOMPAT:
2279             optname = SO_BSDCOMPAT;
2280             goto int_case;
2281 #endif
2282         case TARGET_SO_PASSCRED:
2283             optname = SO_PASSCRED;
2284             goto int_case;
2285         case TARGET_SO_TIMESTAMP:
2286             optname = SO_TIMESTAMP;
2287             goto int_case;
2288         case TARGET_SO_RCVLOWAT:
2289             optname = SO_RCVLOWAT;
2290             goto int_case;
2291         case TARGET_SO_ACCEPTCONN:
2292             optname = SO_ACCEPTCONN;
2293             goto int_case;
2294         default:
2295             goto int_case;
2296         }
2297         break;
2298     case SOL_TCP:
2299         /* TCP options all take an 'int' value.  */
2300     int_case:
2301         if (get_user_u32(len, optlen))
2302             return -TARGET_EFAULT;
2303         if (len < 0)
2304             return -TARGET_EINVAL;
2305         lv = sizeof(lv);
2306         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2307         if (ret < 0)
2308             return ret;
2309         if (optname == SO_TYPE) {
2310             val = host_to_target_sock_type(val);
2311         }
2312         if (len > lv)
2313             len = lv;
2314         if (len == 4) {
2315             if (put_user_u32(val, optval_addr))
2316                 return -TARGET_EFAULT;
2317         } else {
2318             if (put_user_u8(val, optval_addr))
2319                 return -TARGET_EFAULT;
2320         }
2321         if (put_user_u32(len, optlen))
2322             return -TARGET_EFAULT;
2323         break;
2324     case SOL_IP:
2325         switch(optname) {
2326         case IP_TOS:
2327         case IP_TTL:
2328         case IP_HDRINCL:
2329         case IP_ROUTER_ALERT:
2330         case IP_RECVOPTS:
2331         case IP_RETOPTS:
2332         case IP_PKTINFO:
2333         case IP_MTU_DISCOVER:
2334         case IP_RECVERR:
2335         case IP_RECVTOS:
2336 #ifdef IP_FREEBIND
2337         case IP_FREEBIND:
2338 #endif
2339         case IP_MULTICAST_TTL:
2340         case IP_MULTICAST_LOOP:
2341             if (get_user_u32(len, optlen))
2342                 return -TARGET_EFAULT;
2343             if (len < 0)
2344                 return -TARGET_EINVAL;
2345             lv = sizeof(lv);
2346             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2347             if (ret < 0)
2348                 return ret;
2349             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2350                 len = 1;
2351                 if (put_user_u32(len, optlen)
2352                     || put_user_u8(val, optval_addr))
2353                     return -TARGET_EFAULT;
2354             } else {
2355                 if (len > sizeof(int))
2356                     len = sizeof(int);
2357                 if (put_user_u32(len, optlen)
2358                     || put_user_u32(val, optval_addr))
2359                     return -TARGET_EFAULT;
2360             }
2361             break;
2362         default:
2363             ret = -TARGET_ENOPROTOOPT;
2364             break;
2365         }
2366         break;
2367     case SOL_IPV6:
2368         switch (optname) {
2369         case IPV6_MTU_DISCOVER:
2370         case IPV6_MTU:
2371         case IPV6_V6ONLY:
2372         case IPV6_RECVPKTINFO:
2373         case IPV6_UNICAST_HOPS:
2374         case IPV6_MULTICAST_HOPS:
2375         case IPV6_MULTICAST_LOOP:
2376         case IPV6_RECVERR:
2377         case IPV6_RECVHOPLIMIT:
2378         case IPV6_2292HOPLIMIT:
2379         case IPV6_CHECKSUM:
2380         case IPV6_ADDRFORM:
2381         case IPV6_2292PKTINFO:
2382         case IPV6_RECVTCLASS:
2383         case IPV6_RECVRTHDR:
2384         case IPV6_2292RTHDR:
2385         case IPV6_RECVHOPOPTS:
2386         case IPV6_2292HOPOPTS:
2387         case IPV6_RECVDSTOPTS:
2388         case IPV6_2292DSTOPTS:
2389         case IPV6_TCLASS:
2390 #ifdef IPV6_RECVPATHMTU
2391         case IPV6_RECVPATHMTU:
2392 #endif
2393 #ifdef IPV6_TRANSPARENT
2394         case IPV6_TRANSPARENT:
2395 #endif
2396 #ifdef IPV6_FREEBIND
2397         case IPV6_FREEBIND:
2398 #endif
2399 #ifdef IPV6_RECVORIGDSTADDR
2400         case IPV6_RECVORIGDSTADDR:
2401 #endif
2402             if (get_user_u32(len, optlen))
2403                 return -TARGET_EFAULT;
2404             if (len < 0)
2405                 return -TARGET_EINVAL;
2406             lv = sizeof(lv);
2407             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2408             if (ret < 0)
2409                 return ret;
2410             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2411                 len = 1;
2412                 if (put_user_u32(len, optlen)
2413                     || put_user_u8(val, optval_addr))
2414                     return -TARGET_EFAULT;
2415             } else {
2416                 if (len > sizeof(int))
2417                     len = sizeof(int);
2418                 if (put_user_u32(len, optlen)
2419                     || put_user_u32(val, optval_addr))
2420                     return -TARGET_EFAULT;
2421             }
2422             break;
2423         default:
2424             ret = -TARGET_ENOPROTOOPT;
2425             break;
2426         }
2427         break;
2428     default:
2429     unimplemented:
2430         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2431                  level, optname);
2432         ret = -TARGET_EOPNOTSUPP;
2433         break;
2434     }
2435     return ret;
2436 }
2437 
2438 /* Convert target low/high pair representing file offset into the host
2439  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2440  * as the kernel doesn't handle them either.
2441  */
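/*
 * Worked example (assuming a 32-bit target on a 64-bit host): a guest
 * offset passed as tlow = 0x89abcdef, thigh = 0x01234567 is reassembled
 * as off = 0x0123456789abcdef; *hlow receives the whole 64-bit value and
 * *hhigh ends up 0.  On a 32-bit host the same offset is split back into
 * its two halves.  The shift is done in two steps so that it stays well
 * defined when TARGET_LONG_BITS == 64 (a single shift by the full type
 * width would be undefined behaviour in C).
 */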
2442 static void target_to_host_low_high(abi_ulong tlow,
2443                                     abi_ulong thigh,
2444                                     unsigned long *hlow,
2445                                     unsigned long *hhigh)
2446 {
2447     uint64_t off = tlow |
2448         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2449         TARGET_LONG_BITS / 2;
2450 
2451     *hlow = off;
2452     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2453 }
2454 
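/*
 * Translate a guest iovec array into host struct iovec entries, locking
 * every guest buffer into host memory.  A bad address in the first entry
 * is reported as a fault, while bad addresses further down merely
 * truncate the transfer (the remaining entries become NULL/zero-length),
 * which is the partial-write behaviour described in the loop below.  The
 * result must be released with unlock_iovec().
 */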
2455 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2456                                 abi_ulong count, int copy)
2457 {
2458     struct target_iovec *target_vec;
2459     struct iovec *vec;
2460     abi_ulong total_len, max_len;
2461     int i;
2462     int err = 0;
2463     bool bad_address = false;
2464 
2465     if (count == 0) {
2466         errno = 0;
2467         return NULL;
2468     }
2469     if (count > IOV_MAX) {
2470         errno = EINVAL;
2471         return NULL;
2472     }
2473 
2474     vec = g_try_new0(struct iovec, count);
2475     if (vec == NULL) {
2476         errno = ENOMEM;
2477         return NULL;
2478     }
2479 
2480     target_vec = lock_user(VERIFY_READ, target_addr,
2481                            count * sizeof(struct target_iovec), 1);
2482     if (target_vec == NULL) {
2483         err = EFAULT;
2484         goto fail2;
2485     }
2486 
2487     /* ??? If host page size > target page size, this will result in a
2488        value larger than what we can actually support.  */
2489     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2490     total_len = 0;
2491 
2492     for (i = 0; i < count; i++) {
2493         abi_ulong base = tswapal(target_vec[i].iov_base);
2494         abi_long len = tswapal(target_vec[i].iov_len);
2495 
2496         if (len < 0) {
2497             err = EINVAL;
2498             goto fail;
2499         } else if (len == 0) {
2500             /* An iovec entry with zero length is ignored.  */
2501             vec[i].iov_base = 0;
2502         } else {
2503             vec[i].iov_base = lock_user(type, base, len, copy);
2504             /* If the first buffer pointer is bad, this is a fault.  But
2505              * subsequent bad buffers will result in a partial write; this
2506              * is realized by filling the vector with null pointers and
2507              * zero lengths. */
2508             if (!vec[i].iov_base) {
2509                 if (i == 0) {
2510                     err = EFAULT;
2511                     goto fail;
2512                 } else {
2513                     bad_address = true;
2514                 }
2515             }
2516             if (bad_address) {
2517                 len = 0;
2518             }
2519             if (len > max_len - total_len) {
2520                 len = max_len - total_len;
2521             }
2522         }
2523         vec[i].iov_len = len;
2524         total_len += len;
2525     }
2526 
2527     unlock_user(target_vec, target_addr, 0);
2528     return vec;
2529 
2530  fail:
2531     while (--i >= 0) {
2532         if (tswapal(target_vec[i].iov_len) > 0) {
2533             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2534         }
2535     }
2536     unlock_user(target_vec, target_addr, 0);
2537  fail2:
2538     g_free(vec);
2539     errno = err;
2540     return NULL;
2541 }
2542 
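/*
 * Undo lock_iovec(): unlock every guest buffer, copying data back to the
 * guest only when 'copy' is set, then free the host vector.
 */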
2543 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2544                          abi_ulong count, int copy)
2545 {
2546     struct target_iovec *target_vec;
2547     int i;
2548 
2549     target_vec = lock_user(VERIFY_READ, target_addr,
2550                            count * sizeof(struct target_iovec), 1);
2551     if (target_vec) {
2552         for (i = 0; i < count; i++) {
2553             abi_ulong base = tswapal(target_vec[i].iov_base);
2554             abi_long len = tswapal(target_vec[i].iov_len);
2555             if (len < 0) {
2556                 break;
2557             }
2558             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2559         }
2560         unlock_user(target_vec, target_addr, 0);
2561     }
2562 
2563     g_free(vec);
2564 }
2565 
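/*
 * Map the guest's socket type and its type flags onto host values.  The
 * base type is translated directly; SOCK_CLOEXEC and SOCK_NONBLOCK are
 * passed through only if the host defines them.  A missing SOCK_NONBLOCK
 * can still be emulated through O_NONBLOCK (see sock_flags_fixup()
 * below); otherwise unsupported flags yield -TARGET_EINVAL.
 */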
2566 static inline int target_to_host_sock_type(int *type)
2567 {
2568     int host_type = 0;
2569     int target_type = *type;
2570 
2571     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2572     case TARGET_SOCK_DGRAM:
2573         host_type = SOCK_DGRAM;
2574         break;
2575     case TARGET_SOCK_STREAM:
2576         host_type = SOCK_STREAM;
2577         break;
2578     default:
2579         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2580         break;
2581     }
2582     if (target_type & TARGET_SOCK_CLOEXEC) {
2583 #if defined(SOCK_CLOEXEC)
2584         host_type |= SOCK_CLOEXEC;
2585 #else
2586         return -TARGET_EINVAL;
2587 #endif
2588     }
2589     if (target_type & TARGET_SOCK_NONBLOCK) {
2590 #if defined(SOCK_NONBLOCK)
2591         host_type |= SOCK_NONBLOCK;
2592 #elif !defined(O_NONBLOCK)
2593         return -TARGET_EINVAL;
2594 #endif
2595     }
2596     *type = host_type;
2597     return 0;
2598 }
2599 
2600 /* Try to emulate socket type flags after socket creation.  */
2601 static int sock_flags_fixup(int fd, int target_type)
2602 {
2603 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2604     if (target_type & TARGET_SOCK_NONBLOCK) {
2605         int flags = fcntl(fd, F_GETFL);
2606         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2607             close(fd);
2608             return -TARGET_EINVAL;
2609         }
2610     }
2611 #endif
2612     return fd;
2613 }
2614 
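/*
 * socket(2): after converting the type flags, PF_NETLINK sockets are
 * restricted to the protocols QEMU knows how to translate
 * (NETLINK_ROUTE when CONFIG_RTNETLINK is set, NETLINK_KOBJECT_UEVENT,
 * NETLINK_AUDIT); anything else fails with EPFNOSUPPORT.  For AF_PACKET
 * and the legacy SOCK_PACKET type the 16-bit protocol number is
 * byte-swapped.  On success a per-fd data translator may be registered
 * via fd_trans_register() so later reads and writes are converted too.
 */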
2615 /* do_socket() Must return target values and target errnos. */
2616 static abi_long do_socket(int domain, int type, int protocol)
2617 {
2618     int target_type = type;
2619     int ret;
2620 
2621     ret = target_to_host_sock_type(&type);
2622     if (ret) {
2623         return ret;
2624     }
2625 
2626     if (domain == PF_NETLINK && !(
2627 #ifdef CONFIG_RTNETLINK
2628          protocol == NETLINK_ROUTE ||
2629 #endif
2630          protocol == NETLINK_KOBJECT_UEVENT ||
2631          protocol == NETLINK_AUDIT)) {
2632         return -EPFNOSUPPORT;
2633     }
2634 
2635     if (domain == AF_PACKET ||
2636         (domain == AF_INET && type == SOCK_PACKET)) {
2637         protocol = tswap16(protocol);
2638     }
2639 
2640     ret = get_errno(socket(domain, type, protocol));
2641     if (ret >= 0) {
2642         ret = sock_flags_fixup(ret, target_type);
2643         if (type == SOCK_PACKET) {
2644             /* Handle an obsolete case: if the socket type is
2645              * SOCK_PACKET, bind by name.
2646              */
2647             fd_trans_register(ret, &target_packet_trans);
2648         } else if (domain == PF_NETLINK) {
2649             switch (protocol) {
2650 #ifdef CONFIG_RTNETLINK
2651             case NETLINK_ROUTE:
2652                 fd_trans_register(ret, &target_netlink_route_trans);
2653                 break;
2654 #endif
2655             case NETLINK_KOBJECT_UEVENT:
2656                 /* nothing to do: messages are strings */
2657                 break;
2658             case NETLINK_AUDIT:
2659                 fd_trans_register(ret, &target_netlink_audit_trans);
2660                 break;
2661             default:
2662                 g_assert_not_reached();
2663             }
2664         }
2665     }
2666     return ret;
2667 }
2668 
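/*
 * bind(2) and connect(2) below share the same shape: validate addrlen,
 * convert the guest sockaddr with target_to_host_sockaddr() into a stack
 * buffer (one spare byte is allocated, presumably so the conversion
 * helper can NUL-terminate AF_UNIX paths), then issue the host call.
 */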
2669 /* do_bind() Must return target values and target errnos. */
2670 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2671                         socklen_t addrlen)
2672 {
2673     void *addr;
2674     abi_long ret;
2675 
2676     if ((int)addrlen < 0) {
2677         return -TARGET_EINVAL;
2678     }
2679 
2680     addr = alloca(addrlen+1);
2681 
2682     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2683     if (ret)
2684         return ret;
2685 
2686     return get_errno(bind(sockfd, addr, addrlen));
2687 }
2688 
2689 /* do_connect() Must return target values and target errnos. */
2690 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2691                            socklen_t addrlen)
2692 {
2693     void *addr;
2694     abi_long ret;
2695 
2696     if ((int)addrlen < 0) {
2697         return -TARGET_EINVAL;
2698     }
2699 
2700     addr = alloca(addrlen+1);
2701 
2702     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2703     if (ret)
2704         return ret;
2705 
2706     return get_errno(safe_connect(sockfd, addr, addrlen));
2707 }
2708 
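/*
 * Common implementation of sendmsg()/recvmsg().  The caller has already
 * locked the guest msghdr; this routine converts the name, control and
 * iovec members, calls safe_sendmsg()/safe_recvmsg(), and on the receive
 * path converts the results back to the guest.  Note that the host
 * control buffer is allocated at twice the guest's msg_controllen to
 * leave room for host cmsg layouts that are larger than the target's
 * (see the "QEMU bug" comment in target_to_host_cmsg()).
 */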
2709 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2710 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2711                                       int flags, int send)
2712 {
2713     abi_long ret, len;
2714     struct msghdr msg;
2715     abi_ulong count;
2716     struct iovec *vec;
2717     abi_ulong target_vec;
2718 
2719     if (msgp->msg_name) {
2720         msg.msg_namelen = tswap32(msgp->msg_namelen);
2721         msg.msg_name = alloca(msg.msg_namelen+1);
2722         ret = target_to_host_sockaddr(fd, msg.msg_name,
2723                                       tswapal(msgp->msg_name),
2724                                       msg.msg_namelen);
2725         if (ret == -TARGET_EFAULT) {
2726             /* For connected sockets msg_name and msg_namelen must
2727              * be ignored, so returning EFAULT immediately is wrong.
2728              * Instead, pass a bad msg_name to the host kernel, and
2729              * let it decide whether to return EFAULT or not.
2730              */
2731             msg.msg_name = (void *)-1;
2732         } else if (ret) {
2733             goto out2;
2734         }
2735     } else {
2736         msg.msg_name = NULL;
2737         msg.msg_namelen = 0;
2738     }
2739     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2740     msg.msg_control = alloca(msg.msg_controllen);
2741     memset(msg.msg_control, 0, msg.msg_controllen);
2742 
2743     msg.msg_flags = tswap32(msgp->msg_flags);
2744 
2745     count = tswapal(msgp->msg_iovlen);
2746     target_vec = tswapal(msgp->msg_iov);
2747 
2748     if (count > IOV_MAX) {
2749         /* sendmsg/recvmsg return a different errno for this condition than
2750          * readv/writev do, so we must catch it here before lock_iovec() does.
2751          */
2752         ret = -TARGET_EMSGSIZE;
2753         goto out2;
2754     }
2755 
2756     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2757                      target_vec, count, send);
2758     if (vec == NULL) {
2759         ret = -host_to_target_errno(errno);
2760         goto out2;
2761     }
2762     msg.msg_iovlen = count;
2763     msg.msg_iov = vec;
2764 
2765     if (send) {
2766         if (fd_trans_target_to_host_data(fd)) {
2767             void *host_msg;
2768 
2769             host_msg = g_malloc(msg.msg_iov->iov_len);
2770             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2771             ret = fd_trans_target_to_host_data(fd)(host_msg,
2772                                                    msg.msg_iov->iov_len);
2773             if (ret >= 0) {
2774                 msg.msg_iov->iov_base = host_msg;
2775                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2776             }
2777             g_free(host_msg);
2778         } else {
2779             ret = target_to_host_cmsg(&msg, msgp);
2780             if (ret == 0) {
2781                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2782             }
2783         }
2784     } else {
2785         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2786         if (!is_error(ret)) {
2787             len = ret;
2788             if (fd_trans_host_to_target_data(fd)) {
2789                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2790                                                MIN(msg.msg_iov->iov_len, len));
2791             } else {
2792                 ret = host_to_target_cmsg(msgp, &msg);
2793             }
2794             if (!is_error(ret)) {
2795                 msgp->msg_namelen = tswap32(msg.msg_namelen);
2796                 msgp->msg_flags = tswap32(msg.msg_flags);
2797                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2798                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2799                                     msg.msg_name, msg.msg_namelen);
2800                     if (ret) {
2801                         goto out;
2802                     }
2803                 }
2804 
2805                 ret = len;
2806             }
2807         }
2808     }
2809 
2810 out:
2811     unlock_iovec(vec, target_vec, count, !send);
2812 out2:
2813     return ret;
2814 }
2815 
2816 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2817                                int flags, int send)
2818 {
2819     abi_long ret;
2820     struct target_msghdr *msgp;
2821 
2822     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2823                           msgp,
2824                           target_msg,
2825                           send ? 1 : 0)) {
2826         return -TARGET_EFAULT;
2827     }
2828     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2829     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2830     return ret;
2831 }
2832 
2833 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2834  * so it might not have this *mmsg-specific flag either.
2835  */
2836 #ifndef MSG_WAITFORONE
2837 #define MSG_WAITFORONE 0x10000
2838 #endif
2839 
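/*
 * sendmmsg()/recvmmsg() are emulated as a loop over the single-message
 * helper above.  As with the kernel interface, a failure after at least
 * one message has been processed still returns the count of processed
 * messages rather than the error, and MSG_WAITFORONE switches on
 * MSG_DONTWAIT after the first received datagram.
 */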
2840 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2841                                 unsigned int vlen, unsigned int flags,
2842                                 int send)
2843 {
2844     struct target_mmsghdr *mmsgp;
2845     abi_long ret = 0;
2846     int i;
2847 
2848     if (vlen > UIO_MAXIOV) {
2849         vlen = UIO_MAXIOV;
2850     }
2851 
2852     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2853     if (!mmsgp) {
2854         return -TARGET_EFAULT;
2855     }
2856 
2857     for (i = 0; i < vlen; i++) {
2858         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2859         if (is_error(ret)) {
2860             break;
2861         }
2862         mmsgp[i].msg_len = tswap32(ret);
2863         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2864         if (flags & MSG_WAITFORONE) {
2865             flags |= MSG_DONTWAIT;
2866         }
2867     }
2868 
2869     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2870 
2871     /* Return number of datagrams sent if we sent any at all;
2872      * otherwise return the error.
2873      */
2874     if (i) {
2875         return i;
2876     }
2877     return ret;
2878 }
2879 
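/*
 * accept4(2), also used to implement plain accept() (flags == 0).  The
 * guest addrlen is read and validated first because Linux returns EINVAL
 * for an invalid addrlen pointer; after the host call the returned
 * sockaddr is converted back, clamped to the smaller of the buffer the
 * guest supplied and the length the kernel reported.
 */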
2880 /* do_accept4() Must return target values and target errnos. */
2881 static abi_long do_accept4(int fd, abi_ulong target_addr,
2882                            abi_ulong target_addrlen_addr, int flags)
2883 {
2884     socklen_t addrlen, ret_addrlen;
2885     void *addr;
2886     abi_long ret;
2887     int host_flags;
2888 
2889     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2890 
2891     if (target_addr == 0) {
2892         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2893     }
2894 
2895     /* Linux returns EINVAL if the addrlen pointer is invalid */
2896     if (get_user_u32(addrlen, target_addrlen_addr))
2897         return -TARGET_EINVAL;
2898 
2899     if ((int)addrlen < 0) {
2900         return -TARGET_EINVAL;
2901     }
2902 
2903     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2904         return -TARGET_EINVAL;
2905 
2906     addr = alloca(addrlen);
2907 
2908     ret_addrlen = addrlen;
2909     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
2910     if (!is_error(ret)) {
2911         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2912         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2913             ret = -TARGET_EFAULT;
2914         }
2915     }
2916     return ret;
2917 }
2918 
2919 /* do_getpeername() Must return target values and target errnos. */
2920 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2921                                abi_ulong target_addrlen_addr)
2922 {
2923     socklen_t addrlen, ret_addrlen;
2924     void *addr;
2925     abi_long ret;
2926 
2927     if (get_user_u32(addrlen, target_addrlen_addr))
2928         return -TARGET_EFAULT;
2929 
2930     if ((int)addrlen < 0) {
2931         return -TARGET_EINVAL;
2932     }
2933 
2934     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2935         return -TARGET_EFAULT;
2936 
2937     addr = alloca(addrlen);
2938 
2939     ret_addrlen = addrlen;
2940     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
2941     if (!is_error(ret)) {
2942         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2943         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2944             ret = -TARGET_EFAULT;
2945         }
2946     }
2947     return ret;
2948 }
2949 
2950 /* do_getsockname() Must return target values and target errnos. */
2951 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2952                                abi_ulong target_addrlen_addr)
2953 {
2954     socklen_t addrlen, ret_addrlen;
2955     void *addr;
2956     abi_long ret;
2957 
2958     if (get_user_u32(addrlen, target_addrlen_addr))
2959         return -TARGET_EFAULT;
2960 
2961     if ((int)addrlen < 0) {
2962         return -TARGET_EINVAL;
2963     }
2964 
2965     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2966         return -TARGET_EFAULT;
2967 
2968     addr = alloca(addrlen);
2969 
2970     ret_addrlen = addrlen;
2971     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
2972     if (!is_error(ret)) {
2973         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2974         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2975             ret = -TARGET_EFAULT;
2976         }
2977     }
2978     return ret;
2979 }
2980 
2981 /* do_socketpair() Must return target values and target errnos. */
2982 static abi_long do_socketpair(int domain, int type, int protocol,
2983                               abi_ulong target_tab_addr)
2984 {
2985     int tab[2];
2986     abi_long ret;
2987 
2988     target_to_host_sock_type(&type);
2989 
2990     ret = get_errno(socketpair(domain, type, protocol, tab));
2991     if (!is_error(ret)) {
2992         if (put_user_s32(tab[0], target_tab_addr)
2993             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2994             ret = -TARGET_EFAULT;
2995     }
2996     return ret;
2997 }
2998 
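/*
 * sendto(2), also used for send() (target_addr == 0).  When an fd data
 * translator is registered for the descriptor, the payload is duplicated
 * and converted before transmission so the guest's buffer is left
 * untouched.
 */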
2999 /* do_sendto() Must return target values and target errnos. */
3000 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3001                           abi_ulong target_addr, socklen_t addrlen)
3002 {
3003     void *addr;
3004     void *host_msg;
3005     void *copy_msg = NULL;
3006     abi_long ret;
3007 
3008     if ((int)addrlen < 0) {
3009         return -TARGET_EINVAL;
3010     }
3011 
3012     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3013     if (!host_msg)
3014         return -TARGET_EFAULT;
3015     if (fd_trans_target_to_host_data(fd)) {
3016         copy_msg = host_msg;
3017         host_msg = g_malloc(len);
3018         memcpy(host_msg, copy_msg, len);
3019         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3020         if (ret < 0) {
3021             goto fail;
3022         }
3023     }
3024     if (target_addr) {
3025         addr = alloca(addrlen+1);
3026         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3027         if (ret) {
3028             goto fail;
3029         }
3030         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3031     } else {
3032         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3033     }
3034 fail:
3035     if (copy_msg) {
3036         g_free(host_msg);
3037         host_msg = copy_msg;
3038     }
3039     unlock_user(host_msg, msg, 0);
3040     return ret;
3041 }
3042 
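/*
 * recvfrom(2), also used for recv() (target_addr == 0).  The data buffer
 * is locked for writing up front; on success an optional fd translator
 * converts the payload in place, and the peer address, if requested, is
 * converted and written back along with its real length.
 */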
3043 /* do_recvfrom() Must return target values and target errnos. */
3044 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3045                             abi_ulong target_addr,
3046                             abi_ulong target_addrlen)
3047 {
3048     socklen_t addrlen, ret_addrlen;
3049     void *addr;
3050     void *host_msg;
3051     abi_long ret;
3052 
3053     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3054     if (!host_msg)
3055         return -TARGET_EFAULT;
3056     if (target_addr) {
3057         if (get_user_u32(addrlen, target_addrlen)) {
3058             ret = -TARGET_EFAULT;
3059             goto fail;
3060         }
3061         if ((int)addrlen < 0) {
3062             ret = -TARGET_EINVAL;
3063             goto fail;
3064         }
3065         addr = alloca(addrlen);
3066         ret_addrlen = addrlen;
3067         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3068                                       addr, &ret_addrlen));
3069     } else {
3070         addr = NULL; /* To keep compiler quiet.  */
3071         addrlen = 0; /* To keep compiler quiet.  */
3072         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3073     }
3074     if (!is_error(ret)) {
3075         if (fd_trans_host_to_target_data(fd)) {
3076             abi_long trans;
3077             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3078             if (is_error(trans)) {
3079                 ret = trans;
3080                 goto fail;
3081             }
3082         }
3083         if (target_addr) {
3084             host_to_target_sockaddr(target_addr, addr,
3085                                     MIN(addrlen, ret_addrlen));
3086             if (put_user_u32(ret_addrlen, target_addrlen)) {
3087                 ret = -TARGET_EFAULT;
3088                 goto fail;
3089             }
3090         }
3091         unlock_user(host_msg, msg, len);
3092     } else {
3093 fail:
3094         unlock_user(host_msg, msg, 0);
3095     }
3096     return ret;
3097 }
3098 
3099 #ifdef TARGET_NR_socketcall
3100 /* do_socketcall() must return target values and target errnos. */
3101 static abi_long do_socketcall(int num, abi_ulong vptr)
3102 {
3103     static const unsigned nargs[] = { /* number of arguments per operation */
3104         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3105         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3106         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3107         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3108         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3109         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3110         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3111         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3112         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3113         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3114         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3115         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3116         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3117         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3118         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3119         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3120         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3121         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3122         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3123         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3124     };
3125     abi_long a[6]; /* max 6 args */
3126     unsigned i;
3127 
3128     /* check the range of the first argument num */
3129     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3130     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3131         return -TARGET_EINVAL;
3132     }
3133     /* ensure we have space for args */
3134     if (nargs[num] > ARRAY_SIZE(a)) {
3135         return -TARGET_EINVAL;
3136     }
3137     /* collect the arguments in a[] according to nargs[] */
3138     for (i = 0; i < nargs[num]; ++i) {
3139         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3140             return -TARGET_EFAULT;
3141         }
3142     }
3143     /* now when we have the args, invoke the appropriate underlying function */
3144     switch (num) {
3145     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3146         return do_socket(a[0], a[1], a[2]);
3147     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3148         return do_bind(a[0], a[1], a[2]);
3149     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3150         return do_connect(a[0], a[1], a[2]);
3151     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3152         return get_errno(listen(a[0], a[1]));
3153     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3154         return do_accept4(a[0], a[1], a[2], 0);
3155     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3156         return do_getsockname(a[0], a[1], a[2]);
3157     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3158         return do_getpeername(a[0], a[1], a[2]);
3159     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3160         return do_socketpair(a[0], a[1], a[2], a[3]);
3161     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3162         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3163     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3164         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3165     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3166         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3167     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3168         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3169     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3170         return get_errno(shutdown(a[0], a[1]));
3171     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3172         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3173     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3174         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3175     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3176         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3177     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3178         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3179     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3180         return do_accept4(a[0], a[1], a[2], a[3]);
3181     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3182         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3183     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3184         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3185     default:
3186         gemu_log("Unsupported socketcall: %d\n", num);
3187         return -TARGET_EINVAL;
3188     }
3189 }
3190 #endif
3191 
3192 #define N_SHM_REGIONS	32
3193 
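/*
 * Track the guest address and size of each active shmat() mapping so
 * that do_shmdt() can clear the page flags again when the segment is
 * detached.
 */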
3194 static struct shm_region {
3195     abi_ulong start;
3196     abi_ulong size;
3197     bool in_use;
3198 } shm_regions[N_SHM_REGIONS];
3199 
3200 #ifndef TARGET_SEMID64_DS
3201 /* asm-generic version of this struct */
3202 struct target_semid64_ds
3203 {
3204   struct target_ipc_perm sem_perm;
3205   abi_ulong sem_otime;
3206 #if TARGET_ABI_BITS == 32
3207   abi_ulong __unused1;
3208 #endif
3209   abi_ulong sem_ctime;
3210 #if TARGET_ABI_BITS == 32
3211   abi_ulong __unused2;
3212 #endif
3213   abi_ulong sem_nsems;
3214   abi_ulong __unused3;
3215   abi_ulong __unused4;
3216 };
3217 #endif
3218 
3219 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3220                                                abi_ulong target_addr)
3221 {
3222     struct target_ipc_perm *target_ip;
3223     struct target_semid64_ds *target_sd;
3224 
3225     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3226         return -TARGET_EFAULT;
3227     target_ip = &(target_sd->sem_perm);
3228     host_ip->__key = tswap32(target_ip->__key);
3229     host_ip->uid = tswap32(target_ip->uid);
3230     host_ip->gid = tswap32(target_ip->gid);
3231     host_ip->cuid = tswap32(target_ip->cuid);
3232     host_ip->cgid = tswap32(target_ip->cgid);
3233 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3234     host_ip->mode = tswap32(target_ip->mode);
3235 #else
3236     host_ip->mode = tswap16(target_ip->mode);
3237 #endif
3238 #if defined(TARGET_PPC)
3239     host_ip->__seq = tswap32(target_ip->__seq);
3240 #else
3241     host_ip->__seq = tswap16(target_ip->__seq);
3242 #endif
3243     unlock_user_struct(target_sd, target_addr, 0);
3244     return 0;
3245 }
3246 
3247 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3248                                                struct ipc_perm *host_ip)
3249 {
3250     struct target_ipc_perm *target_ip;
3251     struct target_semid64_ds *target_sd;
3252 
3253     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3254         return -TARGET_EFAULT;
3255     target_ip = &(target_sd->sem_perm);
3256     target_ip->__key = tswap32(host_ip->__key);
3257     target_ip->uid = tswap32(host_ip->uid);
3258     target_ip->gid = tswap32(host_ip->gid);
3259     target_ip->cuid = tswap32(host_ip->cuid);
3260     target_ip->cgid = tswap32(host_ip->cgid);
3261 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3262     target_ip->mode = tswap32(host_ip->mode);
3263 #else
3264     target_ip->mode = tswap16(host_ip->mode);
3265 #endif
3266 #if defined(TARGET_PPC)
3267     target_ip->__seq = tswap32(host_ip->__seq);
3268 #else
3269     target_ip->__seq = tswap16(host_ip->__seq);
3270 #endif
3271     unlock_user_struct(target_sd, target_addr, 1);
3272     return 0;
3273 }
3274 
3275 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3276                                                abi_ulong target_addr)
3277 {
3278     struct target_semid64_ds *target_sd;
3279 
3280     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3281         return -TARGET_EFAULT;
3282     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3283         return -TARGET_EFAULT;
3284     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3285     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3286     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3287     unlock_user_struct(target_sd, target_addr, 0);
3288     return 0;
3289 }
3290 
3291 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3292                                                struct semid_ds *host_sd)
3293 {
3294     struct target_semid64_ds *target_sd;
3295 
3296     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3297         return -TARGET_EFAULT;
3298     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3299         return -TARGET_EFAULT;
3300     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3301     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3302     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3303     unlock_user_struct(target_sd, target_addr, 1);
3304     return 0;
3305 }
3306 
3307 struct target_seminfo {
3308     int semmap;
3309     int semmni;
3310     int semmns;
3311     int semmnu;
3312     int semmsl;
3313     int semopm;
3314     int semume;
3315     int semusz;
3316     int semvmx;
3317     int semaem;
3318 };
3319 
3320 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3321                                               struct seminfo *host_seminfo)
3322 {
3323     struct target_seminfo *target_seminfo;
3324     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3325         return -TARGET_EFAULT;
3326     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3327     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3328     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3329     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3330     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3331     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3332     __put_user(host_seminfo->semume, &target_seminfo->semume);
3333     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3334     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3335     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3336     unlock_user_struct(target_seminfo, target_addr, 1);
3337     return 0;
3338 }
3339 
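/*
 * The caller is expected to define union semun itself (see semctl(2));
 * it is not provided by the kernel headers or by glibc.
 */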
3340 union semun {
3341     int val;
3342     struct semid_ds *buf;
3343     unsigned short *array;
3344     struct seminfo *__buf;
3345 };
3346 
3347 union target_semun {
3348     int val;
3349     abi_ulong buf;
3350     abi_ulong array;
3351     abi_ulong __buf;
3352 };
3353 
3354 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3355                                                abi_ulong target_addr)
3356 {
3357     int nsems;
3358     unsigned short *array;
3359     union semun semun;
3360     struct semid_ds semid_ds;
3361     int i, ret;
3362 
3363     semun.buf = &semid_ds;
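    /* Query the semaphore set first to learn how many values need to be
     * copied in from the guest array. */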
3364 
3365     ret = semctl(semid, 0, IPC_STAT, semun);
3366     if (ret == -1)
3367         return get_errno(ret);
3368 
3369     nsems = semid_ds.sem_nsems;
3370 
3371     *host_array = g_try_new(unsigned short, nsems);
3372     if (!*host_array) {
3373         return -TARGET_ENOMEM;
3374     }
3375     array = lock_user(VERIFY_READ, target_addr,
3376                       nsems*sizeof(unsigned short), 1);
3377     if (!array) {
3378         g_free(*host_array);
3379         return -TARGET_EFAULT;
3380     }
3381 
3382     for (i = 0; i < nsems; i++) {
3383         __get_user((*host_array)[i], &array[i]);
3384     }
3385     unlock_user(array, target_addr, 0);
3386 
3387     return 0;
3388 }
3389 
3390 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3391                                                unsigned short **host_array)
3392 {
3393     int nsems;
3394     unsigned short *array;
3395     union semun semun;
3396     struct semid_ds semid_ds;
3397     int i, ret;
3398 
3399     semun.buf = &semid_ds;
3400 
3401     ret = semctl(semid, 0, IPC_STAT, semun);
3402     if (ret == -1)
3403         return get_errno(ret);
3404 
3405     nsems = semid_ds.sem_nsems;
3406 
3407     array = lock_user(VERIFY_WRITE, target_addr,
3408                       nsems*sizeof(unsigned short), 0);
3409     if (!array) {
3410         g_free(*host_array);
        return -TARGET_EFAULT;
    }
3411 
3412     for (i = 0; i < nsems; i++) {
3413         __put_user((*host_array)[i], &array[i]);
3414     }
3415     g_free(*host_array);
3416     unlock_user(array, target_addr, 1);
3417 
3418     return 0;
3419 }
3420 
3421 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3422                                  abi_ulong target_arg)
3423 {
3424     union target_semun target_su = { .buf = target_arg };
3425     union semun arg;
3426     struct semid_ds dsarg;
3427     unsigned short *array = NULL;
3428     struct seminfo seminfo;
3429     abi_long ret = -TARGET_EINVAL;
3430     abi_long err;
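    /* Strip the IPC_64/version bits the target may pass in the command;
     * the host libc call expects the bare command value. */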
3431     cmd &= 0xff;
3432 
3433     switch (cmd) {
3434         case GETVAL:
3435         case SETVAL:
3436             /* In 64 bit cross-endian situations, we will erroneously pick up
3437              * the wrong half of the union for the "val" element.  To rectify
3438              * this, the entire 8-byte structure is byteswapped, followed by
3439              * a swap of the 4 byte val field. In other cases, the data is
3440              * already in proper host byte order. */
3441             if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3442                 target_su.buf = tswapal(target_su.buf);
3443                 arg.val = tswap32(target_su.val);
3444             } else {
3445                 arg.val = target_su.val;
3446             }
3447             ret = get_errno(semctl(semid, semnum, cmd, arg));
3448             break;
3449         case GETALL:
3450         case SETALL:
3451             err = target_to_host_semarray(semid, &array, target_su.array);
3452             if (err)
3453                 return err;
3454             arg.array = array;
3455             ret = get_errno(semctl(semid, semnum, cmd, arg));
3456             err = host_to_target_semarray(semid, target_su.array, &array);
3457             if (err)
3458                 return err;
3459             break;
3460         case IPC_STAT:
3461         case IPC_SET:
3462         case SEM_STAT:
3463             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3464             if (err)
3465                 return err;
3466             arg.buf = &dsarg;
3467             ret = get_errno(semctl(semid, semnum, cmd, arg));
3468             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3469             if (err)
3470                 return err;
3471             break;
3472         case IPC_INFO:
3473         case SEM_INFO:
3474             arg.__buf = &seminfo;
3475             ret = get_errno(semctl(semid, semnum, cmd, arg));
3476             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3477             if (err)
3478                 return err;
3479             break;
3480         case IPC_RMID:
3481         case GETPID:
3482         case GETNCNT:
3483         case GETZCNT:
3484             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3485             break;
3486     }
3487 
3488     return ret;
3489 }
3490 
3491 struct target_sembuf {
3492     unsigned short sem_num;
3493     short sem_op;
3494     short sem_flg;
3495 };
3496 
3497 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3498                                              abi_ulong target_addr,
3499                                              unsigned nsops)
3500 {
3501     struct target_sembuf *target_sembuf;
3502     int i;
3503 
3504     target_sembuf = lock_user(VERIFY_READ, target_addr,
3505                               nsops*sizeof(struct target_sembuf), 1);
3506     if (!target_sembuf)
3507         return -TARGET_EFAULT;
3508 
3509     for (i = 0; i < nsops; i++) {
3510         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3511         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3512         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3513     }
3514 
3515     unlock_user(target_sembuf, target_addr, 0);
3516 
3517     return 0;
3518 }
3519 
3520 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3521 {
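    /* nsops is supplied by the guest, so this variable-length array is
     * sized directly by guest-controlled data. */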
3522     struct sembuf sops[nsops];
3523 
3524     if (target_to_host_sembuf(sops, ptr, nsops))
3525         return -TARGET_EFAULT;
3526 
3527     return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3528 }
3529 
3530 struct target_msqid_ds
3531 {
3532     struct target_ipc_perm msg_perm;
3533     abi_ulong msg_stime;
3534 #if TARGET_ABI_BITS == 32
3535     abi_ulong __unused1;
3536 #endif
3537     abi_ulong msg_rtime;
3538 #if TARGET_ABI_BITS == 32
3539     abi_ulong __unused2;
3540 #endif
3541     abi_ulong msg_ctime;
3542 #if TARGET_ABI_BITS == 32
3543     abi_ulong __unused3;
3544 #endif
3545     abi_ulong __msg_cbytes;
3546     abi_ulong msg_qnum;
3547     abi_ulong msg_qbytes;
3548     abi_ulong msg_lspid;
3549     abi_ulong msg_lrpid;
3550     abi_ulong __unused4;
3551     abi_ulong __unused5;
3552 };
3553 
3554 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3555                                                abi_ulong target_addr)
3556 {
3557     struct target_msqid_ds *target_md;
3558 
3559     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3560         return -TARGET_EFAULT;
3561     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3562         return -TARGET_EFAULT;
3563     host_md->msg_stime = tswapal(target_md->msg_stime);
3564     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3565     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3566     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3567     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3568     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3569     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3570     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3571     unlock_user_struct(target_md, target_addr, 0);
3572     return 0;
3573 }
3574 
3575 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3576                                                struct msqid_ds *host_md)
3577 {
3578     struct target_msqid_ds *target_md;
3579 
3580     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3581         return -TARGET_EFAULT;
3582     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3583         return -TARGET_EFAULT;
3584     target_md->msg_stime = tswapal(host_md->msg_stime);
3585     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3586     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3587     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3588     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3589     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3590     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3591     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3592     unlock_user_struct(target_md, target_addr, 1);
3593     return 0;
3594 }
3595 
3596 struct target_msginfo {
3597     int msgpool;
3598     int msgmap;
3599     int msgmax;
3600     int msgmnb;
3601     int msgmni;
3602     int msgssz;
3603     int msgtql;
3604     unsigned short int msgseg;
3605 };
3606 
3607 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3608                                               struct msginfo *host_msginfo)
3609 {
3610     struct target_msginfo *target_msginfo;
3611     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3612         return -TARGET_EFAULT;
3613     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3614     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3615     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3616     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3617     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3618     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3619     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3620     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3621     unlock_user_struct(target_msginfo, target_addr, 1);
3622     return 0;
3623 }
3624 
3625 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3626 {
3627     struct msqid_ds dsarg;
3628     struct msginfo msginfo;
3629     abi_long ret = -TARGET_EINVAL;
3630 
3631     cmd &= 0xff;
3632 
3633     switch (cmd) {
3634     case IPC_STAT:
3635     case IPC_SET:
3636     case MSG_STAT:
3637         if (target_to_host_msqid_ds(&dsarg,ptr))
3638             return -TARGET_EFAULT;
3639         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3640         if (host_to_target_msqid_ds(ptr,&dsarg))
3641             return -TARGET_EFAULT;
3642         break;
3643     case IPC_RMID:
3644         ret = get_errno(msgctl(msgid, cmd, NULL));
3645         break;
3646     case IPC_INFO:
3647     case MSG_INFO:
3648         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3649         if (host_to_target_msginfo(ptr, &msginfo))
3650             return -TARGET_EFAULT;
3651         break;
3652     }
3653 
3654     return ret;
3655 }
3656 
3657 struct target_msgbuf {
3658     abi_long mtype;
3659     char mtext[1];
3660 };
3661 
3662 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3663                                  ssize_t msgsz, int msgflg)
3664 {
3665     struct target_msgbuf *target_mb;
3666     struct msgbuf *host_mb;
3667     abi_long ret = 0;
3668 
3669     if (msgsz < 0) {
3670         return -TARGET_EINVAL;
3671     }
3672 
3673     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3674         return -TARGET_EFAULT;
3675     host_mb = g_try_malloc(msgsz + sizeof(long));
3676     if (!host_mb) {
3677         unlock_user_struct(target_mb, msgp, 0);
3678         return -TARGET_ENOMEM;
3679     }
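    /* The host msgbuf begins with a native long mtype; byte-swap the
     * target's value before handing the buffer to the kernel. */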
3680     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3681     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3682     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3683     g_free(host_mb);
3684     unlock_user_struct(target_mb, msgp, 0);
3685 
3686     return ret;
3687 }
3688 
3689 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3690                                  ssize_t msgsz, abi_long msgtyp,
3691                                  int msgflg)
3692 {
3693     struct target_msgbuf *target_mb;
3694     char *target_mtext;
3695     struct msgbuf *host_mb;
3696     abi_long ret = 0;
3697 
3698     if (msgsz < 0) {
3699         return -TARGET_EINVAL;
3700     }
3701 
3702     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3703         return -TARGET_EFAULT;
3704 
3705     host_mb = g_try_malloc(msgsz + sizeof(long));
3706     if (!host_mb) {
3707         ret = -TARGET_ENOMEM;
3708         goto end;
3709     }
3710     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3711 
3712     if (ret > 0) {
3713         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3714         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3715         if (!target_mtext) {
3716             ret = -TARGET_EFAULT;
3717             goto end;
3718         }
3719         memcpy(target_mb->mtext, host_mb->mtext, ret);
3720         unlock_user(target_mtext, target_mtext_addr, ret);
3721     }
3722 
3723     target_mb->mtype = tswapal(host_mb->mtype);
3724 
3725 end:
3726     if (target_mb)
3727         unlock_user_struct(target_mb, msgp, 1);
3728     g_free(host_mb);
3729     return ret;
3730 }
3731 
3732 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3733                                                abi_ulong target_addr)
3734 {
3735     struct target_shmid_ds *target_sd;
3736 
3737     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3738         return -TARGET_EFAULT;
3739     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3740         return -TARGET_EFAULT;
3741     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3742     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3743     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3744     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3745     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3746     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3747     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3748     unlock_user_struct(target_sd, target_addr, 0);
3749     return 0;
3750 }
3751 
3752 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3753                                                struct shmid_ds *host_sd)
3754 {
3755     struct target_shmid_ds *target_sd;
3756 
3757     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3758         return -TARGET_EFAULT;
3759     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3760         return -TARGET_EFAULT;
3761     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3762     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3763     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3764     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3765     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3766     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3767     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3768     unlock_user_struct(target_sd, target_addr, 1);
3769     return 0;
3770 }
3771 
3772 struct target_shminfo {
3773     abi_ulong shmmax;
3774     abi_ulong shmmin;
3775     abi_ulong shmmni;
3776     abi_ulong shmseg;
3777     abi_ulong shmall;
3778 };
3779 
3780 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3781                                               struct shminfo *host_shminfo)
3782 {
3783     struct target_shminfo *target_shminfo;
3784     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3785         return -TARGET_EFAULT;
3786     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3787     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3788     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3789     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3790     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3791     unlock_user_struct(target_shminfo, target_addr, 1);
3792     return 0;
3793 }
3794 
3795 struct target_shm_info {
3796     int used_ids;
3797     abi_ulong shm_tot;
3798     abi_ulong shm_rss;
3799     abi_ulong shm_swp;
3800     abi_ulong swap_attempts;
3801     abi_ulong swap_successes;
3802 };
3803 
3804 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3805                                                struct shm_info *host_shm_info)
3806 {
3807     struct target_shm_info *target_shm_info;
3808     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3809         return -TARGET_EFAULT;
3810     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3811     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3812     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3813     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3814     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3815     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3816     unlock_user_struct(target_shm_info, target_addr, 1);
3817     return 0;
3818 }
3819 
3820 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3821 {
3822     struct shmid_ds dsarg;
3823     struct shminfo shminfo;
3824     struct shm_info shm_info;
3825     abi_long ret = -TARGET_EINVAL;
3826 
3827     cmd &= 0xff;
3828 
3829     switch (cmd) {
3830     case IPC_STAT:
3831     case IPC_SET:
3832     case SHM_STAT:
3833         if (target_to_host_shmid_ds(&dsarg, buf))
3834             return -TARGET_EFAULT;
3835         ret = get_errno(shmctl(shmid, cmd, &dsarg));
3836         if (host_to_target_shmid_ds(buf, &dsarg))
3837             return -TARGET_EFAULT;
3838         break;
3839     case IPC_INFO:
3840         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3841         if (host_to_target_shminfo(buf, &shminfo))
3842             return -TARGET_EFAULT;
3843         break;
3844     case SHM_INFO:
3845         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3846         if (host_to_target_shm_info(buf, &shm_info))
3847             return -TARGET_EFAULT;
3848         break;
3849     case IPC_RMID:
3850     case SHM_LOCK:
3851     case SHM_UNLOCK:
3852         ret = get_errno(shmctl(shmid, cmd, NULL));
3853         break;
3854     }
3855 
3856     return ret;
3857 }
3858 
3859 #ifndef TARGET_FORCE_SHMLBA
3860 /* For most architectures, SHMLBA is the same as the page size;
3861  * some architectures have larger values, in which case they should
3862  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3863  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3864  * and defining its own value for SHMLBA.
3865  *
3866  * The kernel also permits SHMLBA to be set by the architecture to a
3867  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3868  * this means that addresses are rounded to the large size if
3869  * SHM_RND is set but addresses not aligned to that size are not rejected
3870  * as long as they are at least page-aligned. Since the only architecture
3871  * which uses this is ia64 this code doesn't provide for that oddity.
3872  */
3873 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
3874 {
3875     return TARGET_PAGE_SIZE;
3876 }
3877 #endif
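
/*
 * As a sketch only (the value below is illustrative, not taken from any
 * real target): an architecture needing a larger alignment would define
 * TARGET_FORCE_SHMLBA in its target-specific headers and provide its own
 * helper, along the lines of:
 *
 *     #define TARGET_FORCE_SHMLBA 1
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 0x4000;
 *     }
 */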
3878 
3879 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
3880                                  int shmid, abi_ulong shmaddr, int shmflg)
3881 {
3882     abi_long raddr;
3883     void *host_raddr;
3884     struct shmid_ds shm_info;
3885     int i, ret;
3886     abi_ulong shmlba;
3887 
3888     /* find out the length of the shared memory segment */
3889     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3890     if (is_error(ret)) {
3891         /* can't get length, bail out */
3892         return ret;
3893     }
3894 
3895     shmlba = target_shmlba(cpu_env);
3896 
3897     if (shmaddr & (shmlba - 1)) {
3898         if (shmflg & SHM_RND) {
3899             shmaddr &= ~(shmlba - 1);
3900         } else {
3901             return -TARGET_EINVAL;
3902         }
3903     }
3904     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
3905         return -TARGET_EINVAL;
3906     }
3907 
3908     mmap_lock();
3909 
3910     if (shmaddr) {
3911         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3912     } else {
3913         abi_ulong mmap_start;
3914 
3915         /* In order to use the host shmat, we need to honor host SHMLBA.  */
3916         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
3917 
3918         if (mmap_start == -1) {
3919             errno = ENOMEM;
3920             host_raddr = (void *)-1;
3921         } else {
3922             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
        }
3923     }
3924 
3925     if (host_raddr == (void *)-1) {
3926         mmap_unlock();
3927         return get_errno((long)host_raddr);
3928     }
3929     raddr = h2g((unsigned long)host_raddr);
3930 
3931     page_set_flags(raddr, raddr + shm_info.shm_segsz,
3932                    PAGE_VALID | PAGE_READ |
3933                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3934 
3935     for (i = 0; i < N_SHM_REGIONS; i++) {
3936         if (!shm_regions[i].in_use) {
3937             shm_regions[i].in_use = true;
3938             shm_regions[i].start = raddr;
3939             shm_regions[i].size = shm_info.shm_segsz;
3940             break;
3941         }
3942     }
3943 
3944     mmap_unlock();
3945     return raddr;
3946 
3947 }
3948 
3949 static inline abi_long do_shmdt(abi_ulong shmaddr)
3950 {
3951     int i;
3952     abi_long rv;
3953 
3954     mmap_lock();
3955 
3956     for (i = 0; i < N_SHM_REGIONS; ++i) {
3957         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3958             shm_regions[i].in_use = false;
3959             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3960             break;
3961         }
3962     }
3963     rv = get_errno(shmdt(g2h(shmaddr)));
3964 
3965     mmap_unlock();
3966 
3967     return rv;
3968 }
3969 
3970 #ifdef TARGET_NR_ipc
3971 /* ??? This only works with linear mappings.  */
3972 /* do_ipc() must return target values and target errnos. */
3973 static abi_long do_ipc(CPUArchState *cpu_env,
3974                        unsigned int call, abi_long first,
3975                        abi_long second, abi_long third,
3976                        abi_long ptr, abi_long fifth)
3977 {
3978     int version;
3979     abi_long ret = 0;
3980 
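    /* The low 16 bits of 'call' select the IPC operation; the upper bits
     * carry the call version (see ipc(2)). */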
3981     version = call >> 16;
3982     call &= 0xffff;
3983 
3984     switch (call) {
3985     case IPCOP_semop:
3986         ret = do_semop(first, ptr, second);
3987         break;
3988 
3989     case IPCOP_semget:
3990         ret = get_errno(semget(first, second, third));
3991         break;
3992 
3993     case IPCOP_semctl: {
3994         /* The semun argument to semctl is passed by value, so dereference the
3995          * ptr argument. */
3996         abi_ulong atptr;
3997         get_user_ual(atptr, ptr);
3998         ret = do_semctl(first, second, third, atptr);
3999         break;
4000     }
4001 
4002     case IPCOP_msgget:
4003         ret = get_errno(msgget(first, second));
4004         break;
4005 
4006     case IPCOP_msgsnd:
4007         ret = do_msgsnd(first, ptr, second, third);
4008         break;
4009 
4010     case IPCOP_msgctl:
4011         ret = do_msgctl(first, second, ptr);
4012         break;
4013 
4014     case IPCOP_msgrcv:
4015         switch (version) {
4016         case 0:
4017             {
4018                 struct target_ipc_kludge {
4019                     abi_long msgp;
4020                     abi_long msgtyp;
4021                 } *tmp;
4022 
4023                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4024                     ret = -TARGET_EFAULT;
4025                     break;
4026                 }
4027 
4028                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4029 
4030                 unlock_user_struct(tmp, ptr, 0);
4031                 break;
4032             }
4033         default:
4034             ret = do_msgrcv(first, ptr, second, fifth, third);
4035         }
4036         break;
4037 
4038     case IPCOP_shmat:
4039         switch (version) {
4040         default:
4041         {
4042             abi_ulong raddr;
4043             raddr = do_shmat(cpu_env, first, ptr, second);
4044             if (is_error(raddr))
4045                 return get_errno(raddr);
4046             if (put_user_ual(raddr, third))
4047                 return -TARGET_EFAULT;
4048             break;
4049         }
4050         case 1:
4051             ret = -TARGET_EINVAL;
4052             break;
4053         }
4054         break;
4055     case IPCOP_shmdt:
4056         ret = do_shmdt(ptr);
4057         break;
4058 
4059     case IPCOP_shmget:
4060         /* IPC_* flag values are the same on all linux platforms */
4061         ret = get_errno(shmget(first, second, third));
4062         break;
4063 
4064     /* IPC_* and SHM_* command values are the same on all linux platforms */
4065     case IPCOP_shmctl:
4066         ret = do_shmctl(first, second, ptr);
4067         break;
4068     default:
4069         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4070         ret = -TARGET_ENOSYS;
4071         break;
4072     }
4073     return ret;
4074 }
4075 #endif
4076 
4077 /* kernel structure type definitions */
4078 
4079 #define STRUCT(name, ...) STRUCT_ ## name,
4080 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4081 enum {
4082 #include "syscall_types.h"
4083 STRUCT_MAX
4084 };
4085 #undef STRUCT
4086 #undef STRUCT_SPECIAL
4087 
4088 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4089 #define STRUCT_SPECIAL(name)
4090 #include "syscall_types.h"
4091 #undef STRUCT
4092 #undef STRUCT_SPECIAL
4093 
4094 typedef struct IOCTLEntry IOCTLEntry;
4095 
4096 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4097                              int fd, int cmd, abi_long arg);
4098 
4099 struct IOCTLEntry {
4100     int target_cmd;
4101     unsigned int host_cmd;
4102     const char *name;
4103     int access;
4104     do_ioctl_fn *do_ioctl;
4105     const argtype arg_type[5];
4106 };
4107 
4108 #define IOC_R 0x0001
4109 #define IOC_W 0x0002
4110 #define IOC_RW (IOC_R | IOC_W)
4111 
4112 #define MAX_STRUCT_SIZE 4096
4113 
4114 #ifdef CONFIG_FIEMAP
4115 /* So fiemap access checks don't overflow on 32 bit systems.
4116  * This is very slightly smaller than the limit imposed by
4117  * the underlying kernel.
4118  */
4119 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4120                             / sizeof(struct fiemap_extent))
4121 
4122 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4123                                        int fd, int cmd, abi_long arg)
4124 {
4125     /* The parameter for this ioctl is a struct fiemap followed
4126      * by an array of struct fiemap_extent whose size is set
4127      * in fiemap->fm_extent_count. The array is filled in by the
4128      * ioctl.
4129      */
4130     int target_size_in, target_size_out;
4131     struct fiemap *fm;
4132     const argtype *arg_type = ie->arg_type;
4133     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4134     void *argptr, *p;
4135     abi_long ret;
4136     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4137     uint32_t outbufsz;
4138     int free_fm = 0;
4139 
4140     assert(arg_type[0] == TYPE_PTR);
4141     assert(ie->access == IOC_RW);
4142     arg_type++;
4143     target_size_in = thunk_type_size(arg_type, 0);
4144     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4145     if (!argptr) {
4146         return -TARGET_EFAULT;
4147     }
4148     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4149     unlock_user(argptr, arg, 0);
4150     fm = (struct fiemap *)buf_temp;
4151     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4152         return -TARGET_EINVAL;
4153     }
4154 
4155     outbufsz = sizeof (*fm) +
4156         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4157 
4158     if (outbufsz > MAX_STRUCT_SIZE) {
4159         /* We can't fit all the extents into the fixed size buffer.
4160          * Allocate one that is large enough and use it instead.
4161          */
4162         fm = g_try_malloc(outbufsz);
4163         if (!fm) {
4164             return -TARGET_ENOMEM;
4165         }
4166         memcpy(fm, buf_temp, sizeof(struct fiemap));
4167         free_fm = 1;
4168     }
4169     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4170     if (!is_error(ret)) {
4171         target_size_out = target_size_in;
4172         /* An extent_count of 0 means we were only counting the extents
4173          * so there are no structs to copy
4174          */
4175         if (fm->fm_extent_count != 0) {
4176             target_size_out += fm->fm_mapped_extents * extent_size;
4177         }
4178         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4179         if (!argptr) {
4180             ret = -TARGET_EFAULT;
4181         } else {
4182             /* Convert the struct fiemap */
4183             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4184             if (fm->fm_extent_count != 0) {
4185                 p = argptr + target_size_in;
4186                 /* ...and then all the struct fiemap_extents */
4187                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4188                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4189                                   THUNK_TARGET);
4190                     p += extent_size;
4191                 }
4192             }
4193             unlock_user(argptr, arg, target_size_out);
4194         }
4195     }
4196     if (free_fm) {
4197         g_free(fm);
4198     }
4199     return ret;
4200 }
4201 #endif
4202 
4203 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4204                                 int fd, int cmd, abi_long arg)
4205 {
4206     const argtype *arg_type = ie->arg_type;
4207     int target_size;
4208     void *argptr;
4209     int ret;
4210     struct ifconf *host_ifconf;
4211     uint32_t outbufsz;
4212     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4213     int target_ifreq_size;
4214     int nb_ifreq;
4215     int free_buf = 0;
4216     int i;
4217     int target_ifc_len;
4218     abi_long target_ifc_buf;
4219     int host_ifc_len;
4220     char *host_ifc_buf;
4221 
4222     assert(arg_type[0] == TYPE_PTR);
4223     assert(ie->access == IOC_RW);
4224 
4225     arg_type++;
4226     target_size = thunk_type_size(arg_type, 0);
4227 
4228     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4229     if (!argptr)
4230         return -TARGET_EFAULT;
4231     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4232     unlock_user(argptr, arg, 0);
4233 
4234     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4235     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4236     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4237 
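    /*
     * A NULL ifc_buf means the caller only wants the required buffer
     * size reported back in ifc_len, so no ifreq array is needed.
     */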
4238     if (target_ifc_buf != 0) {
4239         target_ifc_len = host_ifconf->ifc_len;
4240         nb_ifreq = target_ifc_len / target_ifreq_size;
4241         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4242 
4243         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4244         if (outbufsz > MAX_STRUCT_SIZE) {
4245             /*
4246              * We can't fit all the ifreq entries into the fixed size buffer.
4247              * Allocate one that is large enough and use it instead.
4248              */
4249             host_ifconf = malloc(outbufsz);
4250             if (!host_ifconf) {
4251                 return -TARGET_ENOMEM;
4252             }
4253             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4254             free_buf = 1;
4255         }
4256         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4257 
4258         host_ifconf->ifc_len = host_ifc_len;
4259     } else {
4260         host_ifc_buf = NULL;
4261     }
4262     host_ifconf->ifc_buf = host_ifc_buf;
4263 
4264     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4265     if (!is_error(ret)) {
4266         /* convert host ifc_len to target ifc_len */
4267 
4268         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4269         target_ifc_len = nb_ifreq * target_ifreq_size;
4270         host_ifconf->ifc_len = target_ifc_len;
4271 
4272         /* restore target ifc_buf */
4273 
4274         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4275 
4276         /* copy struct ifconf to target user */
4277 
4278         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4279         if (!argptr)
4280             return -TARGET_EFAULT;
4281         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4282         unlock_user(argptr, arg, target_size);
4283 
4284         if (target_ifc_buf != 0) {
4285             /* copy ifreq[] to target user */
4286             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4287             for (i = 0; i < nb_ifreq ; i++) {
4288                 thunk_convert(argptr + i * target_ifreq_size,
4289                               host_ifc_buf + i * sizeof(struct ifreq),
4290                               ifreq_arg_type, THUNK_TARGET);
4291             }
4292             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4293         }
4294     }
4295 
4296     if (free_buf) {
4297         free(host_ifconf);
4298     }
4299 
4300     return ret;
4301 }
4302 
4303 #if defined(CONFIG_USBFS)
4304 #if HOST_LONG_BITS > 64
4305 #error USBDEVFS thunks do not support >64 bit hosts yet.
4306 #endif
4307 struct live_urb {
4308     uint64_t target_urb_adr;
4309     uint64_t target_buf_adr;
4310     char *target_buf_ptr;
4311     struct usbdevfs_urb host_urb;
4312 };
4313 
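/*
 * Submitted URBs are tracked in a hash table keyed by the guest URB
 * address (the first field of struct live_urb), so that DISCARDURB and
 * REAPURB can map a guest URB back to its host counterpart.
 */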
4314 static GHashTable *usbdevfs_urb_hashtable(void)
4315 {
4316     static GHashTable *urb_hashtable;
4317 
4318     if (!urb_hashtable) {
4319         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4320     }
4321     return urb_hashtable;
4322 }
4323 
4324 static void urb_hashtable_insert(struct live_urb *urb)
4325 {
4326     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4327     g_hash_table_insert(urb_hashtable, urb, urb);
4328 }
4329 
4330 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4331 {
4332     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4333     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4334 }
4335 
4336 static void urb_hashtable_remove(struct live_urb *urb)
4337 {
4338     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4339     g_hash_table_remove(urb_hashtable, urb);
4340 }
4341 
4342 static abi_long
4343 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4344                           int fd, int cmd, abi_long arg)
4345 {
4346     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4347     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4348     struct live_urb *lurb;
4349     void *argptr;
4350     uint64_t hurb;
4351     int target_size;
4352     uintptr_t target_urb_adr;
4353     abi_long ret;
4354 
4355     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4356 
4357     memset(buf_temp, 0, sizeof(uint64_t));
4358     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4359     if (is_error(ret)) {
4360         return ret;
4361     }
4362 
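    /* The kernel returns the address of our embedded host_urb; step back
     * by its offset to recover the enclosing live_urb bookkeeping. */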
4363     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4364     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4365     if (!lurb->target_urb_adr) {
4366         return -TARGET_EFAULT;
4367     }
4368     urb_hashtable_remove(lurb);
4369     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4370         lurb->host_urb.buffer_length);
4371     lurb->target_buf_ptr = NULL;
4372 
4373     /* restore the guest buffer pointer */
4374     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4375 
4376     /* update the guest urb struct */
4377     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4378     if (!argptr) {
4379         g_free(lurb);
4380         return -TARGET_EFAULT;
4381     }
4382     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4383     unlock_user(argptr, lurb->target_urb_adr, target_size);
4384 
4385     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4386     /* write back the urb handle */
4387     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4388     if (!argptr) {
4389         g_free(lurb);
4390         return -TARGET_EFAULT;
4391     }
4392 
4393     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4394     target_urb_adr = lurb->target_urb_adr;
4395     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4396     unlock_user(argptr, arg, target_size);
4397 
4398     g_free(lurb);
4399     return ret;
4400 }
4401 
4402 static abi_long
4403 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4404                              uint8_t *buf_temp __attribute__((unused)),
4405                              int fd, int cmd, abi_long arg)
4406 {
4407     struct live_urb *lurb;
4408 
4409     /* map target address back to host URB with metadata. */
4410     lurb = urb_hashtable_lookup(arg);
4411     if (!lurb) {
4412         return -TARGET_EFAULT;
4413     }
4414     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4415 }
4416 
4417 static abi_long
4418 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4419                             int fd, int cmd, abi_long arg)
4420 {
4421     const argtype *arg_type = ie->arg_type;
4422     int target_size;
4423     abi_long ret;
4424     void *argptr;
4425     int rw_dir;
4426     struct live_urb *lurb;
4427 
4428     /*
4429      * each submitted URB needs to map to a unique ID for the
4430      * kernel, and that unique ID needs to be a pointer to
4431      * host memory.  hence, we need to malloc for each URB.
4432      * isochronous transfers have a variable length struct.
4433      */
4434     arg_type++;
4435     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4436 
4437     /* construct host copy of urb and metadata */
4438     lurb = g_try_malloc0(sizeof(struct live_urb));
4439     if (!lurb) {
4440         return -TARGET_ENOMEM;
4441     }
4442 
4443     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4444     if (!argptr) {
4445         g_free(lurb);
4446         return -TARGET_EFAULT;
4447     }
4448     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4449     unlock_user(argptr, arg, 0);
4450 
4451     lurb->target_urb_adr = arg;
4452     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4453 
4454     /* buffer space used depends on endpoint type so lock the entire buffer */
4455     /* control type urbs should check the buffer contents for true direction */
4456     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4457     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4458         lurb->host_urb.buffer_length, 1);
4459     if (lurb->target_buf_ptr == NULL) {
4460         g_free(lurb);
4461         return -TARGET_EFAULT;
4462     }
4463 
4464     /* update buffer pointer in host copy */
4465     lurb->host_urb.buffer = lurb->target_buf_ptr;
4466 
4467     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4468     if (is_error(ret)) {
4469         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4470         g_free(lurb);
4471     } else {
4472         urb_hashtable_insert(lurb);
4473     }
4474 
4475     return ret;
4476 }
4477 #endif /* CONFIG_USBFS */
4478 
4479 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4480                             int cmd, abi_long arg)
4481 {
4482     void *argptr;
4483     struct dm_ioctl *host_dm;
4484     abi_long guest_data;
4485     uint32_t guest_data_size;
4486     int target_size;
4487     const argtype *arg_type = ie->arg_type;
4488     abi_long ret;
4489     void *big_buf = NULL;
4490     char *host_data;
4491 
4492     arg_type++;
4493     target_size = thunk_type_size(arg_type, 0);
4494     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4495     if (!argptr) {
4496         ret = -TARGET_EFAULT;
4497         goto out;
4498     }
4499     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4500     unlock_user(argptr, arg, 0);
4501 
4502     /* buf_temp is too small, so fetch things into a bigger buffer */
4503     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4504     memcpy(big_buf, buf_temp, target_size);
4505     buf_temp = big_buf;
4506     host_dm = big_buf;
4507 
4508     guest_data = arg + host_dm->data_start;
4509     if ((guest_data - arg) < 0) {
4510         ret = -TARGET_EINVAL;
4511         goto out;
4512     }
4513     guest_data_size = host_dm->data_size - host_dm->data_start;
4514     host_data = (char*)host_dm + host_dm->data_start;
4515 
4516     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4517     if (!argptr) {
4518         ret = -TARGET_EFAULT;
4519         goto out;
4520     }
4521 
4522     switch (ie->host_cmd) {
4523     case DM_REMOVE_ALL:
4524     case DM_LIST_DEVICES:
4525     case DM_DEV_CREATE:
4526     case DM_DEV_REMOVE:
4527     case DM_DEV_SUSPEND:
4528     case DM_DEV_STATUS:
4529     case DM_DEV_WAIT:
4530     case DM_TABLE_STATUS:
4531     case DM_TABLE_CLEAR:
4532     case DM_TABLE_DEPS:
4533     case DM_LIST_VERSIONS:
4534         /* no input data */
4535         break;
4536     case DM_DEV_RENAME:
4537     case DM_DEV_SET_GEOMETRY:
4538         /* data contains only strings */
4539         memcpy(host_data, argptr, guest_data_size);
4540         break;
4541     case DM_TARGET_MSG:
4542         memcpy(host_data, argptr, guest_data_size);
4543         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4544         break;
4545     case DM_TABLE_LOAD:
4546     {
4547         void *gspec = argptr;
4548         void *cur_data = host_data;
4549         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4550         int spec_size = thunk_type_size(arg_type, 0);
4551         int i;
4552 
4553         for (i = 0; i < host_dm->target_count; i++) {
4554             struct dm_target_spec *spec = cur_data;
4555             uint32_t next;
4556             int slen;
4557 
4558             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4559             slen = strlen((char*)gspec + spec_size) + 1;
4560             next = spec->next;
4561             spec->next = sizeof(*spec) + slen;
4562             strcpy((char*)&spec[1], gspec + spec_size);
4563             gspec += next;
4564             cur_data += spec->next;
4565         }
4566         break;
4567     }
4568     default:
4569         ret = -TARGET_EINVAL;
4570         unlock_user(argptr, guest_data, 0);
4571         goto out;
4572     }
4573     unlock_user(argptr, guest_data, 0);
4574 
4575     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4576     if (!is_error(ret)) {
4577         guest_data = arg + host_dm->data_start;
4578         guest_data_size = host_dm->data_size - host_dm->data_start;
4579         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
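        /* Convert any command-specific output payload back into the
         * target's layout before copying it out to the guest. */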
4580         switch (ie->host_cmd) {
4581         case DM_REMOVE_ALL:
4582         case DM_DEV_CREATE:
4583         case DM_DEV_REMOVE:
4584         case DM_DEV_RENAME:
4585         case DM_DEV_SUSPEND:
4586         case DM_DEV_STATUS:
4587         case DM_TABLE_LOAD:
4588         case DM_TABLE_CLEAR:
4589         case DM_TARGET_MSG:
4590         case DM_DEV_SET_GEOMETRY:
4591             /* no return data */
4592             break;
4593         case DM_LIST_DEVICES:
4594         {
4595             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4596             uint32_t remaining_data = guest_data_size;
4597             void *cur_data = argptr;
4598             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4599             int nl_size = 12; /* can't use thunk_size due to alignment */
4600 
4601             while (1) {
4602                 uint32_t next = nl->next;
4603                 if (next) {
4604                     nl->next = nl_size + (strlen(nl->name) + 1);
4605                 }
4606                 if (remaining_data < nl->next) {
4607                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4608                     break;
4609                 }
4610                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4611                 strcpy(cur_data + nl_size, nl->name);
4612                 cur_data += nl->next;
4613                 remaining_data -= nl->next;
4614                 if (!next) {
4615                     break;
4616                 }
4617                 nl = (void*)nl + next;
4618             }
4619             break;
4620         }
4621         case DM_DEV_WAIT:
4622         case DM_TABLE_STATUS:
4623         {
4624             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4625             void *cur_data = argptr;
4626             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4627             int spec_size = thunk_type_size(arg_type, 0);
4628             int i;
4629 
4630             for (i = 0; i < host_dm->target_count; i++) {
4631                 uint32_t next = spec->next;
4632                 int slen = strlen((char*)&spec[1]) + 1;
4633                 spec->next = (cur_data - argptr) + spec_size + slen;
4634                 if (guest_data_size < spec->next) {
4635                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4636                     break;
4637                 }
4638                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4639                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4640                 cur_data = argptr + spec->next;
4641                 spec = (void*)host_dm + host_dm->data_start + next;
4642             }
4643             break;
4644         }
4645         case DM_TABLE_DEPS:
4646         {
4647             void *hdata = (void*)host_dm + host_dm->data_start;
4648             int count = *(uint32_t*)hdata;
4649             uint64_t *hdev = hdata + 8;
4650             uint64_t *gdev = argptr + 8;
4651             int i;
4652 
4653             *(uint32_t*)argptr = tswap32(count);
4654             for (i = 0; i < count; i++) {
4655                 *gdev = tswap64(*hdev);
4656                 gdev++;
4657                 hdev++;
4658             }
4659             break;
4660         }
4661         case DM_LIST_VERSIONS:
4662         {
4663             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4664             uint32_t remaining_data = guest_data_size;
4665             void *cur_data = argptr;
4666             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4667             int vers_size = thunk_type_size(arg_type, 0);
4668 
4669             while (1) {
4670                 uint32_t next = vers->next;
4671                 if (next) {
4672                     vers->next = vers_size + (strlen(vers->name) + 1);
4673                 }
4674                 if (remaining_data < vers->next) {
4675                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4676                     break;
4677                 }
4678                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4679                 strcpy(cur_data + vers_size, vers->name);
4680                 cur_data += vers->next;
4681                 remaining_data -= vers->next;
4682                 if (!next) {
4683                     break;
4684                 }
4685                 vers = (void*)vers + next;
4686             }
4687             break;
4688         }
4689         default:
4690             unlock_user(argptr, guest_data, 0);
4691             ret = -TARGET_EINVAL;
4692             goto out;
4693         }
4694         unlock_user(argptr, guest_data, guest_data_size);
4695 
4696         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4697         if (!argptr) {
4698             ret = -TARGET_EFAULT;
4699             goto out;
4700         }
4701         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4702         unlock_user(argptr, arg, target_size);
4703     }
4704 out:
4705     g_free(big_buf);
4706     return ret;
4707 }
4708 
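/*
 * Helper for the BLKPG ioctl.  The guest passes a struct blkpg_ioctl_arg
 * whose 'data' member is itself a guest pointer to a struct blkpg_partition,
 * so two conversions are needed: the outer argument into buf_temp, then the
 * pointed-to partition descriptor into a local host copy, before swizzling
 * the data pointer and issuing the host ioctl.
 *
 * Illustrative guest-side call (a sketch, not taken from this file):
 *
 *     struct blkpg_partition part = { .pno = 1, .start = ..., .length = ... };
 *     struct blkpg_ioctl_arg a = { .op = BLKPG_ADD_PARTITION,
 *                                  .datalen = sizeof(part), .data = &part };
 *     ioctl(fd, BLKPG, &a);
 */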
4709 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4710                                int cmd, abi_long arg)
4711 {
4712     void *argptr;
4713     int target_size;
4714     const argtype *arg_type = ie->arg_type;
4715     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4716     abi_long ret;
4717 
4718     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4719     struct blkpg_partition host_part;
4720 
4721     /* Read and convert blkpg */
4722     arg_type++;
4723     target_size = thunk_type_size(arg_type, 0);
4724     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4725     if (!argptr) {
4726         ret = -TARGET_EFAULT;
4727         goto out;
4728     }
4729     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4730     unlock_user(argptr, arg, 0);
4731 
4732     switch (host_blkpg->op) {
4733     case BLKPG_ADD_PARTITION:
4734     case BLKPG_DEL_PARTITION:
4735         /* payload is struct blkpg_partition */
4736         break;
4737     default:
4738         /* Unknown opcode */
4739         ret = -TARGET_EINVAL;
4740         goto out;
4741     }
4742 
4743     /* Read and convert blkpg->data */
4744     arg = (abi_long)(uintptr_t)host_blkpg->data;
4745     target_size = thunk_type_size(part_arg_type, 0);
4746     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4747     if (!argptr) {
4748         ret = -TARGET_EFAULT;
4749         goto out;
4750     }
4751     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4752     unlock_user(argptr, arg, 0);
4753 
4754     /* Swizzle the data pointer to our local copy and call! */
4755     host_blkpg->data = &host_part;
4756     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4757 
4758 out:
4759     return ret;
4760 }
4761 
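/*
 * Helper for routing-table ioctls (e.g. SIOCADDRT/SIOCDELRT).  The argument
 * is a struct rtentry whose rt_dev member is a pointer to a device-name
 * string in guest memory, so the struct is converted field by field and the
 * string is locked into host memory separately instead of going through the
 * generic thunk conversion.
 */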
4762 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4763                                 int fd, int cmd, abi_long arg)
4764 {
4765     const argtype *arg_type = ie->arg_type;
4766     const StructEntry *se;
4767     const argtype *field_types;
4768     const int *dst_offsets, *src_offsets;
4769     int target_size;
4770     void *argptr;
4771     abi_ulong *target_rt_dev_ptr = NULL;
4772     unsigned long *host_rt_dev_ptr = NULL;
4773     abi_long ret;
4774     int i;
4775 
4776     assert(ie->access == IOC_W);
4777     assert(*arg_type == TYPE_PTR);
4778     arg_type++;
4779     assert(*arg_type == TYPE_STRUCT);
4780     target_size = thunk_type_size(arg_type, 0);
4781     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4782     if (!argptr) {
4783         return -TARGET_EFAULT;
4784     }
4785     arg_type++;
4786     assert(*arg_type == (int)STRUCT_rtentry);
4787     se = struct_entries + *arg_type++;
4788     assert(se->convert[0] == NULL);
4789     /* convert struct here to be able to catch rt_dev string */
4790     field_types = se->field_types;
4791     dst_offsets = se->field_offsets[THUNK_HOST];
4792     src_offsets = se->field_offsets[THUNK_TARGET];
4793     for (i = 0; i < se->nb_fields; i++) {
4794         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4795             assert(*field_types == TYPE_PTRVOID);
4796             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4797             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4798             if (*target_rt_dev_ptr != 0) {
4799                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4800                                                   tswapal(*target_rt_dev_ptr));
4801                 if (!*host_rt_dev_ptr) {
4802                     unlock_user(argptr, arg, 0);
4803                     return -TARGET_EFAULT;
4804                 }
4805             } else {
4806                 *host_rt_dev_ptr = 0;
4807             }
4808             field_types++;
4809             continue;
4810         }
4811         field_types = thunk_convert(buf_temp + dst_offsets[i],
4812                                     argptr + src_offsets[i],
4813                                     field_types, THUNK_HOST);
4814     }
4815     unlock_user(argptr, arg, 0);
4816 
4817     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4818 
4819     assert(host_rt_dev_ptr != NULL);
4820     assert(target_rt_dev_ptr != NULL);
4821     if (*host_rt_dev_ptr != 0) {
4822         unlock_user((void *)*host_rt_dev_ptr,
4823                     *target_rt_dev_ptr, 0);
4824     }
4825     return ret;
4826 }
4827 
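/*
 * KDSIGACCEPT carries a plain signal number, which must be translated from
 * the target's signal numbering to the host's before the ioctl is issued.
 */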
4828 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4829                                      int fd, int cmd, abi_long arg)
4830 {
4831     int sig = target_to_host_signal(arg);
4832     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4833 }
4834 
4835 #ifdef TIOCGPTPEER
4836 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
4837                                      int fd, int cmd, abi_long arg)
4838 {
4839     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
4840     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
4841 }
4842 #endif
4843 
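/*
 * Table of supported ioctls, expanded from ioctls.h.  IOCTL() creates an
 * entry handled by the generic code in do_ioctl(), IOCTL_SPECIAL() attaches
 * one of the do_ioctl_*() helpers above, and IOCTL_IGNORE() keeps the target
 * command known but with no host counterpart (so do_ioctl() returns ENOSYS).
 * As an illustration only (the real list lives in ioctls.h), an entry such as
 *
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 *
 * expands to
 *
 *     { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 *       { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
 */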
4844 static IOCTLEntry ioctl_entries[] = {
4845 #define IOCTL(cmd, access, ...) \
4846     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
4847 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
4848     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
4849 #define IOCTL_IGNORE(cmd) \
4850     { TARGET_ ## cmd, 0, #cmd },
4851 #include "ioctls.h"
4852     { 0, 0, },
4853 };
4854 
4855 /* ??? Implement proper locking for ioctls.  */
4856 /* do_ioctl() must return target values and target errnos. */
4857 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4858 {
4859     const IOCTLEntry *ie;
4860     const argtype *arg_type;
4861     abi_long ret;
4862     uint8_t buf_temp[MAX_STRUCT_SIZE];
4863     int target_size;
4864     void *argptr;
4865 
4866     ie = ioctl_entries;
4867     for(;;) {
4868         if (ie->target_cmd == 0) {
4869             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4870             return -TARGET_ENOSYS;
4871         }
4872         if (ie->target_cmd == cmd)
4873             break;
4874         ie++;
4875     }
4876     arg_type = ie->arg_type;
4877     if (ie->do_ioctl) {
4878         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4879     } else if (!ie->host_cmd) {
4880         /* Some architectures define BSD ioctls in their headers
4881            that are not implemented in Linux.  */
4882         return -TARGET_ENOSYS;
4883     }
4884 
4885     switch(arg_type[0]) {
4886     case TYPE_NULL:
4887         /* no argument */
4888         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
4889         break;
4890     case TYPE_PTRVOID:
4891     case TYPE_INT:
4892         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
4893         break;
4894     case TYPE_PTR:
4895         arg_type++;
4896         target_size = thunk_type_size(arg_type, 0);
4897         switch(ie->access) {
4898         case IOC_R:
4899             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4900             if (!is_error(ret)) {
4901                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4902                 if (!argptr)
4903                     return -TARGET_EFAULT;
4904                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4905                 unlock_user(argptr, arg, target_size);
4906             }
4907             break;
4908         case IOC_W:
4909             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4910             if (!argptr)
4911                 return -TARGET_EFAULT;
4912             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4913             unlock_user(argptr, arg, 0);
4914             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4915             break;
4916         default:
4917         case IOC_RW:
4918             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4919             if (!argptr)
4920                 return -TARGET_EFAULT;
4921             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4922             unlock_user(argptr, arg, 0);
4923             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4924             if (!is_error(ret)) {
4925                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4926                 if (!argptr)
4927                     return -TARGET_EFAULT;
4928                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4929                 unlock_user(argptr, arg, target_size);
4930             }
4931             break;
4932         }
4933         break;
4934     default:
4935         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4936                  (long)cmd, arg_type[0]);
4937         ret = -TARGET_ENOSYS;
4938         break;
4939     }
4940     return ret;
4941 }
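/*
 * Termios flag translation tables.  Each bitmask_transtbl row gives a
 * target mask/value pair and the corresponding host mask/value pair;
 * target_to_host_bitmask() and host_to_target_bitmask() walk these rows to
 * rewrite a flag word in either direction.  For simple on/off flags the mask
 * equals the value (e.g. IGNBRK); for multi-bit fields such as CBAUD or
 * CSIZE there is one row per possible value.
 */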
4942 
4943 static const bitmask_transtbl iflag_tbl[] = {
4944         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4945         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4946         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4947         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4948         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4949         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4950         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4951         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4952         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4953         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4954         { TARGET_IXON, TARGET_IXON, IXON, IXON },
4955         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4956         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4957         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4958         { 0, 0, 0, 0 }
4959 };
4960 
4961 static const bitmask_transtbl oflag_tbl[] = {
4962 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4963 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4964 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4965 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4966 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4967 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4968 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4969 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4970 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4971 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4972 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4973 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4974 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4975 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4976 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4977 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4978 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4979 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4980 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4981 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4982 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4983 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4984 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4985 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4986 	{ 0, 0, 0, 0 }
4987 };
4988 
4989 static const bitmask_transtbl cflag_tbl[] = {
4990 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4991 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4992 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4993 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4994 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4995 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4996 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4997 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4998 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4999 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5000 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5001 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5002 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5003 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5004 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5005 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5006 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5007 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5008 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5009 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5010 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5011 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5012 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5013 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5014 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5015 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5016 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5017 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5018 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5019 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5020 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5021 	{ 0, 0, 0, 0 }
5022 };
5023 
5024 static const bitmask_transtbl lflag_tbl[] = {
5025 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5026 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5027 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5028 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5029 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5030 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5031 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5032 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5033 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5034 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5035 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5036 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5037 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5038 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5039 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5040 	{ 0, 0, 0, 0 }
5041 };
5042 
5043 static void target_to_host_termios (void *dst, const void *src)
5044 {
5045     struct host_termios *host = dst;
5046     const struct target_termios *target = src;
5047 
5048     host->c_iflag =
5049         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5050     host->c_oflag =
5051         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5052     host->c_cflag =
5053         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5054     host->c_lflag =
5055         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5056     host->c_line = target->c_line;
5057 
5058     memset(host->c_cc, 0, sizeof(host->c_cc));
5059     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5060     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5061     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5062     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5063     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5064     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5065     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5066     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5067     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5068     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5069     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5070     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5071     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5072     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5073     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5074     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5075     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5076 }
5077 
5078 static void host_to_target_termios (void *dst, const void *src)
5079 {
5080     struct target_termios *target = dst;
5081     const struct host_termios *host = src;
5082 
5083     target->c_iflag =
5084         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5085     target->c_oflag =
5086         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5087     target->c_cflag =
5088         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5089     target->c_lflag =
5090         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5091     target->c_line = host->c_line;
5092 
5093     memset(target->c_cc, 0, sizeof(target->c_cc));
5094     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5095     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5096     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5097     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5098     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5099     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5100     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5101     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5102     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5103     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5104     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5105     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5106     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5107     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5108     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5109     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5110     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5111 }
5112 
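/*
 * struct termios cannot go through the generic thunk machinery because both
 * the flag bits and the c_cc control-character indices (VINTR, VMIN, ...)
 * differ between target and host, so the StructEntry below registers the
 * explicit converters defined above for both directions.
 */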
5113 static const StructEntry struct_termios_def = {
5114     .convert = { host_to_target_termios, target_to_host_termios },
5115     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5116     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5117 };
5118 
5119 static bitmask_transtbl mmap_flags_tbl[] = {
5120     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5121     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5122     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5123     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5124       MAP_ANONYMOUS, MAP_ANONYMOUS },
5125     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5126       MAP_GROWSDOWN, MAP_GROWSDOWN },
5127     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5128       MAP_DENYWRITE, MAP_DENYWRITE },
5129     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5130       MAP_EXECUTABLE, MAP_EXECUTABLE },
5131     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5132     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5133       MAP_NORESERVE, MAP_NORESERVE },
5134     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5135     /* MAP_STACK has been ignored by the kernel for quite some time.
5136        Recognize it for the target insofar as we do not want to pass
5137        it through to the host.  */
5138     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5139     { 0, 0, 0, 0 }
5140 };
5141 
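/*
 * i386 segment descriptor handling (modify_ldt, set/get_thread_area).
 * The guest-visible LDT is kept as a flat array of 8-byte descriptors in
 * guest memory (ldt_table == g2h(env->ldt.base)); write_ldt() encodes the
 * target_modify_ldt_ldt_s fields into the two 32-bit descriptor words in
 * the same way the Linux kernel does.
 */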
5142 #if defined(TARGET_I386)
5143 
5144 /* NOTE: there is really only one LDT shared by all threads */
5145 static uint8_t *ldt_table;
5146 
5147 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5148 {
5149     int size;
5150     void *p;
5151 
5152     if (!ldt_table)
5153         return 0;
5154     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5155     if (size > bytecount)
5156         size = bytecount;
5157     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5158     if (!p)
5159         return -TARGET_EFAULT;
5160     /* ??? Should this be byteswapped?  */
5161     memcpy(p, ldt_table, size);
5162     unlock_user(p, ptr, size);
5163     return size;
5164 }
5165 
5166 /* XXX: add locking support */
5167 static abi_long write_ldt(CPUX86State *env,
5168                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5169 {
5170     struct target_modify_ldt_ldt_s ldt_info;
5171     struct target_modify_ldt_ldt_s *target_ldt_info;
5172     int seg_32bit, contents, read_exec_only, limit_in_pages;
5173     int seg_not_present, useable, lm;
5174     uint32_t *lp, entry_1, entry_2;
5175 
5176     if (bytecount != sizeof(ldt_info))
5177         return -TARGET_EINVAL;
5178     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5179         return -TARGET_EFAULT;
5180     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5181     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5182     ldt_info.limit = tswap32(target_ldt_info->limit);
5183     ldt_info.flags = tswap32(target_ldt_info->flags);
5184     unlock_user_struct(target_ldt_info, ptr, 0);
5185 
5186     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5187         return -TARGET_EINVAL;
5188     seg_32bit = ldt_info.flags & 1;
5189     contents = (ldt_info.flags >> 1) & 3;
5190     read_exec_only = (ldt_info.flags >> 3) & 1;
5191     limit_in_pages = (ldt_info.flags >> 4) & 1;
5192     seg_not_present = (ldt_info.flags >> 5) & 1;
5193     useable = (ldt_info.flags >> 6) & 1;
5194 #ifdef TARGET_ABI32
5195     lm = 0;
5196 #else
5197     lm = (ldt_info.flags >> 7) & 1;
5198 #endif
5199     if (contents == 3) {
5200         if (oldmode)
5201             return -TARGET_EINVAL;
5202         if (seg_not_present == 0)
5203             return -TARGET_EINVAL;
5204     }
5205     /* allocate the LDT */
5206     if (!ldt_table) {
5207         env->ldt.base = target_mmap(0,
5208                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5209                                     PROT_READ|PROT_WRITE,
5210                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5211         if (env->ldt.base == -1)
5212             return -TARGET_ENOMEM;
5213         memset(g2h(env->ldt.base), 0,
5214                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5215         env->ldt.limit = 0xffff;
5216         ldt_table = g2h(env->ldt.base);
5217     }
5218 
5219     /* NOTE: same code as Linux kernel */
5220     /* Allow LDTs to be cleared by the user. */
5221     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5222         if (oldmode ||
5223             (contents == 0		&&
5224              read_exec_only == 1	&&
5225              seg_32bit == 0		&&
5226              limit_in_pages == 0	&&
5227              seg_not_present == 1	&&
5228              useable == 0 )) {
5229             entry_1 = 0;
5230             entry_2 = 0;
5231             goto install;
5232         }
5233     }
5234 
5235     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5236         (ldt_info.limit & 0x0ffff);
5237     entry_2 = (ldt_info.base_addr & 0xff000000) |
5238         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5239         (ldt_info.limit & 0xf0000) |
5240         ((read_exec_only ^ 1) << 9) |
5241         (contents << 10) |
5242         ((seg_not_present ^ 1) << 15) |
5243         (seg_32bit << 22) |
5244         (limit_in_pages << 23) |
5245         (lm << 21) |
5246         0x7000;
5247     if (!oldmode)
5248         entry_2 |= (useable << 20);
5249 
5250     /* Install the new entry ...  */
5251 install:
5252     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5253     lp[0] = tswap32(entry_1);
5254     lp[1] = tswap32(entry_2);
5255     return 0;
5256 }
5257 
5258 /* i386-specific (and somewhat unusual) syscalls */
5259 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5260                               unsigned long bytecount)
5261 {
5262     abi_long ret;
5263 
5264     switch (func) {
5265     case 0:
5266         ret = read_ldt(ptr, bytecount);
5267         break;
5268     case 1:
5269         ret = write_ldt(env, ptr, bytecount, 1);
5270         break;
5271     case 0x11:
5272         ret = write_ldt(env, ptr, bytecount, 0);
5273         break;
5274     default:
5275         ret = -TARGET_ENOSYS;
5276         break;
5277     }
5278     return ret;
5279 }
5280 
5281 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5282 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5283 {
5284     uint64_t *gdt_table = g2h(env->gdt.base);
5285     struct target_modify_ldt_ldt_s ldt_info;
5286     struct target_modify_ldt_ldt_s *target_ldt_info;
5287     int seg_32bit, contents, read_exec_only, limit_in_pages;
5288     int seg_not_present, useable, lm;
5289     uint32_t *lp, entry_1, entry_2;
5290     int i;
5291 
5292     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5293     if (!target_ldt_info)
5294         return -TARGET_EFAULT;
5295     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5296     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5297     ldt_info.limit = tswap32(target_ldt_info->limit);
5298     ldt_info.flags = tswap32(target_ldt_info->flags);
5299     if (ldt_info.entry_number == -1) {
5300         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5301             if (gdt_table[i] == 0) {
5302                 ldt_info.entry_number = i;
5303                 target_ldt_info->entry_number = tswap32(i);
5304                 break;
5305             }
5306         }
5307     }
5308     unlock_user_struct(target_ldt_info, ptr, 1);
5309 
5310     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5311         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5312            return -TARGET_EINVAL;
5313     seg_32bit = ldt_info.flags & 1;
5314     contents = (ldt_info.flags >> 1) & 3;
5315     read_exec_only = (ldt_info.flags >> 3) & 1;
5316     limit_in_pages = (ldt_info.flags >> 4) & 1;
5317     seg_not_present = (ldt_info.flags >> 5) & 1;
5318     useable = (ldt_info.flags >> 6) & 1;
5319 #ifdef TARGET_ABI32
5320     lm = 0;
5321 #else
5322     lm = (ldt_info.flags >> 7) & 1;
5323 #endif
5324 
5325     if (contents == 3) {
5326         if (seg_not_present == 0)
5327             return -TARGET_EINVAL;
5328     }
5329 
5330     /* NOTE: same code as Linux kernel */
5331     /* Allow LDTs to be cleared by the user. */
5332     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5333         if ((contents == 0             &&
5334              read_exec_only == 1       &&
5335              seg_32bit == 0            &&
5336              limit_in_pages == 0       &&
5337              seg_not_present == 1      &&
5338              useable == 0 )) {
5339             entry_1 = 0;
5340             entry_2 = 0;
5341             goto install;
5342         }
5343     }
5344 
5345     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5346         (ldt_info.limit & 0x0ffff);
5347     entry_2 = (ldt_info.base_addr & 0xff000000) |
5348         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5349         (ldt_info.limit & 0xf0000) |
5350         ((read_exec_only ^ 1) << 9) |
5351         (contents << 10) |
5352         ((seg_not_present ^ 1) << 15) |
5353         (seg_32bit << 22) |
5354         (limit_in_pages << 23) |
5355         (useable << 20) |
5356         (lm << 21) |
5357         0x7000;
5358 
5359     /* Install the new entry ...  */
5360 install:
5361     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5362     lp[0] = tswap32(entry_1);
5363     lp[1] = tswap32(entry_2);
5364     return 0;
5365 }
5366 
5367 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5368 {
5369     struct target_modify_ldt_ldt_s *target_ldt_info;
5370     uint64_t *gdt_table = g2h(env->gdt.base);
5371     uint32_t base_addr, limit, flags;
5372     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5373     int seg_not_present, useable, lm;
5374     uint32_t *lp, entry_1, entry_2;
5375 
5376     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5377     if (!target_ldt_info)
5378         return -TARGET_EFAULT;
5379     idx = tswap32(target_ldt_info->entry_number);
5380     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5381         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5382         unlock_user_struct(target_ldt_info, ptr, 1);
5383         return -TARGET_EINVAL;
5384     }
5385     lp = (uint32_t *)(gdt_table + idx);
5386     entry_1 = tswap32(lp[0]);
5387     entry_2 = tswap32(lp[1]);
5388 
5389     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5390     contents = (entry_2 >> 10) & 3;
5391     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5392     seg_32bit = (entry_2 >> 22) & 1;
5393     limit_in_pages = (entry_2 >> 23) & 1;
5394     useable = (entry_2 >> 20) & 1;
5395 #ifdef TARGET_ABI32
5396     lm = 0;
5397 #else
5398     lm = (entry_2 >> 21) & 1;
5399 #endif
5400     flags = (seg_32bit << 0) | (contents << 1) |
5401         (read_exec_only << 3) | (limit_in_pages << 4) |
5402         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5403     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5404     base_addr = (entry_1 >> 16) |
5405         (entry_2 & 0xff000000) |
5406         ((entry_2 & 0xff) << 16);
5407     target_ldt_info->base_addr = tswapal(base_addr);
5408     target_ldt_info->limit = tswap32(limit);
5409     target_ldt_info->flags = tswap32(flags);
5410     unlock_user_struct(target_ldt_info, ptr, 1);
5411     return 0;
5412 }
5413 #endif /* TARGET_I386 && TARGET_ABI32 */
5414 
5415 #ifndef TARGET_ABI32
5416 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5417 {
5418     abi_long ret = 0;
5419     abi_ulong val;
5420     int idx;
5421 
5422     switch(code) {
5423     case TARGET_ARCH_SET_GS:
5424     case TARGET_ARCH_SET_FS:
5425         if (code == TARGET_ARCH_SET_GS)
5426             idx = R_GS;
5427         else
5428             idx = R_FS;
5429         cpu_x86_load_seg(env, idx, 0);
5430         env->segs[idx].base = addr;
5431         break;
5432     case TARGET_ARCH_GET_GS:
5433     case TARGET_ARCH_GET_FS:
5434         if (code == TARGET_ARCH_GET_GS)
5435             idx = R_GS;
5436         else
5437             idx = R_FS;
5438         val = env->segs[idx].base;
5439         if (put_user(val, addr, abi_ulong))
5440             ret = -TARGET_EFAULT;
5441         break;
5442     default:
5443         ret = -TARGET_EINVAL;
5444         break;
5445     }
5446     return ret;
5447 }
5448 #endif
5449 
5450 #endif /* defined(TARGET_I386) */
5451 
5452 #define NEW_STACK_SIZE 0x40000
5453 
5454 
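/*
 * State shared between do_fork() and the child thread it creates when
 * CLONE_VM is requested.  The parent holds clone_lock while it finishes
 * setting up the new CPU state; clone_func() signals info->cond once the
 * child has published its TID and then blocks on clone_lock until the
 * parent releases it, so the child never runs guest code half-initialized.
 */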
5455 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5456 typedef struct {
5457     CPUArchState *env;
5458     pthread_mutex_t mutex;
5459     pthread_cond_t cond;
5460     pthread_t thread;
5461     uint32_t tid;
5462     abi_ulong child_tidptr;
5463     abi_ulong parent_tidptr;
5464     sigset_t sigmask;
5465 } new_thread_info;
5466 
5467 static void *clone_func(void *arg)
5468 {
5469     new_thread_info *info = arg;
5470     CPUArchState *env;
5471     CPUState *cpu;
5472     TaskState *ts;
5473 
5474     rcu_register_thread();
5475     tcg_register_thread();
5476     env = info->env;
5477     cpu = ENV_GET_CPU(env);
5478     thread_cpu = cpu;
5479     ts = (TaskState *)cpu->opaque;
5480     info->tid = sys_gettid();
5481     task_settid(ts);
5482     if (info->child_tidptr)
5483         put_user_u32(info->tid, info->child_tidptr);
5484     if (info->parent_tidptr)
5485         put_user_u32(info->tid, info->parent_tidptr);
5486     /* Enable signals.  */
5487     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5488     /* Signal to the parent that we're ready.  */
5489     pthread_mutex_lock(&info->mutex);
5490     pthread_cond_broadcast(&info->cond);
5491     pthread_mutex_unlock(&info->mutex);
5492     /* Wait until the parent has finished initializing the tls state.  */
5493     pthread_mutex_lock(&clone_lock);
5494     pthread_mutex_unlock(&clone_lock);
5495     cpu_loop(env);
5496     /* never exits */
5497     return NULL;
5498 }
5499 
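/*
 * Two cases are handled below: with CLONE_VM the new task shares the guest
 * address space, so it becomes a host thread running a copied CPUArchState;
 * without CLONE_VM (including emulated vfork) we fall back to a host fork()
 * and handle the CLONE_*SETTID/CLEARTID and CLONE_SETTLS bookkeeping in the
 * child ourselves.
 */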
5500 /* do_fork() must return host values and target errnos (unlike most
5501    do_*() functions). */
5502 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5503                    abi_ulong parent_tidptr, target_ulong newtls,
5504                    abi_ulong child_tidptr)
5505 {
5506     CPUState *cpu = ENV_GET_CPU(env);
5507     int ret;
5508     TaskState *ts;
5509     CPUState *new_cpu;
5510     CPUArchState *new_env;
5511     sigset_t sigmask;
5512 
5513     flags &= ~CLONE_IGNORED_FLAGS;
5514 
5515     /* Emulate vfork() with fork() */
5516     if (flags & CLONE_VFORK)
5517         flags &= ~(CLONE_VFORK | CLONE_VM);
5518 
5519     if (flags & CLONE_VM) {
5520         TaskState *parent_ts = (TaskState *)cpu->opaque;
5521         new_thread_info info;
5522         pthread_attr_t attr;
5523 
5524         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5525             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5526             return -TARGET_EINVAL;
5527         }
5528 
5529         ts = g_new0(TaskState, 1);
5530         init_task_state(ts);
5531 
5532         /* Grab a mutex so that thread setup appears atomic.  */
5533         pthread_mutex_lock(&clone_lock);
5534 
5535         /* we create a new CPU instance. */
5536         new_env = cpu_copy(env);
5537         /* Init regs that differ from the parent.  */
5538         cpu_clone_regs(new_env, newsp);
5539         new_cpu = ENV_GET_CPU(new_env);
5540         new_cpu->opaque = ts;
5541         ts->bprm = parent_ts->bprm;
5542         ts->info = parent_ts->info;
5543         ts->signal_mask = parent_ts->signal_mask;
5544 
5545         if (flags & CLONE_CHILD_CLEARTID) {
5546             ts->child_tidptr = child_tidptr;
5547         }
5548 
5549         if (flags & CLONE_SETTLS) {
5550             cpu_set_tls (new_env, newtls);
5551         }
5552 
5553         memset(&info, 0, sizeof(info));
5554         pthread_mutex_init(&info.mutex, NULL);
5555         pthread_mutex_lock(&info.mutex);
5556         pthread_cond_init(&info.cond, NULL);
5557         info.env = new_env;
5558         if (flags & CLONE_CHILD_SETTID) {
5559             info.child_tidptr = child_tidptr;
5560         }
5561         if (flags & CLONE_PARENT_SETTID) {
5562             info.parent_tidptr = parent_tidptr;
5563         }
5564 
5565         ret = pthread_attr_init(&attr);
5566         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5567         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5568         /* It is not safe to deliver signals until the child has finished
5569            initializing, so temporarily block all signals.  */
5570         sigfillset(&sigmask);
5571         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5572 
5573         /* If this is our first additional thread, we need to ensure we
5574          * generate code for parallel execution and flush old translations.
5575          */
5576         if (!parallel_cpus) {
5577             parallel_cpus = true;
5578             tb_flush(cpu);
5579         }
5580 
5581         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5582         /* TODO: Free new CPU state if thread creation failed.  */
5583 
5584         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5585         pthread_attr_destroy(&attr);
5586         if (ret == 0) {
5587             /* Wait for the child to initialize.  */
5588             pthread_cond_wait(&info.cond, &info.mutex);
5589             ret = info.tid;
5590         } else {
5591             ret = -1;
5592         }
5593         pthread_mutex_unlock(&info.mutex);
5594         pthread_cond_destroy(&info.cond);
5595         pthread_mutex_destroy(&info.mutex);
5596         pthread_mutex_unlock(&clone_lock);
5597     } else {
5598         /* If CLONE_VM is not set, we treat the request as a fork */
5599         if (flags & CLONE_INVALID_FORK_FLAGS) {
5600             return -TARGET_EINVAL;
5601         }
5602 
5603         /* We can't support custom termination signals */
5604         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5605             return -TARGET_EINVAL;
5606         }
5607 
5608         if (block_signals()) {
5609             return -TARGET_ERESTARTSYS;
5610         }
5611 
5612         fork_start();
5613         ret = fork();
5614         if (ret == 0) {
5615             /* Child Process.  */
5616             cpu_clone_regs(env, newsp);
5617             fork_end(1);
5618             /* There is a race condition here.  The parent process could
5619                theoretically read the TID in the child process before the child
5620                tid is set.  This would require using either ptrace
5621                (not implemented) or having *_tidptr point at a shared memory
5622                mapping.  We can't repeat the spinlock hack used above because
5623                the child process gets its own copy of the lock.  */
5624             if (flags & CLONE_CHILD_SETTID)
5625                 put_user_u32(sys_gettid(), child_tidptr);
5626             if (flags & CLONE_PARENT_SETTID)
5627                 put_user_u32(sys_gettid(), parent_tidptr);
5628             ts = (TaskState *)cpu->opaque;
5629             if (flags & CLONE_SETTLS)
5630                 cpu_set_tls (env, newtls);
5631             if (flags & CLONE_CHILD_CLEARTID)
5632                 ts->child_tidptr = child_tidptr;
5633         } else {
5634             fork_end(0);
5635         }
5636     }
5637     return ret;
5638 }
5639 
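/*
 * Map a target fcntl command onto the host command we actually issue.
 * Record-locking commands are mapped onto the 64-bit variants
 * (e.g. TARGET_F_GETLK -> F_GETLK64) because do_fcntl() below always works
 * with a host struct flock64 internally.
 */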
5640 /* warning: doesn't handle Linux-specific flags... */
5641 static int target_to_host_fcntl_cmd(int cmd)
5642 {
5643     int ret;
5644 
5645     switch(cmd) {
5646     case TARGET_F_DUPFD:
5647     case TARGET_F_GETFD:
5648     case TARGET_F_SETFD:
5649     case TARGET_F_GETFL:
5650     case TARGET_F_SETFL:
5651         ret = cmd;
5652         break;
5653     case TARGET_F_GETLK:
5654         ret = F_GETLK64;
5655         break;
5656     case TARGET_F_SETLK:
5657         ret = F_SETLK64;
5658         break;
5659     case TARGET_F_SETLKW:
5660         ret = F_SETLKW64;
5661         break;
5662     case TARGET_F_GETOWN:
5663         ret = F_GETOWN;
5664         break;
5665     case TARGET_F_SETOWN:
5666         ret = F_SETOWN;
5667         break;
5668     case TARGET_F_GETSIG:
5669         ret = F_GETSIG;
5670         break;
5671     case TARGET_F_SETSIG:
5672         ret = F_SETSIG;
5673         break;
5674 #if TARGET_ABI_BITS == 32
5675     case TARGET_F_GETLK64:
5676         ret = F_GETLK64;
5677         break;
5678     case TARGET_F_SETLK64:
5679         ret = F_SETLK64;
5680         break;
5681     case TARGET_F_SETLKW64:
5682         ret = F_SETLKW64;
5683         break;
5684 #endif
5685     case TARGET_F_SETLEASE:
5686         ret = F_SETLEASE;
5687         break;
5688     case TARGET_F_GETLEASE:
5689         ret = F_GETLEASE;
5690         break;
5691 #ifdef F_DUPFD_CLOEXEC
5692     case TARGET_F_DUPFD_CLOEXEC:
5693         ret = F_DUPFD_CLOEXEC;
5694         break;
5695 #endif
5696     case TARGET_F_NOTIFY:
5697         ret = F_NOTIFY;
5698         break;
5699 #ifdef F_GETOWN_EX
5700     case TARGET_F_GETOWN_EX:
5701         ret = F_GETOWN_EX;
5702         break;
5703 #endif
5704 #ifdef F_SETOWN_EX
5705     case TARGET_F_SETOWN_EX:
5706         ret = F_SETOWN_EX;
5707         break;
5708 #endif
5709 #ifdef F_SETPIPE_SZ
5710     case TARGET_F_SETPIPE_SZ:
5711         ret = F_SETPIPE_SZ;
5712         break;
5713     case TARGET_F_GETPIPE_SZ:
5714         ret = F_GETPIPE_SZ;
5715         break;
5716 #endif
5717     default:
5718         ret = -TARGET_EINVAL;
5719         break;
5720     }
5721 
5722 #if defined(__powerpc64__)
5723     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which the
5724      * kernel does not support. The glibc fcntl() wrapper adjusts them to 5, 6
5725      * and 7 before making the syscall. Since we make the syscall directly,
5726      * adjust to what the kernel supports.
5727      */
5728     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5729         ret -= F_GETLK64 - 5;
5730     }
5731 #endif
5732 
5733     return ret;
5734 }
5735 
5736 #define FLOCK_TRANSTBL \
5737     switch (type) { \
5738     TRANSTBL_CONVERT(F_RDLCK); \
5739     TRANSTBL_CONVERT(F_WRLCK); \
5740     TRANSTBL_CONVERT(F_UNLCK); \
5741     TRANSTBL_CONVERT(F_EXLCK); \
5742     TRANSTBL_CONVERT(F_SHLCK); \
5743     }
5744 
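/*
 * FLOCK_TRANSTBL is instantiated twice with different definitions of
 * TRANSTBL_CONVERT to generate both translation directions; for instance,
 * in target_to_host_flock() each line expands to
 *     case TARGET_F_RDLCK: return F_RDLCK;
 * while in host_to_target_flock() the mapping is reversed.
 */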
5745 static int target_to_host_flock(int type)
5746 {
5747 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5748     FLOCK_TRANSTBL
5749 #undef  TRANSTBL_CONVERT
5750     return -TARGET_EINVAL;
5751 }
5752 
5753 static int host_to_target_flock(int type)
5754 {
5755 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5756     FLOCK_TRANSTBL
5757 #undef  TRANSTBL_CONVERT
5758     /* If we don't know how to convert the value coming from the host,
5759      * copy it to the target field as-is.
5760      */
5761     return type;
5762 }
5763 
5764 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5765                                             abi_ulong target_flock_addr)
5766 {
5767     struct target_flock *target_fl;
5768     int l_type;
5769 
5770     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5771         return -TARGET_EFAULT;
5772     }
5773 
5774     __get_user(l_type, &target_fl->l_type);
5775     l_type = target_to_host_flock(l_type);
5776     if (l_type < 0) {
5777         return l_type;
5778     }
5779     fl->l_type = l_type;
5780     __get_user(fl->l_whence, &target_fl->l_whence);
5781     __get_user(fl->l_start, &target_fl->l_start);
5782     __get_user(fl->l_len, &target_fl->l_len);
5783     __get_user(fl->l_pid, &target_fl->l_pid);
5784     unlock_user_struct(target_fl, target_flock_addr, 0);
5785     return 0;
5786 }
5787 
5788 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5789                                           const struct flock64 *fl)
5790 {
5791     struct target_flock *target_fl;
5792     short l_type;
5793 
5794     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5795         return -TARGET_EFAULT;
5796     }
5797 
5798     l_type = host_to_target_flock(fl->l_type);
5799     __put_user(l_type, &target_fl->l_type);
5800     __put_user(fl->l_whence, &target_fl->l_whence);
5801     __put_user(fl->l_start, &target_fl->l_start);
5802     __put_user(fl->l_len, &target_fl->l_len);
5803     __put_user(fl->l_pid, &target_fl->l_pid);
5804     unlock_user_struct(target_fl, target_flock_addr, 1);
5805     return 0;
5806 }
5807 
5808 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5809 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5810 
5811 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5812 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5813                                                    abi_ulong target_flock_addr)
5814 {
5815     struct target_oabi_flock64 *target_fl;
5816     int l_type;
5817 
5818     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5819         return -TARGET_EFAULT;
5820     }
5821 
5822     __get_user(l_type, &target_fl->l_type);
5823     l_type = target_to_host_flock(l_type);
5824     if (l_type < 0) {
5825         return l_type;
5826     }
5827     fl->l_type = l_type;
5828     __get_user(fl->l_whence, &target_fl->l_whence);
5829     __get_user(fl->l_start, &target_fl->l_start);
5830     __get_user(fl->l_len, &target_fl->l_len);
5831     __get_user(fl->l_pid, &target_fl->l_pid);
5832     unlock_user_struct(target_fl, target_flock_addr, 0);
5833     return 0;
5834 }
5835 
5836 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
5837                                                  const struct flock64 *fl)
5838 {
5839     struct target_oabi_flock64 *target_fl;
5840     short l_type;
5841 
5842     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5843         return -TARGET_EFAULT;
5844     }
5845 
5846     l_type = host_to_target_flock(fl->l_type);
5847     __put_user(l_type, &target_fl->l_type);
5848     __put_user(fl->l_whence, &target_fl->l_whence);
5849     __put_user(fl->l_start, &target_fl->l_start);
5850     __put_user(fl->l_len, &target_fl->l_len);
5851     __put_user(fl->l_pid, &target_fl->l_pid);
5852     unlock_user_struct(target_fl, target_flock_addr, 1);
5853     return 0;
5854 }
5855 #endif
5856 
5857 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
5858                                               abi_ulong target_flock_addr)
5859 {
5860     struct target_flock64 *target_fl;
5861     int l_type;
5862 
5863     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5864         return -TARGET_EFAULT;
5865     }
5866 
5867     __get_user(l_type, &target_fl->l_type);
5868     l_type = target_to_host_flock(l_type);
5869     if (l_type < 0) {
5870         return l_type;
5871     }
5872     fl->l_type = l_type;
5873     __get_user(fl->l_whence, &target_fl->l_whence);
5874     __get_user(fl->l_start, &target_fl->l_start);
5875     __get_user(fl->l_len, &target_fl->l_len);
5876     __get_user(fl->l_pid, &target_fl->l_pid);
5877     unlock_user_struct(target_fl, target_flock_addr, 0);
5878     return 0;
5879 }
5880 
5881 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
5882                                             const struct flock64 *fl)
5883 {
5884     struct target_flock64 *target_fl;
5885     short l_type;
5886 
5887     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5888         return -TARGET_EFAULT;
5889     }
5890 
5891     l_type = host_to_target_flock(fl->l_type);
5892     __put_user(l_type, &target_fl->l_type);
5893     __put_user(fl->l_whence, &target_fl->l_whence);
5894     __put_user(fl->l_start, &target_fl->l_start);
5895     __put_user(fl->l_len, &target_fl->l_len);
5896     __put_user(fl->l_pid, &target_fl->l_pid);
5897     unlock_user_struct(target_fl, target_flock_addr, 1);
5898     return 0;
5899 }
5900 
5901 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
5902 {
5903     struct flock64 fl64;
5904 #ifdef F_GETOWN_EX
5905     struct f_owner_ex fox;
5906     struct target_f_owner_ex *target_fox;
5907 #endif
5908     abi_long ret;
5909     int host_cmd = target_to_host_fcntl_cmd(cmd);
5910 
5911     if (host_cmd == -TARGET_EINVAL)
5912 	    return host_cmd;
5913 
5914     switch(cmd) {
5915     case TARGET_F_GETLK:
5916         ret = copy_from_user_flock(&fl64, arg);
5917         if (ret) {
5918             return ret;
5919         }
5920         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5921         if (ret == 0) {
5922             ret = copy_to_user_flock(arg, &fl64);
5923         }
5924         break;
5925 
5926     case TARGET_F_SETLK:
5927     case TARGET_F_SETLKW:
5928         ret = copy_from_user_flock(&fl64, arg);
5929         if (ret) {
5930             return ret;
5931         }
5932         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5933         break;
5934 
5935     case TARGET_F_GETLK64:
5936         ret = copy_from_user_flock64(&fl64, arg);
5937         if (ret) {
5938             return ret;
5939         }
5940         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5941         if (ret == 0) {
5942             ret = copy_to_user_flock64(arg, &fl64);
5943         }
5944         break;
5945     case TARGET_F_SETLK64:
5946     case TARGET_F_SETLKW64:
5947         ret = copy_from_user_flock64(&fl64, arg);
5948         if (ret) {
5949             return ret;
5950         }
5951         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5952         break;
5953 
5954     case TARGET_F_GETFL:
5955         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5956         if (ret >= 0) {
5957             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
5958         }
5959         break;
5960 
5961     case TARGET_F_SETFL:
5962         ret = get_errno(safe_fcntl(fd, host_cmd,
5963                                    target_to_host_bitmask(arg,
5964                                                           fcntl_flags_tbl)));
5965         break;
5966 
5967 #ifdef F_GETOWN_EX
5968     case TARGET_F_GETOWN_EX:
5969         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5970         if (ret >= 0) {
5971             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
5972                 return -TARGET_EFAULT;
5973             target_fox->type = tswap32(fox.type);
5974             target_fox->pid = tswap32(fox.pid);
5975             unlock_user_struct(target_fox, arg, 1);
5976         }
5977         break;
5978 #endif
5979 
5980 #ifdef F_SETOWN_EX
5981     case TARGET_F_SETOWN_EX:
5982         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
5983             return -TARGET_EFAULT;
5984         fox.type = tswap32(target_fox->type);
5985         fox.pid = tswap32(target_fox->pid);
5986         unlock_user_struct(target_fox, arg, 0);
5987         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5988         break;
5989 #endif
5990 
5991     case TARGET_F_SETOWN:
5992     case TARGET_F_GETOWN:
5993     case TARGET_F_SETSIG:
5994     case TARGET_F_GETSIG:
5995     case TARGET_F_SETLEASE:
5996     case TARGET_F_GETLEASE:
5997     case TARGET_F_SETPIPE_SZ:
5998     case TARGET_F_GETPIPE_SZ:
5999         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6000         break;
6001 
6002     default:
6003         ret = get_errno(safe_fcntl(fd, cmd, arg));
6004         break;
6005     }
6006     return ret;
6007 }
6008 
6009 #ifdef USE_UID16
6010 
6011 static inline int high2lowuid(int uid)
6012 {
6013     if (uid > 65535)
6014         return 65534;
6015     else
6016         return uid;
6017 }
6018 
6019 static inline int high2lowgid(int gid)
6020 {
6021     if (gid > 65535)
6022         return 65534;
6023     else
6024         return gid;
6025 }
6026 
6027 static inline int low2highuid(int uid)
6028 {
6029     if ((int16_t)uid == -1)
6030         return -1;
6031     else
6032         return uid;
6033 }
6034 
6035 static inline int low2highgid(int gid)
6036 {
6037     if ((int16_t)gid == -1)
6038         return -1;
6039     else
6040         return gid;
6041 }
6042 static inline int tswapid(int id)
6043 {
6044     return tswap16(id);
6045 }
6046 
6047 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6048 
6049 #else /* !USE_UID16 */
6050 static inline int high2lowuid(int uid)
6051 {
6052     return uid;
6053 }
6054 static inline int high2lowgid(int gid)
6055 {
6056     return gid;
6057 }
6058 static inline int low2highuid(int uid)
6059 {
6060     return uid;
6061 }
6062 static inline int low2highgid(int gid)
6063 {
6064     return gid;
6065 }
6066 static inline int tswapid(int id)
6067 {
6068     return tswap32(id);
6069 }
6070 
6071 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6072 
6073 #endif /* USE_UID16 */
6074 
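/*
 * On targets with 16-bit uid_t/gid_t (USE_UID16), IDs above 65535 are
 * clamped to the overflow value 65534 when copied out to the guest
 * (e.g. a host uid of 100000 is reported as 65534), and a guest value of
 * (uint16_t)-1 is mapped back to -1 so the "leave unchanged" semantics of
 * calls like setresuid() still work.
 */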
6075 /* We must do direct syscalls for setting UID/GID, because we want to
6076  * implement the Linux system call semantics of "change only for this thread",
6077  * not the libc/POSIX semantics of "change for all threads in process".
6078  * (See http://ewontfix.com/17/ for more details.)
6079  * We use the 32-bit version of the syscalls if present; if it is not
6080  * then either the host architecture supports 32-bit UIDs natively with
6081  * the standard syscall, or the 16-bit UID is the best we can do.
6082  */
6083 #ifdef __NR_setuid32
6084 #define __NR_sys_setuid __NR_setuid32
6085 #else
6086 #define __NR_sys_setuid __NR_setuid
6087 #endif
6088 #ifdef __NR_setgid32
6089 #define __NR_sys_setgid __NR_setgid32
6090 #else
6091 #define __NR_sys_setgid __NR_setgid
6092 #endif
6093 #ifdef __NR_setresuid32
6094 #define __NR_sys_setresuid __NR_setresuid32
6095 #else
6096 #define __NR_sys_setresuid __NR_setresuid
6097 #endif
6098 #ifdef __NR_setresgid32
6099 #define __NR_sys_setresgid __NR_setresgid32
6100 #else
6101 #define __NR_sys_setresgid __NR_setresgid
6102 #endif
6103 
6104 _syscall1(int, sys_setuid, uid_t, uid)
6105 _syscall1(int, sys_setgid, gid_t, gid)
6106 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6107 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6108 
6109 void syscall_init(void)
6110 {
6111     IOCTLEntry *ie;
6112     const argtype *arg_type;
6113     int size;
6114     int i;
6115 
6116     thunk_init(STRUCT_MAX);
6117 
6118 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6119 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6120 #include "syscall_types.h"
6121 #undef STRUCT
6122 #undef STRUCT_SPECIAL
6123 
6124     /* Build target_to_host_errno_table[] from
6125      * host_to_target_errno_table[]. */
6126     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6127         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6128     }
6129 
6130     /* We patch the ioctl size if necessary. We rely on the fact that
6131        no ioctl has all bits set to '1' in the size field. */
6132     ie = ioctl_entries;
6133     while (ie->target_cmd != 0) {
6134         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6135             TARGET_IOC_SIZEMASK) {
6136             arg_type = ie->arg_type;
6137             if (arg_type[0] != TYPE_PTR) {
6138                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6139                         ie->target_cmd);
6140                 exit(1);
6141             }
6142             arg_type++;
6143             size = thunk_type_size(arg_type, 0);
6144             ie->target_cmd = (ie->target_cmd &
6145                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6146                 (size << TARGET_IOC_SIZESHIFT);
6147         }
6148 
6149         /* automatic consistency check if same arch */
6150 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6151     (defined(__x86_64__) && defined(TARGET_X86_64))
6152         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6153             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6154                     ie->name, ie->target_cmd, ie->host_cmd);
6155         }
6156 #endif
6157         ie++;
6158     }
6159 }
6160 
6161 #if TARGET_ABI_BITS == 32
6162 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6163 {
6164 #ifdef TARGET_WORDS_BIGENDIAN
6165     return ((uint64_t)word0 << 32) | word1;
6166 #else
6167     return ((uint64_t)word1 << 32) | word0;
6168 #endif
6169 }
6170 #else /* TARGET_ABI_BITS == 32 */
6171 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6172 {
6173     return word0;
6174 }
6175 #endif /* TARGET_ABI_BITS != 32 */
6176 
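/*
 * Example: on a 32-bit little-endian target, a 64-bit file offset of
 * 0x100000000 arrives as word0 = 0x00000000 and word1 = 0x00000001, and
 * target_offset64() reassembles it as ((uint64_t)word1 << 32) | word0.
 */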
6177 #ifdef TARGET_NR_truncate64
6178 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6179                                          abi_long arg2,
6180                                          abi_long arg3,
6181                                          abi_long arg4)
6182 {
6183     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6184         arg2 = arg3;
6185         arg3 = arg4;
6186     }
6187     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6188 }
6189 #endif
6190 
6191 #ifdef TARGET_NR_ftruncate64
6192 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6193                                           abi_long arg2,
6194                                           abi_long arg3,
6195                                           abi_long arg4)
6196 {
6197     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6198         arg2 = arg3;
6199         arg3 = arg4;
6200     }
6201     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6202 }
6203 #endif
6204 
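/*
 * Converters between the guest and host struct timespec layouts, copying
 * tv_sec/tv_nsec in each direction with the appropriate byte swapping and
 * guest-memory access checks.
 */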
6205 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6206                                                abi_ulong target_addr)
6207 {
6208     struct target_timespec *target_ts;
6209 
6210     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6211         return -TARGET_EFAULT;
6212     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6213     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6214     unlock_user_struct(target_ts, target_addr, 0);
6215     return 0;
6216 }
6217 
6218 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6219                                                struct timespec *host_ts)
6220 {
6221     struct target_timespec *target_ts;
6222 
6223     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6224         return -TARGET_EFAULT;
6225     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6226     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6227     unlock_user_struct(target_ts, target_addr, 1);
6228     return 0;
6229 }
6230 
6231 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6232                                                  abi_ulong target_addr)
6233 {
6234     struct target_itimerspec *target_itspec;
6235 
6236     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6237         return -TARGET_EFAULT;
6238     }
6239 
6240     host_itspec->it_interval.tv_sec =
6241                             tswapal(target_itspec->it_interval.tv_sec);
6242     host_itspec->it_interval.tv_nsec =
6243                             tswapal(target_itspec->it_interval.tv_nsec);
6244     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6245     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6246 
6247     unlock_user_struct(target_itspec, target_addr, 1);
6248     return 0;
6249 }
6250 
6251 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6252                                                struct itimerspec *host_its)
6253 {
6254     struct target_itimerspec *target_itspec;
6255 
6256     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6257         return -TARGET_EFAULT;
6258     }
6259 
6260     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6261     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6262 
6263     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6264     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6265 
6266     unlock_user_struct(target_itspec, target_addr, 0);
6267     return 0;
6268 }
6269 
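/*
 * Field-by-field converters for the struct timex used by adjtimex(); the
 * fields are copied one by one in each direction, with byte swapping
 * handled by __get_user()/__put_user().
 */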
6270 static inline abi_long target_to_host_timex(struct timex *host_tx,
6271                                             abi_long target_addr)
6272 {
6273     struct target_timex *target_tx;
6274 
6275     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6276         return -TARGET_EFAULT;
6277     }
6278 
6279     __get_user(host_tx->modes, &target_tx->modes);
6280     __get_user(host_tx->offset, &target_tx->offset);
6281     __get_user(host_tx->freq, &target_tx->freq);
6282     __get_user(host_tx->maxerror, &target_tx->maxerror);
6283     __get_user(host_tx->esterror, &target_tx->esterror);
6284     __get_user(host_tx->status, &target_tx->status);
6285     __get_user(host_tx->constant, &target_tx->constant);
6286     __get_user(host_tx->precision, &target_tx->precision);
6287     __get_user(host_tx->tolerance, &target_tx->tolerance);
6288     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6289     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6290     __get_user(host_tx->tick, &target_tx->tick);
6291     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6292     __get_user(host_tx->jitter, &target_tx->jitter);
6293     __get_user(host_tx->shift, &target_tx->shift);
6294     __get_user(host_tx->stabil, &target_tx->stabil);
6295     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6296     __get_user(host_tx->calcnt, &target_tx->calcnt);
6297     __get_user(host_tx->errcnt, &target_tx->errcnt);
6298     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6299     __get_user(host_tx->tai, &target_tx->tai);
6300 
6301     unlock_user_struct(target_tx, target_addr, 0);
6302     return 0;
6303 }
6304 
6305 static inline abi_long host_to_target_timex(abi_long target_addr,
6306                                             struct timex *host_tx)
6307 {
6308     struct target_timex *target_tx;
6309 
6310     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6311         return -TARGET_EFAULT;
6312     }
6313 
6314     __put_user(host_tx->modes, &target_tx->modes);
6315     __put_user(host_tx->offset, &target_tx->offset);
6316     __put_user(host_tx->freq, &target_tx->freq);
6317     __put_user(host_tx->maxerror, &target_tx->maxerror);
6318     __put_user(host_tx->esterror, &target_tx->esterror);
6319     __put_user(host_tx->status, &target_tx->status);
6320     __put_user(host_tx->constant, &target_tx->constant);
6321     __put_user(host_tx->precision, &target_tx->precision);
6322     __put_user(host_tx->tolerance, &target_tx->tolerance);
6323     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6324     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6325     __put_user(host_tx->tick, &target_tx->tick);
6326     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6327     __put_user(host_tx->jitter, &target_tx->jitter);
6328     __put_user(host_tx->shift, &target_tx->shift);
6329     __put_user(host_tx->stabil, &target_tx->stabil);
6330     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6331     __put_user(host_tx->calcnt, &target_tx->calcnt);
6332     __put_user(host_tx->errcnt, &target_tx->errcnt);
6333     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6334     __put_user(host_tx->tai, &target_tx->tai);
6335 
6336     unlock_user_struct(target_tx, target_addr, 1);
6337     return 0;
6338 }
6339 
6340 
6341 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6342                                                abi_ulong target_addr)
6343 {
6344     struct target_sigevent *target_sevp;
6345 
6346     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6347         return -TARGET_EFAULT;
6348     }
6349 
6350     /* This union is awkward on 64 bit systems because it has a 32 bit
6351      * integer and a pointer in it; we follow the conversion approach
6352      * used for handling sigval types in signal.c so the guest should get
6353      * the correct value back even if we did a 64 bit byteswap and it's
6354      * using the 32 bit integer.
6355      */
6356     host_sevp->sigev_value.sival_ptr =
6357         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6358     host_sevp->sigev_signo =
6359         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6360     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6361     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6362 
6363     unlock_user_struct(target_sevp, target_addr, 1);
6364     return 0;
6365 }
6366 
6367 #if defined(TARGET_NR_mlockall)
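/* The MCL_* flag values are not the same on every architecture, so translate
 * the guest's mlockall() flags bit by bit rather than passing them through. */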
6368 static inline int target_to_host_mlockall_arg(int arg)
6369 {
6370     int result = 0;
6371 
6372     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6373         result |= MCL_CURRENT;
6374     }
6375     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6376         result |= MCL_FUTURE;
6377     }
6378     return result;
6379 }
6380 #endif
6381 
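/*
 * Fill a target stat64 structure (or plain target_stat, if the target has
 * no separate stat64 layout) from a host struct stat.  32-bit ARM EABI
 * guests use the differently laid out target_eabi_stat64 instead.
 */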
6382 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6383      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6384      defined(TARGET_NR_newfstatat))
6385 static inline abi_long host_to_target_stat64(void *cpu_env,
6386                                              abi_ulong target_addr,
6387                                              struct stat *host_st)
6388 {
6389 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6390     if (((CPUARMState *)cpu_env)->eabi) {
6391         struct target_eabi_stat64 *target_st;
6392 
6393         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6394             return -TARGET_EFAULT;
6395         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6396         __put_user(host_st->st_dev, &target_st->st_dev);
6397         __put_user(host_st->st_ino, &target_st->st_ino);
6398 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6399         __put_user(host_st->st_ino, &target_st->__st_ino);
6400 #endif
6401         __put_user(host_st->st_mode, &target_st->st_mode);
6402         __put_user(host_st->st_nlink, &target_st->st_nlink);
6403         __put_user(host_st->st_uid, &target_st->st_uid);
6404         __put_user(host_st->st_gid, &target_st->st_gid);
6405         __put_user(host_st->st_rdev, &target_st->st_rdev);
6406         __put_user(host_st->st_size, &target_st->st_size);
6407         __put_user(host_st->st_blksize, &target_st->st_blksize);
6408         __put_user(host_st->st_blocks, &target_st->st_blocks);
6409         __put_user(host_st->st_atime, &target_st->target_st_atime);
6410         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6411         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6412 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6413         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6414         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6415         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6416 #endif
6417         unlock_user_struct(target_st, target_addr, 1);
6418     } else
6419 #endif
6420     {
6421 #if defined(TARGET_HAS_STRUCT_STAT64)
6422         struct target_stat64 *target_st;
6423 #else
6424         struct target_stat *target_st;
6425 #endif
6426 
6427         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6428             return -TARGET_EFAULT;
6429         memset(target_st, 0, sizeof(*target_st));
6430         __put_user(host_st->st_dev, &target_st->st_dev);
6431         __put_user(host_st->st_ino, &target_st->st_ino);
6432 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6433         __put_user(host_st->st_ino, &target_st->__st_ino);
6434 #endif
6435         __put_user(host_st->st_mode, &target_st->st_mode);
6436         __put_user(host_st->st_nlink, &target_st->st_nlink);
6437         __put_user(host_st->st_uid, &target_st->st_uid);
6438         __put_user(host_st->st_gid, &target_st->st_gid);
6439         __put_user(host_st->st_rdev, &target_st->st_rdev);
6440         /* XXX: better use of kernel struct */
6441         __put_user(host_st->st_size, &target_st->st_size);
6442         __put_user(host_st->st_blksize, &target_st->st_blksize);
6443         __put_user(host_st->st_blocks, &target_st->st_blocks);
6444         __put_user(host_st->st_atime, &target_st->target_st_atime);
6445         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6446         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6447 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6448         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6449         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6450         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6451 #endif
6452         unlock_user_struct(target_st, target_addr, 1);
6453     }
6454 
6455     return 0;
6456 }
6457 #endif
6458 
6459 /* ??? Using host futex calls even when target atomic operations
6460    are not really atomic probably breaks things.  However, implementing
6461    futexes locally would make futexes shared between multiple processes
6462    tricky.  In that case they're probably useless anyway, because guest
6463    atomic operations won't work either.  */
6464 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6465                     target_ulong uaddr2, int val3)
6466 {
6467     struct timespec ts, *pts;
6468     int base_op;
6469 
6470     /* ??? We assume FUTEX_* constants are the same on both host
6471        and target.  */
6472 #ifdef FUTEX_CMD_MASK
6473     base_op = op & FUTEX_CMD_MASK;
6474 #else
6475     base_op = op;
6476 #endif
6477     switch (base_op) {
6478     case FUTEX_WAIT:
6479     case FUTEX_WAIT_BITSET:
6480         if (timeout) {
6481             pts = &ts;
6482             target_to_host_timespec(pts, timeout);
6483         } else {
6484             pts = NULL;
6485         }
6486         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6487                          pts, NULL, val3));
6488     case FUTEX_WAKE:
6489         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6490     case FUTEX_FD:
6491         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6492     case FUTEX_REQUEUE:
6493     case FUTEX_CMP_REQUEUE:
6494     case FUTEX_WAKE_OP:
6495         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6496            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6497            But the prototype takes a `struct timespec *'; insert casts
6498            to satisfy the compiler.  We do not need to tswap TIMEOUT
6499            since it's not compared to guest memory.  */
6500         pts = (struct timespec *)(uintptr_t) timeout;
6501         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6502                                     g2h(uaddr2),
6503                                     (base_op == FUTEX_CMP_REQUEUE
6504                                      ? tswap32(val3)
6505                                      : val3)));
6506     default:
6507         return -TARGET_ENOSYS;
6508     }
6509 }
6510 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
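/*
 * name_to_handle_at(): read handle_bytes from the guest's struct file_handle,
 * let the host fill in a handle of that size, then copy it back with the
 * handle_bytes/handle_type header byte-swapped (the payload is opaque to the
 * caller) and store the returned mount ID.
 */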
6511 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6512                                      abi_long handle, abi_long mount_id,
6513                                      abi_long flags)
6514 {
6515     struct file_handle *target_fh;
6516     struct file_handle *fh;
6517     int mid = 0;
6518     abi_long ret;
6519     char *name;
6520     unsigned int size, total_size;
6521 
6522     if (get_user_s32(size, handle)) {
6523         return -TARGET_EFAULT;
6524     }
6525 
6526     name = lock_user_string(pathname);
6527     if (!name) {
6528         return -TARGET_EFAULT;
6529     }
6530 
6531     total_size = sizeof(struct file_handle) + size;
6532     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6533     if (!target_fh) {
6534         unlock_user(name, pathname, 0);
6535         return -TARGET_EFAULT;
6536     }
6537 
6538     fh = g_malloc0(total_size);
6539     fh->handle_bytes = size;
6540 
6541     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6542     unlock_user(name, pathname, 0);
6543 
6544     /* man name_to_handle_at(2):
6545      * Other than the use of the handle_bytes field, the caller should treat
6546      * the file_handle structure as an opaque data type
6547      */
6548 
6549     memcpy(target_fh, fh, total_size);
6550     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6551     target_fh->handle_type = tswap32(fh->handle_type);
6552     g_free(fh);
6553     unlock_user(target_fh, handle, total_size);
6554 
6555     if (put_user_s32(mid, mount_id)) {
6556         return -TARGET_EFAULT;
6557     }
6558 
6559     return ret;
6560 
6561 }
6562 #endif
6563 
6564 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6565 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6566                                      abi_long flags)
6567 {
6568     struct file_handle *target_fh;
6569     struct file_handle *fh;
6570     unsigned int size, total_size;
6571     abi_long ret;
6572 
6573     if (get_user_s32(size, handle)) {
6574         return -TARGET_EFAULT;
6575     }
6576 
6577     total_size = sizeof(struct file_handle) + size;
6578     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6579     if (!target_fh) {
6580         return -TARGET_EFAULT;
6581     }
6582 
6583     fh = g_memdup(target_fh, total_size);
6584     fh->handle_bytes = size;
6585     fh->handle_type = tswap32(target_fh->handle_type);
6586 
6587     ret = get_errno(open_by_handle_at(mount_fd, fh,
6588                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6589 
6590     g_free(fh);
6591 
6592     unlock_user(target_fh, handle, total_size);
6593 
6594     return ret;
6595 }
6596 #endif
6597 
6598 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6599 
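/*
 * Common implementation for signalfd/signalfd4: convert the guest signal
 * mask and flag bits to host values, then register an fd translator so that
 * signalfd_siginfo records read from the descriptor are converted back to
 * the guest layout.
 */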
6600 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6601 {
6602     int host_flags;
6603     target_sigset_t *target_mask;
6604     sigset_t host_mask;
6605     abi_long ret;
6606 
6607     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6608         return -TARGET_EINVAL;
6609     }
6610     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6611         return -TARGET_EFAULT;
6612     }
6613 
6614     target_to_host_sigset(&host_mask, target_mask);
6615 
6616     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6617 
6618     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6619     if (ret >= 0) {
6620         fd_trans_register(ret, &target_signalfd_trans);
6621     }
6622 
6623     unlock_user_struct(target_mask, mask, 0);
6624 
6625     return ret;
6626 }
6627 #endif
6628 
6629 /* Map host to target signal numbers for the wait family of syscalls.
6630    Assume all other status bits are the same.  */
6631 int host_to_target_waitstatus(int status)
6632 {
6633     if (WIFSIGNALED(status)) {
6634         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6635     }
6636     if (WIFSTOPPED(status)) {
6637         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6638                | (status & 0xff);
6639     }
6640     return status;
6641 }
6642 
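/* Emulation of /proc/self/cmdline: write out the saved argv[] strings,
 * each including its terminating NUL byte. */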
6643 static int open_self_cmdline(void *cpu_env, int fd)
6644 {
6645     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6646     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6647     int i;
6648 
6649     for (i = 0; i < bprm->argc; i++) {
6650         size_t len = strlen(bprm->argv[i]) + 1;
6651 
6652         if (write(fd, bprm->argv[i], len) != len) {
6653             return -1;
6654         }
6655     }
6656 
6657     return 0;
6658 }
6659 
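/*
 * Emulation of /proc/self/maps: parse the host's maps file, keep only the
 * ranges that correspond to valid guest addresses, and print them back with
 * the addresses translated into the guest's view (tagging the guest stack).
 */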
6660 static int open_self_maps(void *cpu_env, int fd)
6661 {
6662     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6663     TaskState *ts = cpu->opaque;
6664     FILE *fp;
6665     char *line = NULL;
6666     size_t len = 0;
6667     ssize_t read;
6668 
6669     fp = fopen("/proc/self/maps", "r");
6670     if (fp == NULL) {
6671         return -1;
6672     }
6673 
6674     while ((read = getline(&line, &len, fp)) != -1) {
6675         int fields, dev_maj, dev_min, inode;
6676         uint64_t min, max, offset;
6677         char flag_r, flag_w, flag_x, flag_p;
6678         char path[512] = "";
6679         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6680                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6681                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6682 
6683         if ((fields < 10) || (fields > 11)) {
6684             continue;
6685         }
6686         if (h2g_valid(min)) {
6687             int flags = page_get_flags(h2g(min));
6688             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6689             if (page_check_range(h2g(min), max - min, flags) == -1) {
6690                 continue;
6691             }
6692             if (h2g(min) == ts->info->stack_limit) {
6693                 pstrcpy(path, sizeof(path), "      [stack]");
6694             }
6695             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6696                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6697                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6698                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6699                     path[0] ? "         " : "", path);
6700         }
6701     }
6702 
6703     free(line);
6704     fclose(fp);
6705 
6706     return 0;
6707 }
6708 
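/*
 * Emulation of /proc/self/stat: only the pid, the command name and the
 * start-of-stack value are filled in; the remaining fields are zero.
 */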
6709 static int open_self_stat(void *cpu_env, int fd)
6710 {
6711     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6712     TaskState *ts = cpu->opaque;
6713     abi_ulong start_stack = ts->info->start_stack;
6714     int i;
6715 
6716     for (i = 0; i < 44; i++) {
6717       char buf[128];
6718       int len;
6719       uint64_t val = 0;
6720 
6721       if (i == 0) {
6722         /* pid */
6723         val = getpid();
6724         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6725       } else if (i == 1) {
6726         /* app name */
6727         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6728       } else if (i == 27) {
6729         /* stack bottom */
6730         val = start_stack;
6731         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6732       } else {
6733         /* all remaining fields are reported as zero */
6734         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6735       }
6736 
6737       len = strlen(buf);
6738       if (write(fd, buf, len) != len) {
6739           return -1;
6740       }
6741     }
6742 
6743     return 0;
6744 }
6745 
6746 static int open_self_auxv(void *cpu_env, int fd)
6747 {
6748     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6749     TaskState *ts = cpu->opaque;
6750     abi_ulong auxv = ts->info->saved_auxv;
6751     abi_ulong len = ts->info->auxv_len;
6752     char *ptr;
6753 
6754     /*
6755      * The auxiliary vector is stored on the target process stack.
6756      * Read in the whole auxv vector and copy it to the file.
6757      */
6758     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6759     if (ptr != NULL) {
6760         while (len > 0) {
6761             ssize_t r;
6762             r = write(fd, ptr, len);
6763             if (r <= 0) {
6764                 break;
6765             }
6766             len -= r;
6767             ptr += r;
6768         }
6769         lseek(fd, 0, SEEK_SET);
6770         unlock_user(ptr, auxv, len);
6771     }
6772 
6773     return 0;
6774 }
6775 
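/*
 * Return non-zero if filename names the given entry for the current process
 * under /proc, i.e. either /proc/self/<entry> or /proc/<our-pid>/<entry>.
 */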
6776 static int is_proc_myself(const char *filename, const char *entry)
6777 {
6778     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6779         filename += strlen("/proc/");
6780         if (!strncmp(filename, "self/", strlen("self/"))) {
6781             filename += strlen("self/");
6782         } else if (*filename >= '1' && *filename <= '9') {
6783             char myself[80];
6784             snprintf(myself, sizeof(myself), "%d/", getpid());
6785             if (!strncmp(filename, myself, strlen(myself))) {
6786                 filename += strlen(myself);
6787             } else {
6788                 return 0;
6789             }
6790         } else {
6791             return 0;
6792         }
6793         if (!strcmp(filename, entry)) {
6794             return 1;
6795         }
6796     }
6797     return 0;
6798 }
6799 
6800 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
6801     defined(TARGET_SPARC) || defined(TARGET_M68K)
6802 static int is_proc(const char *filename, const char *entry)
6803 {
6804     return strcmp(filename, entry) == 0;
6805 }
6806 #endif
6807 
6808 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
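/*
 * The address columns in /proc/net/route are byte-order dependent, so when
 * host and guest endianness differ the destination, gateway and netmask
 * values are byte-swapped before being passed to the guest; all other
 * columns are copied through unchanged.
 */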
6809 static int open_net_route(void *cpu_env, int fd)
6810 {
6811     FILE *fp;
6812     char *line = NULL;
6813     size_t len = 0;
6814     ssize_t read;
6815 
6816     fp = fopen("/proc/net/route", "r");
6817     if (fp == NULL) {
6818         return -1;
6819     }
6820 
6821     /* read header */
6822 
6823     read = getline(&line, &len, fp);
6824     dprintf(fd, "%s", line);
6825 
6826     /* read routes */
6827 
6828     while ((read = getline(&line, &len, fp)) != -1) {
6829         char iface[16];
6830         uint32_t dest, gw, mask;
6831         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6832         int fields;
6833 
6834         fields = sscanf(line,
6835                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6836                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6837                         &mask, &mtu, &window, &irtt);
6838         if (fields != 11) {
6839             continue;
6840         }
6841         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6842                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6843                 metric, tswap32(mask), mtu, window, irtt);
6844     }
6845 
6846     free(line);
6847     fclose(fp);
6848 
6849     return 0;
6850 }
6851 #endif
6852 
6853 #if defined(TARGET_SPARC)
6854 static int open_cpuinfo(void *cpu_env, int fd)
6855 {
6856     dprintf(fd, "type\t\t: sun4u\n");
6857     return 0;
6858 }
6859 #endif
6860 
6861 #if defined(TARGET_M68K)
6862 static int open_hardware(void *cpu_env, int fd)
6863 {
6864     dprintf(fd, "Model:\t\tqemu-m68k\n");
6865     return 0;
6866 }
6867 #endif
6868 
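/*
 * do_openat() intercepts opens of the emulated /proc entries listed in
 * fakes[]: the generated contents are written into an unlinked temporary
 * file and that descriptor is returned.  /proc/self/exe is redirected to
 * the guest executable, and everything else is passed to the host via
 * safe_openat().
 */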
6869 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
6870 {
6871     struct fake_open {
6872         const char *filename;
6873         int (*fill)(void *cpu_env, int fd);
6874         int (*cmp)(const char *s1, const char *s2);
6875     };
6876     const struct fake_open *fake_open;
6877     static const struct fake_open fakes[] = {
6878         { "maps", open_self_maps, is_proc_myself },
6879         { "stat", open_self_stat, is_proc_myself },
6880         { "auxv", open_self_auxv, is_proc_myself },
6881         { "cmdline", open_self_cmdline, is_proc_myself },
6882 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6883         { "/proc/net/route", open_net_route, is_proc },
6884 #endif
6885 #if defined(TARGET_SPARC)
6886         { "/proc/cpuinfo", open_cpuinfo, is_proc },
6887 #endif
6888 #if defined(TARGET_M68K)
6889         { "/proc/hardware", open_hardware, is_proc },
6890 #endif
6891         { NULL, NULL, NULL }
6892     };
6893 
6894     if (is_proc_myself(pathname, "exe")) {
6895         int execfd = qemu_getauxval(AT_EXECFD);
6896         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
6897     }
6898 
6899     for (fake_open = fakes; fake_open->filename; fake_open++) {
6900         if (fake_open->cmp(pathname, fake_open->filename)) {
6901             break;
6902         }
6903     }
6904 
6905     if (fake_open->filename) {
6906         const char *tmpdir;
6907         char filename[PATH_MAX];
6908         int fd, r;
6909 
6910         /* create a temporary file to hold the generated /proc contents */
6911         tmpdir = getenv("TMPDIR");
6912         if (!tmpdir)
6913             tmpdir = "/tmp";
6914         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
6915         fd = mkstemp(filename);
6916         if (fd < 0) {
6917             return fd;
6918         }
6919         unlink(filename);
6920 
6921         if ((r = fake_open->fill(cpu_env, fd))) {
6922             int e = errno;
6923             close(fd);
6924             errno = e;
6925             return r;
6926         }
6927         lseek(fd, 0, SEEK_SET);
6928 
6929         return fd;
6930     }
6931 
6932     return safe_openat(dirfd, path(pathname), flags, mode);
6933 }
6934 
6935 #define TIMER_MAGIC 0x0caf0000
6936 #define TIMER_MAGIC_MASK 0xffff0000
6937 
6938 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
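/* A timer ID of 0x0caf0003, for example, decodes to internal slot 3; any
 * value without the TIMER_MAGIC bits is rejected with -TARGET_EINVAL. */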
6939 static target_timer_t get_timer_id(abi_long arg)
6940 {
6941     target_timer_t timerid = arg;
6942 
6943     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6944         return -TARGET_EINVAL;
6945     }
6946 
6947     timerid &= 0xffff;
6948 
6949     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6950         return -TARGET_EINVAL;
6951     }
6952 
6953     return timerid;
6954 }
6955 
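/*
 * Convert a CPU affinity bitmap between the guest layout (an array of
 * abi_ulong words) and the host layout (an array of unsigned long), copying
 * it bit by bit so that word-size and endianness differences are handled;
 * used when emulating the sched_getaffinity/sched_setaffinity syscalls.
 */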
6956 static int target_to_host_cpu_mask(unsigned long *host_mask,
6957                                    size_t host_size,
6958                                    abi_ulong target_addr,
6959                                    size_t target_size)
6960 {
6961     unsigned target_bits = sizeof(abi_ulong) * 8;
6962     unsigned host_bits = sizeof(*host_mask) * 8;
6963     abi_ulong *target_mask;
6964     unsigned i, j;
6965 
6966     assert(host_size >= target_size);
6967 
6968     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
6969     if (!target_mask) {
6970         return -TARGET_EFAULT;
6971     }
6972     memset(host_mask, 0, host_size);
6973 
6974     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6975         unsigned bit = i * target_bits;
6976         abi_ulong val;
6977 
6978         __get_user(val, &target_mask[i]);
6979         for (j = 0; j < target_bits; j++, bit++) {
6980             if (val & (1UL << j)) {
6981                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
6982             }
6983         }
6984     }
6985 
6986     unlock_user(target_mask, target_addr, 0);
6987     return 0;
6988 }
6989 
6990 static int host_to_target_cpu_mask(const unsigned long *host_mask,
6991                                    size_t host_size,
6992                                    abi_ulong target_addr,
6993                                    size_t target_size)
6994 {
6995     unsigned target_bits = sizeof(abi_ulong) * 8;
6996     unsigned host_bits = sizeof(*host_mask) * 8;
6997     abi_ulong *target_mask;
6998     unsigned i, j;
6999 
7000     assert(host_size >= target_size);
7001 
7002     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7003     if (!target_mask) {
7004         return -TARGET_EFAULT;
7005     }
7006 
7007     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7008         unsigned bit = i * target_bits;
7009         abi_ulong val = 0;
7010 
7011         for (j = 0; j < target_bits; j++, bit++) {
7012             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7013                 val |= 1UL << j;
7014             }
7015         }
7016         __put_user(val, &target_mask[i]);
7017     }
7018 
7019     unlock_user(target_mask, target_addr, target_size);
7020     return 0;
7021 }
7022 
7023 /* This is an internal helper for do_syscall so that it is easier
7024  * to have a single return point, allowing actions such as logging
7025  * of syscall results to be performed.
7026  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7027  */
7028 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7029                             abi_long arg2, abi_long arg3, abi_long arg4,
7030                             abi_long arg5, abi_long arg6, abi_long arg7,
7031                             abi_long arg8)
7032 {
7033     CPUState *cpu = ENV_GET_CPU(cpu_env);
7034     abi_long ret;
7035 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7036     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7037     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
7038     struct stat st;
7039 #endif
7040 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7041     || defined(TARGET_NR_fstatfs)
7042     struct statfs stfs;
7043 #endif
7044     void *p;
7045 
7046     switch(num) {
7047     case TARGET_NR_exit:
7048         /* In old applications this may be used to implement _exit(2).
7049            However in threaded applications it is used for thread termination,
7050            and _exit_group is used for application termination.
7051            Do thread termination if we have more than one thread.  */
7052 
7053         if (block_signals()) {
7054             return -TARGET_ERESTARTSYS;
7055         }
7056 
7057         cpu_list_lock();
7058 
7059         if (CPU_NEXT(first_cpu)) {
7060             TaskState *ts;
7061 
7062             /* Remove the CPU from the list.  */
7063             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7064 
7065             cpu_list_unlock();
7066 
7067             ts = cpu->opaque;
7068             if (ts->child_tidptr) {
7069                 put_user_u32(0, ts->child_tidptr);
7070                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7071                           NULL, NULL, 0);
7072             }
7073             thread_cpu = NULL;
7074             object_unref(OBJECT(cpu));
7075             g_free(ts);
7076             rcu_unregister_thread();
7077             pthread_exit(NULL);
7078         }
7079 
7080         cpu_list_unlock();
7081         preexit_cleanup(cpu_env, arg1);
7082         _exit(arg1);
7083         return 0; /* avoid warning */
7084     case TARGET_NR_read:
7085         if (arg2 == 0 && arg3 == 0) {
7086             return get_errno(safe_read(arg1, 0, 0));
7087         } else {
7088             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7089                 return -TARGET_EFAULT;
7090             ret = get_errno(safe_read(arg1, p, arg3));
7091             if (ret >= 0 &&
7092                 fd_trans_host_to_target_data(arg1)) {
7093                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7094             }
7095             unlock_user(p, arg2, ret);
7096         }
7097         return ret;
7098     case TARGET_NR_write:
7099         if (arg2 == 0 && arg3 == 0) {
7100             return get_errno(safe_write(arg1, 0, 0));
7101         }
7102         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7103             return -TARGET_EFAULT;
7104         if (fd_trans_target_to_host_data(arg1)) {
7105             void *copy = g_malloc(arg3);
7106             memcpy(copy, p, arg3);
7107             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7108             if (ret >= 0) {
7109                 ret = get_errno(safe_write(arg1, copy, ret));
7110             }
7111             g_free(copy);
7112         } else {
7113             ret = get_errno(safe_write(arg1, p, arg3));
7114         }
7115         unlock_user(p, arg2, 0);
7116         return ret;
7117 
7118 #ifdef TARGET_NR_open
7119     case TARGET_NR_open:
7120         if (!(p = lock_user_string(arg1)))
7121             return -TARGET_EFAULT;
7122         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7123                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7124                                   arg3));
7125         fd_trans_unregister(ret);
7126         unlock_user(p, arg1, 0);
7127         return ret;
7128 #endif
7129     case TARGET_NR_openat:
7130         if (!(p = lock_user_string(arg2)))
7131             return -TARGET_EFAULT;
7132         ret = get_errno(do_openat(cpu_env, arg1, p,
7133                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7134                                   arg4));
7135         fd_trans_unregister(ret);
7136         unlock_user(p, arg2, 0);
7137         return ret;
7138 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7139     case TARGET_NR_name_to_handle_at:
7140         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7141         return ret;
7142 #endif
7143 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7144     case TARGET_NR_open_by_handle_at:
7145         ret = do_open_by_handle_at(arg1, arg2, arg3);
7146         fd_trans_unregister(ret);
7147         return ret;
7148 #endif
7149     case TARGET_NR_close:
7150         fd_trans_unregister(arg1);
7151         return get_errno(close(arg1));
7152 
7153     case TARGET_NR_brk:
7154         return do_brk(arg1);
7155 #ifdef TARGET_NR_fork
7156     case TARGET_NR_fork:
7157         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7158 #endif
7159 #ifdef TARGET_NR_waitpid
7160     case TARGET_NR_waitpid:
7161         {
7162             int status;
7163             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7164             if (!is_error(ret) && arg2 && ret
7165                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7166                 return -TARGET_EFAULT;
7167         }
7168         return ret;
7169 #endif
7170 #ifdef TARGET_NR_waitid
7171     case TARGET_NR_waitid:
7172         {
7173             siginfo_t info;
7174             info.si_pid = 0;
7175             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7176             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7177                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7178                     return -TARGET_EFAULT;
7179                 host_to_target_siginfo(p, &info);
7180                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7181             }
7182         }
7183         return ret;
7184 #endif
7185 #ifdef TARGET_NR_creat /* not on alpha */
7186     case TARGET_NR_creat:
7187         if (!(p = lock_user_string(arg1)))
7188             return -TARGET_EFAULT;
7189         ret = get_errno(creat(p, arg2));
7190         fd_trans_unregister(ret);
7191         unlock_user(p, arg1, 0);
7192         return ret;
7193 #endif
7194 #ifdef TARGET_NR_link
7195     case TARGET_NR_link:
7196         {
7197             void * p2;
7198             p = lock_user_string(arg1);
7199             p2 = lock_user_string(arg2);
7200             if (!p || !p2)
7201                 ret = -TARGET_EFAULT;
7202             else
7203                 ret = get_errno(link(p, p2));
7204             unlock_user(p2, arg2, 0);
7205             unlock_user(p, arg1, 0);
7206         }
7207         return ret;
7208 #endif
7209 #if defined(TARGET_NR_linkat)
7210     case TARGET_NR_linkat:
7211         {
7212             void * p2 = NULL;
7213             if (!arg2 || !arg4)
7214                 return -TARGET_EFAULT;
7215             p  = lock_user_string(arg2);
7216             p2 = lock_user_string(arg4);
7217             if (!p || !p2)
7218                 ret = -TARGET_EFAULT;
7219             else
7220                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7221             unlock_user(p, arg2, 0);
7222             unlock_user(p2, arg4, 0);
7223         }
7224         return ret;
7225 #endif
7226 #ifdef TARGET_NR_unlink
7227     case TARGET_NR_unlink:
7228         if (!(p = lock_user_string(arg1)))
7229             return -TARGET_EFAULT;
7230         ret = get_errno(unlink(p));
7231         unlock_user(p, arg1, 0);
7232         return ret;
7233 #endif
7234 #if defined(TARGET_NR_unlinkat)
7235     case TARGET_NR_unlinkat:
7236         if (!(p = lock_user_string(arg2)))
7237             return -TARGET_EFAULT;
7238         ret = get_errno(unlinkat(arg1, p, arg3));
7239         unlock_user(p, arg2, 0);
7240         return ret;
7241 #endif
7242     case TARGET_NR_execve:
7243         {
7244             char **argp, **envp;
7245             int argc, envc;
7246             abi_ulong gp;
7247             abi_ulong guest_argp;
7248             abi_ulong guest_envp;
7249             abi_ulong addr;
7250             char **q;
7251             int total_size = 0;
7252 
7253             argc = 0;
7254             guest_argp = arg2;
7255             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7256                 if (get_user_ual(addr, gp))
7257                     return -TARGET_EFAULT;
7258                 if (!addr)
7259                     break;
7260                 argc++;
7261             }
7262             envc = 0;
7263             guest_envp = arg3;
7264             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7265                 if (get_user_ual(addr, gp))
7266                     return -TARGET_EFAULT;
7267                 if (!addr)
7268                     break;
7269                 envc++;
7270             }
7271 
7272             argp = g_new0(char *, argc + 1);
7273             envp = g_new0(char *, envc + 1);
7274 
7275             for (gp = guest_argp, q = argp; gp;
7276                   gp += sizeof(abi_ulong), q++) {
7277                 if (get_user_ual(addr, gp))
7278                     goto execve_efault;
7279                 if (!addr)
7280                     break;
7281                 if (!(*q = lock_user_string(addr)))
7282                     goto execve_efault;
7283                 total_size += strlen(*q) + 1;
7284             }
7285             *q = NULL;
7286 
7287             for (gp = guest_envp, q = envp; gp;
7288                   gp += sizeof(abi_ulong), q++) {
7289                 if (get_user_ual(addr, gp))
7290                     goto execve_efault;
7291                 if (!addr)
7292                     break;
7293                 if (!(*q = lock_user_string(addr)))
7294                     goto execve_efault;
7295                 total_size += strlen(*q) + 1;
7296             }
7297             *q = NULL;
7298 
7299             if (!(p = lock_user_string(arg1)))
7300                 goto execve_efault;
7301             /* Although execve() is not an interruptible syscall it is
7302              * a special case where we must use the safe_syscall wrapper:
7303              * if we allow a signal to happen before we make the host
7304              * syscall then we will 'lose' it, because at the point of
7305              * execve the process leaves QEMU's control. So we use the
7306              * safe syscall wrapper to ensure that we either take the
7307              * signal as a guest signal, or else it does not happen
7308              * before the execve completes and makes it the other
7309              * program's problem.
7310              */
7311             ret = get_errno(safe_execve(p, argp, envp));
7312             unlock_user(p, arg1, 0);
7313 
7314             goto execve_end;
7315 
7316         execve_efault:
7317             ret = -TARGET_EFAULT;
7318 
7319         execve_end:
7320             for (gp = guest_argp, q = argp; *q;
7321                   gp += sizeof(abi_ulong), q++) {
7322                 if (get_user_ual(addr, gp)
7323                     || !addr)
7324                     break;
7325                 unlock_user(*q, addr, 0);
7326             }
7327             for (gp = guest_envp, q = envp; *q;
7328                   gp += sizeof(abi_ulong), q++) {
7329                 if (get_user_ual(addr, gp)
7330                     || !addr)
7331                     break;
7332                 unlock_user(*q, addr, 0);
7333             }
7334 
7335             g_free(argp);
7336             g_free(envp);
7337         }
7338         return ret;
7339     case TARGET_NR_chdir:
7340         if (!(p = lock_user_string(arg1)))
7341             return -TARGET_EFAULT;
7342         ret = get_errno(chdir(p));
7343         unlock_user(p, arg1, 0);
7344         return ret;
7345 #ifdef TARGET_NR_time
7346     case TARGET_NR_time:
7347         {
7348             time_t host_time;
7349             ret = get_errno(time(&host_time));
7350             if (!is_error(ret)
7351                 && arg1
7352                 && put_user_sal(host_time, arg1))
7353                 return -TARGET_EFAULT;
7354         }
7355         return ret;
7356 #endif
7357 #ifdef TARGET_NR_mknod
7358     case TARGET_NR_mknod:
7359         if (!(p = lock_user_string(arg1)))
7360             return -TARGET_EFAULT;
7361         ret = get_errno(mknod(p, arg2, arg3));
7362         unlock_user(p, arg1, 0);
7363         return ret;
7364 #endif
7365 #if defined(TARGET_NR_mknodat)
7366     case TARGET_NR_mknodat:
7367         if (!(p = lock_user_string(arg2)))
7368             return -TARGET_EFAULT;
7369         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7370         unlock_user(p, arg2, 0);
7371         return ret;
7372 #endif
7373 #ifdef TARGET_NR_chmod
7374     case TARGET_NR_chmod:
7375         if (!(p = lock_user_string(arg1)))
7376             return -TARGET_EFAULT;
7377         ret = get_errno(chmod(p, arg2));
7378         unlock_user(p, arg1, 0);
7379         return ret;
7380 #endif
7381 #ifdef TARGET_NR_lseek
7382     case TARGET_NR_lseek:
7383         return get_errno(lseek(arg1, arg2, arg3));
7384 #endif
7385 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7386     /* Alpha specific */
7387     case TARGET_NR_getxpid:
7388         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7389         return get_errno(getpid());
7390 #endif
7391 #ifdef TARGET_NR_getpid
7392     case TARGET_NR_getpid:
7393         return get_errno(getpid());
7394 #endif
7395     case TARGET_NR_mount:
7396         {
7397             /* need to look at the data field */
7398             void *p2, *p3;
7399 
7400             if (arg1) {
7401                 p = lock_user_string(arg1);
7402                 if (!p) {
7403                     return -TARGET_EFAULT;
7404                 }
7405             } else {
7406                 p = NULL;
7407             }
7408 
7409             p2 = lock_user_string(arg2);
7410             if (!p2) {
7411                 if (arg1) {
7412                     unlock_user(p, arg1, 0);
7413                 }
7414                 return -TARGET_EFAULT;
7415             }
7416 
7417             if (arg3) {
7418                 p3 = lock_user_string(arg3);
7419                 if (!p3) {
7420                     if (arg1) {
7421                         unlock_user(p, arg1, 0);
7422                     }
7423                     unlock_user(p2, arg2, 0);
7424                     return -TARGET_EFAULT;
7425                 }
7426             } else {
7427                 p3 = NULL;
7428             }
7429 
7430             /* FIXME - arg5 should be locked, but it isn't clear how to
7431              * do that since it's not guaranteed to be a NULL-terminated
7432              * string.
7433              */
7434             if (!arg5) {
7435                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7436             } else {
7437                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7438             }
7439             ret = get_errno(ret);
7440 
7441             if (arg1) {
7442                 unlock_user(p, arg1, 0);
7443             }
7444             unlock_user(p2, arg2, 0);
7445             if (arg3) {
7446                 unlock_user(p3, arg3, 0);
7447             }
7448         }
7449         return ret;
7450 #ifdef TARGET_NR_umount
7451     case TARGET_NR_umount:
7452         if (!(p = lock_user_string(arg1)))
7453             return -TARGET_EFAULT;
7454         ret = get_errno(umount(p));
7455         unlock_user(p, arg1, 0);
7456         return ret;
7457 #endif
7458 #ifdef TARGET_NR_stime /* not on alpha */
7459     case TARGET_NR_stime:
7460         {
7461             time_t host_time;
7462             if (get_user_sal(host_time, arg1))
7463                 return -TARGET_EFAULT;
7464             return get_errno(stime(&host_time));
7465         }
7466 #endif
7467 #ifdef TARGET_NR_alarm /* not on alpha */
7468     case TARGET_NR_alarm:
7469         return alarm(arg1);
7470 #endif
7471 #ifdef TARGET_NR_pause /* not on alpha */
7472     case TARGET_NR_pause:
7473         if (!block_signals()) {
7474             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7475         }
7476         return -TARGET_EINTR;
7477 #endif
7478 #ifdef TARGET_NR_utime
7479     case TARGET_NR_utime:
7480         {
7481             struct utimbuf tbuf, *host_tbuf;
7482             struct target_utimbuf *target_tbuf;
7483             if (arg2) {
7484                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7485                     return -TARGET_EFAULT;
7486                 tbuf.actime = tswapal(target_tbuf->actime);
7487                 tbuf.modtime = tswapal(target_tbuf->modtime);
7488                 unlock_user_struct(target_tbuf, arg2, 0);
7489                 host_tbuf = &tbuf;
7490             } else {
7491                 host_tbuf = NULL;
7492             }
7493             if (!(p = lock_user_string(arg1)))
7494                 return -TARGET_EFAULT;
7495             ret = get_errno(utime(p, host_tbuf));
7496             unlock_user(p, arg1, 0);
7497         }
7498         return ret;
7499 #endif
7500 #ifdef TARGET_NR_utimes
7501     case TARGET_NR_utimes:
7502         {
7503             struct timeval *tvp, tv[2];
7504             if (arg2) {
7505                 if (copy_from_user_timeval(&tv[0], arg2)
7506                     || copy_from_user_timeval(&tv[1],
7507                                               arg2 + sizeof(struct target_timeval)))
7508                     return -TARGET_EFAULT;
7509                 tvp = tv;
7510             } else {
7511                 tvp = NULL;
7512             }
7513             if (!(p = lock_user_string(arg1)))
7514                 return -TARGET_EFAULT;
7515             ret = get_errno(utimes(p, tvp));
7516             unlock_user(p, arg1, 0);
7517         }
7518         return ret;
7519 #endif
7520 #if defined(TARGET_NR_futimesat)
7521     case TARGET_NR_futimesat:
7522         {
7523             struct timeval *tvp, tv[2];
7524             if (arg3) {
7525                 if (copy_from_user_timeval(&tv[0], arg3)
7526                     || copy_from_user_timeval(&tv[1],
7527                                               arg3 + sizeof(struct target_timeval)))
7528                     return -TARGET_EFAULT;
7529                 tvp = tv;
7530             } else {
7531                 tvp = NULL;
7532             }
7533             if (!(p = lock_user_string(arg2))) {
7534                 return -TARGET_EFAULT;
7535             }
7536             ret = get_errno(futimesat(arg1, path(p), tvp));
7537             unlock_user(p, arg2, 0);
7538         }
7539         return ret;
7540 #endif
7541 #ifdef TARGET_NR_access
7542     case TARGET_NR_access:
7543         if (!(p = lock_user_string(arg1))) {
7544             return -TARGET_EFAULT;
7545         }
7546         ret = get_errno(access(path(p), arg2));
7547         unlock_user(p, arg1, 0);
7548         return ret;
7549 #endif
7550 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7551     case TARGET_NR_faccessat:
7552         if (!(p = lock_user_string(arg2))) {
7553             return -TARGET_EFAULT;
7554         }
7555         ret = get_errno(faccessat(arg1, p, arg3, 0));
7556         unlock_user(p, arg2, 0);
7557         return ret;
7558 #endif
7559 #ifdef TARGET_NR_nice /* not on alpha */
7560     case TARGET_NR_nice:
7561         return get_errno(nice(arg1));
7562 #endif
7563     case TARGET_NR_sync:
7564         sync();
7565         return 0;
7566 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7567     case TARGET_NR_syncfs:
7568         return get_errno(syncfs(arg1));
7569 #endif
7570     case TARGET_NR_kill:
7571         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7572 #ifdef TARGET_NR_rename
7573     case TARGET_NR_rename:
7574         {
7575             void *p2;
7576             p = lock_user_string(arg1);
7577             p2 = lock_user_string(arg2);
7578             if (!p || !p2)
7579                 ret = -TARGET_EFAULT;
7580             else
7581                 ret = get_errno(rename(p, p2));
7582             unlock_user(p2, arg2, 0);
7583             unlock_user(p, arg1, 0);
7584         }
7585         return ret;
7586 #endif
7587 #if defined(TARGET_NR_renameat)
7588     case TARGET_NR_renameat:
7589         {
7590             void *p2;
7591             p  = lock_user_string(arg2);
7592             p2 = lock_user_string(arg4);
7593             if (!p || !p2)
7594                 ret = -TARGET_EFAULT;
7595             else
7596                 ret = get_errno(renameat(arg1, p, arg3, p2));
7597             unlock_user(p2, arg4, 0);
7598             unlock_user(p, arg2, 0);
7599         }
7600         return ret;
7601 #endif
7602 #if defined(TARGET_NR_renameat2)
7603     case TARGET_NR_renameat2:
7604         {
7605             void *p2;
7606             p  = lock_user_string(arg2);
7607             p2 = lock_user_string(arg4);
7608             if (!p || !p2) {
7609                 ret = -TARGET_EFAULT;
7610             } else {
7611                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7612             }
7613             unlock_user(p2, arg4, 0);
7614             unlock_user(p, arg2, 0);
7615         }
7616         return ret;
7617 #endif
7618 #ifdef TARGET_NR_mkdir
7619     case TARGET_NR_mkdir:
7620         if (!(p = lock_user_string(arg1)))
7621             return -TARGET_EFAULT;
7622         ret = get_errno(mkdir(p, arg2));
7623         unlock_user(p, arg1, 0);
7624         return ret;
7625 #endif
7626 #if defined(TARGET_NR_mkdirat)
7627     case TARGET_NR_mkdirat:
7628         if (!(p = lock_user_string(arg2)))
7629             return -TARGET_EFAULT;
7630         ret = get_errno(mkdirat(arg1, p, arg3));
7631         unlock_user(p, arg2, 0);
7632         return ret;
7633 #endif
7634 #ifdef TARGET_NR_rmdir
7635     case TARGET_NR_rmdir:
7636         if (!(p = lock_user_string(arg1)))
7637             return -TARGET_EFAULT;
7638         ret = get_errno(rmdir(p));
7639         unlock_user(p, arg1, 0);
7640         return ret;
7641 #endif
7642     case TARGET_NR_dup:
7643         ret = get_errno(dup(arg1));
7644         if (ret >= 0) {
7645             fd_trans_dup(arg1, ret);
7646         }
7647         return ret;
7648 #ifdef TARGET_NR_pipe
7649     case TARGET_NR_pipe:
7650         return do_pipe(cpu_env, arg1, 0, 0);
7651 #endif
7652 #ifdef TARGET_NR_pipe2
7653     case TARGET_NR_pipe2:
7654         return do_pipe(cpu_env, arg1,
7655                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7656 #endif
7657     case TARGET_NR_times:
7658         {
7659             struct target_tms *tmsp;
7660             struct tms tms;
7661             ret = get_errno(times(&tms));
7662             if (arg1) {
7663                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7664                 if (!tmsp)
7665                     return -TARGET_EFAULT;
7666                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7667                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7668                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7669                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7670             }
7671             if (!is_error(ret))
7672                 ret = host_to_target_clock_t(ret);
7673         }
7674         return ret;
7675     case TARGET_NR_acct:
7676         if (arg1 == 0) {
7677             ret = get_errno(acct(NULL));
7678         } else {
7679             if (!(p = lock_user_string(arg1))) {
7680                 return -TARGET_EFAULT;
7681             }
7682             ret = get_errno(acct(path(p)));
7683             unlock_user(p, arg1, 0);
7684         }
7685         return ret;
7686 #ifdef TARGET_NR_umount2
7687     case TARGET_NR_umount2:
7688         if (!(p = lock_user_string(arg1)))
7689             return -TARGET_EFAULT;
7690         ret = get_errno(umount2(p, arg2));
7691         unlock_user(p, arg1, 0);
7692         return ret;
7693 #endif
7694     case TARGET_NR_ioctl:
7695         return do_ioctl(arg1, arg2, arg3);
7696 #ifdef TARGET_NR_fcntl
7697     case TARGET_NR_fcntl:
7698         return do_fcntl(arg1, arg2, arg3);
7699 #endif
7700     case TARGET_NR_setpgid:
7701         return get_errno(setpgid(arg1, arg2));
7702     case TARGET_NR_umask:
7703         return get_errno(umask(arg1));
7704     case TARGET_NR_chroot:
7705         if (!(p = lock_user_string(arg1)))
7706             return -TARGET_EFAULT;
7707         ret = get_errno(chroot(p));
7708         unlock_user(p, arg1, 0);
7709         return ret;
7710 #ifdef TARGET_NR_dup2
7711     case TARGET_NR_dup2:
7712         ret = get_errno(dup2(arg1, arg2));
7713         if (ret >= 0) {
7714             fd_trans_dup(arg1, arg2);
7715         }
7716         return ret;
7717 #endif
7718 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7719     case TARGET_NR_dup3:
7720     {
7721         int host_flags;
7722 
7723         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7724             return -TARGET_EINVAL;
7725         }
7726         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7727         ret = get_errno(dup3(arg1, arg2, host_flags));
7728         if (ret >= 0) {
7729             fd_trans_dup(arg1, arg2);
7730         }
7731         return ret;
7732     }
7733 #endif
7734 #ifdef TARGET_NR_getppid /* not on alpha */
7735     case TARGET_NR_getppid:
7736         return get_errno(getppid());
7737 #endif
7738 #ifdef TARGET_NR_getpgrp
7739     case TARGET_NR_getpgrp:
7740         return get_errno(getpgrp());
7741 #endif
7742     case TARGET_NR_setsid:
7743         return get_errno(setsid());
7744 #ifdef TARGET_NR_sigaction
7745     case TARGET_NR_sigaction:
7746         {
7747 #if defined(TARGET_ALPHA)
7748             struct target_sigaction act, oact, *pact = 0;
7749             struct target_old_sigaction *old_act;
7750             if (arg2) {
7751                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7752                     return -TARGET_EFAULT;
7753                 act._sa_handler = old_act->_sa_handler;
7754                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7755                 act.sa_flags = old_act->sa_flags;
7756                 act.sa_restorer = 0;
7757                 unlock_user_struct(old_act, arg2, 0);
7758                 pact = &act;
7759             }
7760             ret = get_errno(do_sigaction(arg1, pact, &oact));
7761             if (!is_error(ret) && arg3) {
7762                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7763                     return -TARGET_EFAULT;
7764                 old_act->_sa_handler = oact._sa_handler;
7765                 old_act->sa_mask = oact.sa_mask.sig[0];
7766                 old_act->sa_flags = oact.sa_flags;
7767                 unlock_user_struct(old_act, arg3, 1);
7768             }
7769 #elif defined(TARGET_MIPS)
7770             struct target_sigaction act, oact, *pact, *old_act;
7771 
7772             if (arg2) {
7773                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7774                     return -TARGET_EFAULT;
7775                 act._sa_handler = old_act->_sa_handler;
7776                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7777                 act.sa_flags = old_act->sa_flags;
7778                 unlock_user_struct(old_act, arg2, 0);
7779                 pact = &act;
7780             } else {
7781                 pact = NULL;
7782             }
7783 
7784             ret = get_errno(do_sigaction(arg1, pact, &oact));
7785 
7786             if (!is_error(ret) && arg3) {
7787                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7788                     return -TARGET_EFAULT;
7789                 old_act->_sa_handler = oact._sa_handler;
7790                 old_act->sa_flags = oact.sa_flags;
7791                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7792                 old_act->sa_mask.sig[1] = 0;
7793                 old_act->sa_mask.sig[2] = 0;
7794                 old_act->sa_mask.sig[3] = 0;
7795                 unlock_user_struct(old_act, arg3, 1);
7796             }
7797 #else
7798             struct target_old_sigaction *old_act;
7799             struct target_sigaction act, oact, *pact;
7800             if (arg2) {
7801                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7802                     return -TARGET_EFAULT;
7803                 act._sa_handler = old_act->_sa_handler;
7804                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7805                 act.sa_flags = old_act->sa_flags;
7806                 act.sa_restorer = old_act->sa_restorer;
7807 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7808                 act.ka_restorer = 0;
7809 #endif
7810                 unlock_user_struct(old_act, arg2, 0);
7811                 pact = &act;
7812             } else {
7813                 pact = NULL;
7814             }
7815             ret = get_errno(do_sigaction(arg1, pact, &oact));
7816             if (!is_error(ret) && arg3) {
7817                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7818                     return -TARGET_EFAULT;
7819                 old_act->_sa_handler = oact._sa_handler;
7820                 old_act->sa_mask = oact.sa_mask.sig[0];
7821                 old_act->sa_flags = oact.sa_flags;
7822                 old_act->sa_restorer = oact.sa_restorer;
7823                 unlock_user_struct(old_act, arg3, 1);
7824             }
7825 #endif
7826         }
7827         return ret;
7828 #endif
7829     case TARGET_NR_rt_sigaction:
7830         {
7831 #if defined(TARGET_ALPHA)
7832             /* For Alpha and SPARC this is a 5 argument syscall, with
7833              * a 'restorer' parameter which must be copied into the
7834              * sa_restorer field of the sigaction struct.
7835              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7836              * and arg5 is the sigsetsize.
7837              * Alpha also has a separate rt_sigaction struct that it uses
7838              * here; SPARC uses the usual sigaction struct.
7839              */
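                 /* In guest terms the two layouts are therefore:
                  *   Alpha: rt_sigaction(sig, act, oact, sigsetsize, restorer)
                  *   SPARC: rt_sigaction(sig, act, oact, restorer, sigsetsize)
                  */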
7840             struct target_rt_sigaction *rt_act;
7841             struct target_sigaction act, oact, *pact = 0;
7842 
7843             if (arg4 != sizeof(target_sigset_t)) {
7844                 return -TARGET_EINVAL;
7845             }
7846             if (arg2) {
7847                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7848                     return -TARGET_EFAULT;
7849                 act._sa_handler = rt_act->_sa_handler;
7850                 act.sa_mask = rt_act->sa_mask;
7851                 act.sa_flags = rt_act->sa_flags;
7852                 act.sa_restorer = arg5;
7853                 unlock_user_struct(rt_act, arg2, 0);
7854                 pact = &act;
7855             }
7856             ret = get_errno(do_sigaction(arg1, pact, &oact));
7857             if (!is_error(ret) && arg3) {
7858                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7859                     return -TARGET_EFAULT;
7860                 rt_act->_sa_handler = oact._sa_handler;
7861                 rt_act->sa_mask = oact.sa_mask;
7862                 rt_act->sa_flags = oact.sa_flags;
7863                 unlock_user_struct(rt_act, arg3, 1);
7864             }
7865 #else
7866 #ifdef TARGET_SPARC
7867             target_ulong restorer = arg4;
7868             target_ulong sigsetsize = arg5;
7869 #else
7870             target_ulong sigsetsize = arg4;
7871 #endif
7872             struct target_sigaction *act;
7873             struct target_sigaction *oact;
7874 
7875             if (sigsetsize != sizeof(target_sigset_t)) {
7876                 return -TARGET_EINVAL;
7877             }
7878             if (arg2) {
7879                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
7880                     return -TARGET_EFAULT;
7881                 }
7882 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7883                 act->ka_restorer = restorer;
7884 #endif
7885             } else {
7886                 act = NULL;
7887             }
7888             if (arg3) {
7889                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7890                     ret = -TARGET_EFAULT;
7891                     goto rt_sigaction_fail;
7892                 }
7893             } else
7894                 oact = NULL;
7895             ret = get_errno(do_sigaction(arg1, act, oact));
7896     rt_sigaction_fail:
7897             if (act)
7898                 unlock_user_struct(act, arg2, 0);
7899             if (oact)
7900                 unlock_user_struct(oact, arg3, 1);
7901 #endif
7902         }
7903         return ret;
7904 #ifdef TARGET_NR_sgetmask /* not on alpha */
7905     case TARGET_NR_sgetmask:
7906         {
7907             sigset_t cur_set;
7908             abi_ulong target_set;
7909             ret = do_sigprocmask(0, NULL, &cur_set);
7910             if (!ret) {
7911                 host_to_target_old_sigset(&target_set, &cur_set);
7912                 ret = target_set;
7913             }
7914         }
7915         return ret;
7916 #endif
7917 #ifdef TARGET_NR_ssetmask /* not on alpha */
7918     case TARGET_NR_ssetmask:
7919         {
7920             sigset_t set, oset;
7921             abi_ulong target_set = arg1;
7922             target_to_host_old_sigset(&set, &target_set);
7923             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7924             if (!ret) {
7925                 host_to_target_old_sigset(&target_set, &oset);
7926                 ret = target_set;
7927             }
7928         }
7929         return ret;
7930 #endif
7931 #ifdef TARGET_NR_sigprocmask
7932     case TARGET_NR_sigprocmask:
7933         {
7934 #if defined(TARGET_ALPHA)
7935             sigset_t set, oldset;
7936             abi_ulong mask;
7937             int how;
7938 
7939             switch (arg1) {
7940             case TARGET_SIG_BLOCK:
7941                 how = SIG_BLOCK;
7942                 break;
7943             case TARGET_SIG_UNBLOCK:
7944                 how = SIG_UNBLOCK;
7945                 break;
7946             case TARGET_SIG_SETMASK:
7947                 how = SIG_SETMASK;
7948                 break;
7949             default:
7950                 return -TARGET_EINVAL;
7951             }
7952             mask = arg2;
7953             target_to_host_old_sigset(&set, &mask);
7954 
7955             ret = do_sigprocmask(how, &set, &oldset);
7956             if (!is_error(ret)) {
7957                 host_to_target_old_sigset(&mask, &oldset);
7958                 ret = mask;
7959                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7960             }
7961 #else
7962             sigset_t set, oldset, *set_ptr;
7963             int how;
7964 
7965             if (arg2) {
7966                 switch (arg1) {
7967                 case TARGET_SIG_BLOCK:
7968                     how = SIG_BLOCK;
7969                     break;
7970                 case TARGET_SIG_UNBLOCK:
7971                     how = SIG_UNBLOCK;
7972                     break;
7973                 case TARGET_SIG_SETMASK:
7974                     how = SIG_SETMASK;
7975                     break;
7976                 default:
7977                     return -TARGET_EINVAL;
7978                 }
7979                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7980                     return -TARGET_EFAULT;
7981                 target_to_host_old_sigset(&set, p);
7982                 unlock_user(p, arg2, 0);
7983                 set_ptr = &set;
7984             } else {
7985                 how = 0;
7986                 set_ptr = NULL;
7987             }
7988             ret = do_sigprocmask(how, set_ptr, &oldset);
7989             if (!is_error(ret) && arg3) {
7990                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7991                     return -TARGET_EFAULT;
7992                 host_to_target_old_sigset(p, &oldset);
7993                 unlock_user(p, arg3, sizeof(target_sigset_t));
7994             }
7995 #endif
7996         }
7997         return ret;
7998 #endif
7999     case TARGET_NR_rt_sigprocmask:
8000         {
8001             int how = arg1;
8002             sigset_t set, oldset, *set_ptr;
8003 
8004             if (arg4 != sizeof(target_sigset_t)) {
8005                 return -TARGET_EINVAL;
8006             }
8007 
8008             if (arg2) {
8009                 switch(how) {
8010                 case TARGET_SIG_BLOCK:
8011                     how = SIG_BLOCK;
8012                     break;
8013                 case TARGET_SIG_UNBLOCK:
8014                     how = SIG_UNBLOCK;
8015                     break;
8016                 case TARGET_SIG_SETMASK:
8017                     how = SIG_SETMASK;
8018                     break;
8019                 default:
8020                     return -TARGET_EINVAL;
8021                 }
8022                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8023                     return -TARGET_EFAULT;
8024                 target_to_host_sigset(&set, p);
8025                 unlock_user(p, arg2, 0);
8026                 set_ptr = &set;
8027             } else {
8028                 how = 0;
8029                 set_ptr = NULL;
8030             }
8031             ret = do_sigprocmask(how, set_ptr, &oldset);
8032             if (!is_error(ret) && arg3) {
8033                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8034                     return -TARGET_EFAULT;
8035                 host_to_target_sigset(p, &oldset);
8036                 unlock_user(p, arg3, sizeof(target_sigset_t));
8037             }
8038         }
8039         return ret;
8040 #ifdef TARGET_NR_sigpending
8041     case TARGET_NR_sigpending:
8042         {
8043             sigset_t set;
8044             ret = get_errno(sigpending(&set));
8045             if (!is_error(ret)) {
8046                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8047                     return -TARGET_EFAULT;
8048                 host_to_target_old_sigset(p, &set);
8049                 unlock_user(p, arg1, sizeof(target_sigset_t));
8050             }
8051         }
8052         return ret;
8053 #endif
8054     case TARGET_NR_rt_sigpending:
8055         {
8056             sigset_t set;
8057 
8058             /* Yes, this check is >, not != like most. We follow the kernel's
8059              * logic and it does it like this because it implements
8060              * NR_sigpending through the same code path, and in that case
8061              * the old_sigset_t is smaller in size.
8062              */
8063             if (arg2 > sizeof(target_sigset_t)) {
8064                 return -TARGET_EINVAL;
8065             }
8066 
8067             ret = get_errno(sigpending(&set));
8068             if (!is_error(ret)) {
8069                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8070                     return -TARGET_EFAULT;
8071                 host_to_target_sigset(p, &set);
8072                 unlock_user(p, arg1, sizeof(target_sigset_t));
8073             }
8074         }
8075         return ret;
8076 #ifdef TARGET_NR_sigsuspend
8077     case TARGET_NR_sigsuspend:
8078         {
8079             TaskState *ts = cpu->opaque;
8080 #if defined(TARGET_ALPHA)
8081             abi_ulong mask = arg1;
8082             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8083 #else
8084             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8085                 return -TARGET_EFAULT;
8086             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8087             unlock_user(p, arg1, 0);
8088 #endif
8089             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8090                                                SIGSET_T_SIZE));
8091             if (ret != -TARGET_ERESTARTSYS) {
8092                 ts->in_sigsuspend = 1;
8093             }
8094         }
8095         return ret;
8096 #endif
8097     case TARGET_NR_rt_sigsuspend:
8098         {
8099             TaskState *ts = cpu->opaque;
8100 
8101             if (arg2 != sizeof(target_sigset_t)) {
8102                 return -TARGET_EINVAL;
8103             }
8104             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8105                 return -TARGET_EFAULT;
8106             target_to_host_sigset(&ts->sigsuspend_mask, p);
8107             unlock_user(p, arg1, 0);
8108             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8109                                                SIGSET_T_SIZE));
8110             if (ret != -TARGET_ERESTARTSYS) {
8111                 ts->in_sigsuspend = 1;
8112             }
8113         }
8114         return ret;
8115     case TARGET_NR_rt_sigtimedwait:
8116         {
8117             sigset_t set;
8118             struct timespec uts, *puts;
8119             siginfo_t uinfo;
8120 
8121             if (arg4 != sizeof(target_sigset_t)) {
8122                 return -TARGET_EINVAL;
8123             }
8124 
8125             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8126                 return -TARGET_EFAULT;
8127             target_to_host_sigset(&set, p);
8128             unlock_user(p, arg1, 0);
8129             if (arg3) {
8130                 puts = &uts;
8131                 target_to_host_timespec(puts, arg3);
8132             } else {
8133                 puts = NULL;
8134             }
8135             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8136                                                  SIGSET_T_SIZE));
8137             if (!is_error(ret)) {
8138                 if (arg2) {
8139                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8140                                   0);
8141                     if (!p) {
8142                         return -TARGET_EFAULT;
8143                     }
8144                     host_to_target_siginfo(p, &uinfo);
8145                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8146                 }
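                     /* On success the host call returns the number of the
                      * delivered signal; map it to the target's numbering. */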
8147                 ret = host_to_target_signal(ret);
8148             }
8149         }
8150         return ret;
8151     case TARGET_NR_rt_sigqueueinfo:
8152         {
8153             siginfo_t uinfo;
8154 
8155             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8156             if (!p) {
8157                 return -TARGET_EFAULT;
8158             }
8159             target_to_host_siginfo(&uinfo, p);
8160             unlock_user(p, arg3, 0);
8161             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8162         }
8163         return ret;
8164     case TARGET_NR_rt_tgsigqueueinfo:
8165         {
8166             siginfo_t uinfo;
8167 
8168             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8169             if (!p) {
8170                 return -TARGET_EFAULT;
8171             }
8172             target_to_host_siginfo(&uinfo, p);
8173             unlock_user(p, arg4, 0);
8174             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8175         }
8176         return ret;
8177 #ifdef TARGET_NR_sigreturn
8178     case TARGET_NR_sigreturn:
8179         if (block_signals()) {
8180             return -TARGET_ERESTARTSYS;
8181         }
8182         return do_sigreturn(cpu_env);
8183 #endif
8184     case TARGET_NR_rt_sigreturn:
8185         if (block_signals()) {
8186             return -TARGET_ERESTARTSYS;
8187         }
8188         return do_rt_sigreturn(cpu_env);
8189     case TARGET_NR_sethostname:
8190         if (!(p = lock_user_string(arg1)))
8191             return -TARGET_EFAULT;
8192         ret = get_errno(sethostname(p, arg2));
8193         unlock_user(p, arg1, 0);
8194         return ret;
8195 #ifdef TARGET_NR_setrlimit
8196     case TARGET_NR_setrlimit:
8197         {
8198             int resource = target_to_host_resource(arg1);
8199             struct target_rlimit *target_rlim;
8200             struct rlimit rlim;
8201             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8202                 return -TARGET_EFAULT;
8203             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8204             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8205             unlock_user_struct(target_rlim, arg2, 0);
8206             /*
8207              * If we just passed through resource limit settings for memory then
8208              * they would also apply to QEMU's own allocations, and QEMU will
8209              * crash or hang or die if its allocations fail. Ideally we would
8210              * track the guest allocations in QEMU and apply the limits ourselves.
8211              * For now, just tell the guest the call succeeded but don't actually
8212              * limit anything.
8213              */
8214             if (resource != RLIMIT_AS &&
8215                 resource != RLIMIT_DATA &&
8216                 resource != RLIMIT_STACK) {
8217                 return get_errno(setrlimit(resource, &rlim));
8218             } else {
8219                 return 0;
8220             }
8221         }
8222 #endif
8223 #ifdef TARGET_NR_getrlimit
8224     case TARGET_NR_getrlimit:
8225         {
8226             int resource = target_to_host_resource(arg1);
8227             struct target_rlimit *target_rlim;
8228             struct rlimit rlim;
8229 
8230             ret = get_errno(getrlimit(resource, &rlim));
8231             if (!is_error(ret)) {
8232                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8233                     return -TARGET_EFAULT;
8234                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8235                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8236                 unlock_user_struct(target_rlim, arg2, 1);
8237             }
8238         }
8239         return ret;
8240 #endif
8241     case TARGET_NR_getrusage:
8242         {
8243             struct rusage rusage;
8244             ret = get_errno(getrusage(arg1, &rusage));
8245             if (!is_error(ret)) {
8246                 ret = host_to_target_rusage(arg2, &rusage);
8247             }
8248         }
8249         return ret;
8250     case TARGET_NR_gettimeofday:
8251         {
8252             struct timeval tv;
8253             ret = get_errno(gettimeofday(&tv, NULL));
8254             if (!is_error(ret)) {
8255                 if (copy_to_user_timeval(arg1, &tv))
8256                     return -TARGET_EFAULT;
8257             }
8258         }
8259         return ret;
8260     case TARGET_NR_settimeofday:
8261         {
8262             struct timeval tv, *ptv = NULL;
8263             struct timezone tz, *ptz = NULL;
8264 
8265             if (arg1) {
8266                 if (copy_from_user_timeval(&tv, arg1)) {
8267                     return -TARGET_EFAULT;
8268                 }
8269                 ptv = &tv;
8270             }
8271 
8272             if (arg2) {
8273                 if (copy_from_user_timezone(&tz, arg2)) {
8274                     return -TARGET_EFAULT;
8275                 }
8276                 ptz = &tz;
8277             }
8278 
8279             return get_errno(settimeofday(ptv, ptz));
8280         }
8281 #if defined(TARGET_NR_select)
8282     case TARGET_NR_select:
8283 #if defined(TARGET_WANT_NI_OLD_SELECT)
8284         /* some architectures used to have old_select here
8285          * but now return ENOSYS for it.
8286          */
8287         ret = -TARGET_ENOSYS;
8288 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8289         ret = do_old_select(arg1);
8290 #else
8291         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8292 #endif
8293         return ret;
8294 #endif
8295 #ifdef TARGET_NR_pselect6
8296     case TARGET_NR_pselect6:
8297         {
8298             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8299             fd_set rfds, wfds, efds;
8300             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8301             struct timespec ts, *ts_ptr;
8302 
8303             /*
8304              * The 6th arg is actually two args smashed together,
8305              * so we cannot use the C library.
8306              */
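                 /* In other words, arg6 points at a guest-side pair of
                  * { sigset pointer, sigset size }, mirroring the struct the
                  * kernel's pselect6 expects as its sixth argument. */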
8307             sigset_t set;
8308             struct {
8309                 sigset_t *set;
8310                 size_t size;
8311             } sig, *sig_ptr;
8312 
8313             abi_ulong arg_sigset, arg_sigsize, *arg7;
8314             target_sigset_t *target_sigset;
8315 
8316             n = arg1;
8317             rfd_addr = arg2;
8318             wfd_addr = arg3;
8319             efd_addr = arg4;
8320             ts_addr = arg5;
8321 
8322             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8323             if (ret) {
8324                 return ret;
8325             }
8326             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8327             if (ret) {
8328                 return ret;
8329             }
8330             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8331             if (ret) {
8332                 return ret;
8333             }
8334 
8335             /*
8336              * This takes a timespec, and not a timeval, so we cannot
8337              * use the do_select() helper ...
8338              */
8339             if (ts_addr) {
8340                 if (target_to_host_timespec(&ts, ts_addr)) {
8341                     return -TARGET_EFAULT;
8342                 }
8343                 ts_ptr = &ts;
8344             } else {
8345                 ts_ptr = NULL;
8346             }
8347 
8348             /* Extract the two packed args for the sigset */
8349             if (arg6) {
8350                 sig_ptr = &sig;
8351                 sig.size = SIGSET_T_SIZE;
8352 
8353                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8354                 if (!arg7) {
8355                     return -TARGET_EFAULT;
8356                 }
8357                 arg_sigset = tswapal(arg7[0]);
8358                 arg_sigsize = tswapal(arg7[1]);
8359                 unlock_user(arg7, arg6, 0);
8360 
8361                 if (arg_sigset) {
8362                     sig.set = &set;
8363                     if (arg_sigsize != sizeof(*target_sigset)) {
8364                         /* Like the kernel, we enforce correct size sigsets */
8365                         return -TARGET_EINVAL;
8366                     }
8367                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8368                                               sizeof(*target_sigset), 1);
8369                     if (!target_sigset) {
8370                         return -TARGET_EFAULT;
8371                     }
8372                     target_to_host_sigset(&set, target_sigset);
8373                     unlock_user(target_sigset, arg_sigset, 0);
8374                 } else {
8375                     sig.set = NULL;
8376                 }
8377             } else {
8378                 sig_ptr = NULL;
8379             }
8380 
8381             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8382                                           ts_ptr, sig_ptr));
8383 
8384             if (!is_error(ret)) {
8385                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8386                     return -TARGET_EFAULT;
8387                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8388                     return -TARGET_EFAULT;
8389                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8390                     return -TARGET_EFAULT;
8391 
8392                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8393                     return -TARGET_EFAULT;
8394             }
8395         }
8396         return ret;
8397 #endif
8398 #ifdef TARGET_NR_symlink
8399     case TARGET_NR_symlink:
8400         {
8401             void *p2;
8402             p = lock_user_string(arg1);
8403             p2 = lock_user_string(arg2);
8404             if (!p || !p2)
8405                 ret = -TARGET_EFAULT;
8406             else
8407                 ret = get_errno(symlink(p, p2));
8408             unlock_user(p2, arg2, 0);
8409             unlock_user(p, arg1, 0);
8410         }
8411         return ret;
8412 #endif
8413 #if defined(TARGET_NR_symlinkat)
8414     case TARGET_NR_symlinkat:
8415         {
8416             void *p2;
8417             p  = lock_user_string(arg1);
8418             p2 = lock_user_string(arg3);
8419             if (!p || !p2)
8420                 ret = -TARGET_EFAULT;
8421             else
8422                 ret = get_errno(symlinkat(p, arg2, p2));
8423             unlock_user(p2, arg3, 0);
8424             unlock_user(p, arg1, 0);
8425         }
8426         return ret;
8427 #endif
8428 #ifdef TARGET_NR_readlink
8429     case TARGET_NR_readlink:
8430         {
8431             void *p2;
8432             p = lock_user_string(arg1);
8433             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8434             if (!p || !p2) {
8435                 ret = -TARGET_EFAULT;
8436             } else if (!arg3) {
8437                 /* Short circuit this for the magic exe check. */
8438                 ret = -TARGET_EINVAL;
8439             } else if (is_proc_myself((const char *)p, "exe")) {
8440                 char real[PATH_MAX], *temp;
8441                 temp = realpath(exec_path, real);
8442                 /* Return value is # of bytes that we wrote to the buffer. */
8443                 if (temp == NULL) {
8444                     ret = get_errno(-1);
8445                 } else {
8446                     /* Don't worry about sign mismatch as earlier mapping
8447                      * logic would have thrown a bad address error. */
8448                     ret = MIN(strlen(real), arg3);
8449                     /* We cannot NUL terminate the string. */
8450                     memcpy(p2, real, ret);
8451                 }
8452             } else {
8453                 ret = get_errno(readlink(path(p), p2, arg3));
8454             }
8455             unlock_user(p2, arg2, ret);
8456             unlock_user(p, arg1, 0);
8457         }
8458         return ret;
8459 #endif
8460 #if defined(TARGET_NR_readlinkat)
8461     case TARGET_NR_readlinkat:
8462         {
8463             void *p2;
8464             p  = lock_user_string(arg2);
8465             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8466             if (!p || !p2) {
8467                 ret = -TARGET_EFAULT;
8468             } else if (is_proc_myself((const char *)p, "exe")) {
8469                 char real[PATH_MAX], *temp;
8470                 temp = realpath(exec_path, real);
8471                 ret = temp == NULL ? get_errno(-1) : strlen(real);
8472                 snprintf((char *)p2, arg4, "%s", real);
8473             } else {
8474                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8475             }
8476             unlock_user(p2, arg3, ret);
8477             unlock_user(p, arg2, 0);
8478         }
8479         return ret;
8480 #endif
8481 #ifdef TARGET_NR_swapon
8482     case TARGET_NR_swapon:
8483         if (!(p = lock_user_string(arg1)))
8484             return -TARGET_EFAULT;
8485         ret = get_errno(swapon(p, arg2));
8486         unlock_user(p, arg1, 0);
8487         return ret;
8488 #endif
8489     case TARGET_NR_reboot:
8490         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8491            /* arg4 is only used for LINUX_REBOOT_CMD_RESTART2; ignored otherwise */
8492            p = lock_user_string(arg4);
8493            if (!p) {
8494                return -TARGET_EFAULT;
8495            }
8496            ret = get_errno(reboot(arg1, arg2, arg3, p));
8497            unlock_user(p, arg4, 0);
8498         } else {
8499            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8500         }
8501         return ret;
8502 #ifdef TARGET_NR_mmap
8503     case TARGET_NR_mmap:
8504 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8505     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8506     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8507     || defined(TARGET_S390X)
8508         {
8509             abi_ulong *v;
8510             abi_ulong v1, v2, v3, v4, v5, v6;
8511             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8512                 return -TARGET_EFAULT;
8513             v1 = tswapal(v[0]);
8514             v2 = tswapal(v[1]);
8515             v3 = tswapal(v[2]);
8516             v4 = tswapal(v[3]);
8517             v5 = tswapal(v[4]);
8518             v6 = tswapal(v[5]);
8519             unlock_user(v, arg1, 0);
8520             ret = get_errno(target_mmap(v1, v2, v3,
8521                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8522                                         v5, v6));
8523         }
8524 #else
8525         ret = get_errno(target_mmap(arg1, arg2, arg3,
8526                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8527                                     arg5,
8528                                     arg6));
8529 #endif
8530         return ret;
8531 #endif
8532 #ifdef TARGET_NR_mmap2
8533     case TARGET_NR_mmap2:
8534 #ifndef MMAP_SHIFT
8535 #define MMAP_SHIFT 12
8536 #endif
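             /* The mmap2 offset (arg6) is in units of 1 << MMAP_SHIFT bytes
              * (4096 unless the target overrides it); scale it to a byte
              * offset for target_mmap(). */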
8537         ret = target_mmap(arg1, arg2, arg3,
8538                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8539                           arg5, arg6 << MMAP_SHIFT);
8540         return get_errno(ret);
8541 #endif
8542     case TARGET_NR_munmap:
8543         return get_errno(target_munmap(arg1, arg2));
8544     case TARGET_NR_mprotect:
8545         {
8546             TaskState *ts = cpu->opaque;
8547             /* Special hack to detect libc making the stack executable.  */
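                 /* If so, drop PROT_GROWSDOWN (which target_mprotect() does
                  * not understand) and extend the range down to the base of
                  * the guest stack instead. */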
8548             if ((arg3 & PROT_GROWSDOWN)
8549                 && arg1 >= ts->info->stack_limit
8550                 && arg1 <= ts->info->start_stack) {
8551                 arg3 &= ~PROT_GROWSDOWN;
8552                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8553                 arg1 = ts->info->stack_limit;
8554             }
8555         }
8556         return get_errno(target_mprotect(arg1, arg2, arg3));
8557 #ifdef TARGET_NR_mremap
8558     case TARGET_NR_mremap:
8559         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8560 #endif
8561         /* ??? msync/mlock/munlock are broken for softmmu.  */
8562 #ifdef TARGET_NR_msync
8563     case TARGET_NR_msync:
8564         return get_errno(msync(g2h(arg1), arg2, arg3));
8565 #endif
8566 #ifdef TARGET_NR_mlock
8567     case TARGET_NR_mlock:
8568         return get_errno(mlock(g2h(arg1), arg2));
8569 #endif
8570 #ifdef TARGET_NR_munlock
8571     case TARGET_NR_munlock:
8572         return get_errno(munlock(g2h(arg1), arg2));
8573 #endif
8574 #ifdef TARGET_NR_mlockall
8575     case TARGET_NR_mlockall:
8576         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8577 #endif
8578 #ifdef TARGET_NR_munlockall
8579     case TARGET_NR_munlockall:
8580         return get_errno(munlockall());
8581 #endif
8582 #ifdef TARGET_NR_truncate
8583     case TARGET_NR_truncate:
8584         if (!(p = lock_user_string(arg1)))
8585             return -TARGET_EFAULT;
8586         ret = get_errno(truncate(p, arg2));
8587         unlock_user(p, arg1, 0);
8588         return ret;
8589 #endif
8590 #ifdef TARGET_NR_ftruncate
8591     case TARGET_NR_ftruncate:
8592         return get_errno(ftruncate(arg1, arg2));
8593 #endif
8594     case TARGET_NR_fchmod:
8595         return get_errno(fchmod(arg1, arg2));
8596 #if defined(TARGET_NR_fchmodat)
8597     case TARGET_NR_fchmodat:
8598         if (!(p = lock_user_string(arg2)))
8599             return -TARGET_EFAULT;
8600         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8601         unlock_user(p, arg2, 0);
8602         return ret;
8603 #endif
8604     case TARGET_NR_getpriority:
8605         /* Note that negative values are valid for getpriority, so we must
8606            differentiate based on errno settings.  */
8607         errno = 0;
8608         ret = getpriority(arg1, arg2);
8609         if (ret == -1 && errno != 0) {
8610             return -host_to_target_errno(errno);
8611         }
8612 #ifdef TARGET_ALPHA
8613         /* Return value is the unbiased priority.  Signal no error.  */
8614         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8615 #else
8616         /* Return value is a biased priority to avoid negative numbers.  */
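             /* The raw syscall returns 20 - nice (i.e. 1..40), while the host
              * libc getpriority() has already unbiased it, so re-bias here. */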
8617         ret = 20 - ret;
8618 #endif
8619         return ret;
8620     case TARGET_NR_setpriority:
8621         return get_errno(setpriority(arg1, arg2, arg3));
8622 #ifdef TARGET_NR_statfs
8623     case TARGET_NR_statfs:
8624         if (!(p = lock_user_string(arg1))) {
8625             return -TARGET_EFAULT;
8626         }
8627         ret = get_errno(statfs(path(p), &stfs));
8628         unlock_user(p, arg1, 0);
8629     convert_statfs:
8630         if (!is_error(ret)) {
8631             struct target_statfs *target_stfs;
8632 
8633             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8634                 return -TARGET_EFAULT;
8635             __put_user(stfs.f_type, &target_stfs->f_type);
8636             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8637             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8638             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8639             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8640             __put_user(stfs.f_files, &target_stfs->f_files);
8641             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8642             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8643             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8644             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8645             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8646 #ifdef _STATFS_F_FLAGS
8647             __put_user(stfs.f_flags, &target_stfs->f_flags);
8648 #else
8649             __put_user(0, &target_stfs->f_flags);
8650 #endif
8651             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8652             unlock_user_struct(target_stfs, arg2, 1);
8653         }
8654         return ret;
8655 #endif
8656 #ifdef TARGET_NR_fstatfs
8657     case TARGET_NR_fstatfs:
8658         ret = get_errno(fstatfs(arg1, &stfs));
8659         goto convert_statfs;
8660 #endif
8661 #ifdef TARGET_NR_statfs64
8662     case TARGET_NR_statfs64:
8663         if (!(p = lock_user_string(arg1))) {
8664             return -TARGET_EFAULT;
8665         }
8666         ret = get_errno(statfs(path(p), &stfs));
8667         unlock_user(p, arg1, 0);
8668     convert_statfs64:
8669         if (!is_error(ret)) {
8670             struct target_statfs64 *target_stfs;
8671 
8672             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8673                 return -TARGET_EFAULT;
8674             __put_user(stfs.f_type, &target_stfs->f_type);
8675             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8676             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8677             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8678             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8679             __put_user(stfs.f_files, &target_stfs->f_files);
8680             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8681             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8682             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8683             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8684             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8685             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8686             unlock_user_struct(target_stfs, arg3, 1);
8687         }
8688         return ret;
8689     case TARGET_NR_fstatfs64:
8690         ret = get_errno(fstatfs(arg1, &stfs));
8691         goto convert_statfs64;
8692 #endif
8693 #ifdef TARGET_NR_socketcall
8694     case TARGET_NR_socketcall:
8695         return do_socketcall(arg1, arg2);
8696 #endif
8697 #ifdef TARGET_NR_accept
8698     case TARGET_NR_accept:
8699         return do_accept4(arg1, arg2, arg3, 0);
8700 #endif
8701 #ifdef TARGET_NR_accept4
8702     case TARGET_NR_accept4:
8703         return do_accept4(arg1, arg2, arg3, arg4);
8704 #endif
8705 #ifdef TARGET_NR_bind
8706     case TARGET_NR_bind:
8707         return do_bind(arg1, arg2, arg3);
8708 #endif
8709 #ifdef TARGET_NR_connect
8710     case TARGET_NR_connect:
8711         return do_connect(arg1, arg2, arg3);
8712 #endif
8713 #ifdef TARGET_NR_getpeername
8714     case TARGET_NR_getpeername:
8715         return do_getpeername(arg1, arg2, arg3);
8716 #endif
8717 #ifdef TARGET_NR_getsockname
8718     case TARGET_NR_getsockname:
8719         return do_getsockname(arg1, arg2, arg3);
8720 #endif
8721 #ifdef TARGET_NR_getsockopt
8722     case TARGET_NR_getsockopt:
8723         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8724 #endif
8725 #ifdef TARGET_NR_listen
8726     case TARGET_NR_listen:
8727         return get_errno(listen(arg1, arg2));
8728 #endif
8729 #ifdef TARGET_NR_recv
8730     case TARGET_NR_recv:
8731         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8732 #endif
8733 #ifdef TARGET_NR_recvfrom
8734     case TARGET_NR_recvfrom:
8735         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8736 #endif
8737 #ifdef TARGET_NR_recvmsg
8738     case TARGET_NR_recvmsg:
8739         return do_sendrecvmsg(arg1, arg2, arg3, 0);
8740 #endif
8741 #ifdef TARGET_NR_send
8742     case TARGET_NR_send:
8743         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8744 #endif
8745 #ifdef TARGET_NR_sendmsg
8746     case TARGET_NR_sendmsg:
8747         return do_sendrecvmsg(arg1, arg2, arg3, 1);
8748 #endif
8749 #ifdef TARGET_NR_sendmmsg
8750     case TARGET_NR_sendmmsg:
8751         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8752     case TARGET_NR_recvmmsg:
8753         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8754 #endif
8755 #ifdef TARGET_NR_sendto
8756     case TARGET_NR_sendto:
8757         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8758 #endif
8759 #ifdef TARGET_NR_shutdown
8760     case TARGET_NR_shutdown:
8761         return get_errno(shutdown(arg1, arg2));
8762 #endif
8763 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8764     case TARGET_NR_getrandom:
8765         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8766         if (!p) {
8767             return -TARGET_EFAULT;
8768         }
8769         ret = get_errno(getrandom(p, arg2, arg3));
8770         unlock_user(p, arg1, ret);
8771         return ret;
8772 #endif
8773 #ifdef TARGET_NR_socket
8774     case TARGET_NR_socket:
8775         return do_socket(arg1, arg2, arg3);
8776 #endif
8777 #ifdef TARGET_NR_socketpair
8778     case TARGET_NR_socketpair:
8779         return do_socketpair(arg1, arg2, arg3, arg4);
8780 #endif
8781 #ifdef TARGET_NR_setsockopt
8782     case TARGET_NR_setsockopt:
8783         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8784 #endif
8785 #if defined(TARGET_NR_syslog)
8786     case TARGET_NR_syslog:
8787         {
8788             int len = arg3;
8789 
8790             switch (arg1) {
8791             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
8792             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
8793             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
8794             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
8795             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
8796             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8797             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
8798             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
8799                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8800             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
8801             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
8802             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
8803                 {
8804                     if (len < 0) {
8805                         return -TARGET_EINVAL;
8806                     }
8807                     if (len == 0) {
8808                         return 0;
8809                     }
8810                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8811                     if (!p) {
8812                         return -TARGET_EFAULT;
8813                     }
8814                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8815                     unlock_user(p, arg2, arg3);
8816                 }
8817                 return ret;
8818             default:
8819                 return -TARGET_EINVAL;
8820             }
8821         }
8822         break;
8823 #endif
8824     case TARGET_NR_setitimer:
8825         {
8826             struct itimerval value, ovalue, *pvalue;
8827 
8828             if (arg2) {
8829                 pvalue = &value;
8830                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8831                     || copy_from_user_timeval(&pvalue->it_value,
8832                                               arg2 + sizeof(struct target_timeval)))
8833                     return -TARGET_EFAULT;
8834             } else {
8835                 pvalue = NULL;
8836             }
8837             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8838             if (!is_error(ret) && arg3) {
8839                 if (copy_to_user_timeval(arg3,
8840                                          &ovalue.it_interval)
8841                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8842                                             &ovalue.it_value))
8843                     return -TARGET_EFAULT;
8844             }
8845         }
8846         return ret;
8847     case TARGET_NR_getitimer:
8848         {
8849             struct itimerval value;
8850 
8851             ret = get_errno(getitimer(arg1, &value));
8852             if (!is_error(ret) && arg2) {
8853                 if (copy_to_user_timeval(arg2,
8854                                          &value.it_interval)
8855                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8856                                             &value.it_value))
8857                     return -TARGET_EFAULT;
8858             }
8859         }
8860         return ret;
8861 #ifdef TARGET_NR_stat
8862     case TARGET_NR_stat:
8863         if (!(p = lock_user_string(arg1))) {
8864             return -TARGET_EFAULT;
8865         }
8866         ret = get_errno(stat(path(p), &st));
8867         unlock_user(p, arg1, 0);
8868         goto do_stat;
8869 #endif
8870 #ifdef TARGET_NR_lstat
8871     case TARGET_NR_lstat:
8872         if (!(p = lock_user_string(arg1))) {
8873             return -TARGET_EFAULT;
8874         }
8875         ret = get_errno(lstat(path(p), &st));
8876         unlock_user(p, arg1, 0);
8877         goto do_stat;
8878 #endif
8879 #ifdef TARGET_NR_fstat
8880     case TARGET_NR_fstat:
8881         {
8882             ret = get_errno(fstat(arg1, &st));
8883 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8884         do_stat:
8885 #endif
8886             if (!is_error(ret)) {
8887                 struct target_stat *target_st;
8888 
8889                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8890                     return -TARGET_EFAULT;
8891                 memset(target_st, 0, sizeof(*target_st));
8892                 __put_user(st.st_dev, &target_st->st_dev);
8893                 __put_user(st.st_ino, &target_st->st_ino);
8894                 __put_user(st.st_mode, &target_st->st_mode);
8895                 __put_user(st.st_uid, &target_st->st_uid);
8896                 __put_user(st.st_gid, &target_st->st_gid);
8897                 __put_user(st.st_nlink, &target_st->st_nlink);
8898                 __put_user(st.st_rdev, &target_st->st_rdev);
8899                 __put_user(st.st_size, &target_st->st_size);
8900                 __put_user(st.st_blksize, &target_st->st_blksize);
8901                 __put_user(st.st_blocks, &target_st->st_blocks);
8902                 __put_user(st.st_atime, &target_st->target_st_atime);
8903                 __put_user(st.st_mtime, &target_st->target_st_mtime);
8904                 __put_user(st.st_ctime, &target_st->target_st_ctime);
8905 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
8906     defined(TARGET_STAT_HAVE_NSEC)
8907                 __put_user(st.st_atim.tv_nsec,
8908                            &target_st->target_st_atime_nsec);
8909                 __put_user(st.st_mtim.tv_nsec,
8910                            &target_st->target_st_mtime_nsec);
8911                 __put_user(st.st_ctim.tv_nsec,
8912                            &target_st->target_st_ctime_nsec);
8913 #endif
8914                 unlock_user_struct(target_st, arg2, 1);
8915             }
8916         }
8917         return ret;
8918 #endif
8919     case TARGET_NR_vhangup:
8920         return get_errno(vhangup());
8921 #ifdef TARGET_NR_syscall
8922     case TARGET_NR_syscall:
8923         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8924                           arg6, arg7, arg8, 0);
8925 #endif
8926     case TARGET_NR_wait4:
8927         {
8928             int status;
8929             abi_long status_ptr = arg2;
8930             struct rusage rusage, *rusage_ptr;
8931             abi_ulong target_rusage = arg4;
8932             abi_long rusage_err;
8933             if (target_rusage)
8934                 rusage_ptr = &rusage;
8935             else
8936                 rusage_ptr = NULL;
8937             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8938             if (!is_error(ret)) {
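                     /* Only copy back a wait status if a child was actually
                      * reaped; with WNOHANG, wait4() can return 0. */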
8939                 if (status_ptr && ret) {
8940                     status = host_to_target_waitstatus(status);
8941                     if (put_user_s32(status, status_ptr))
8942                         return -TARGET_EFAULT;
8943                 }
8944                 if (target_rusage) {
8945                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
8946                     if (rusage_err) {
8947                         ret = rusage_err;
8948                     }
8949                 }
8950             }
8951         }
8952         return ret;
8953 #ifdef TARGET_NR_swapoff
8954     case TARGET_NR_swapoff:
8955         if (!(p = lock_user_string(arg1)))
8956             return -TARGET_EFAULT;
8957         ret = get_errno(swapoff(p));
8958         unlock_user(p, arg1, 0);
8959         return ret;
8960 #endif
8961     case TARGET_NR_sysinfo:
8962         {
8963             struct target_sysinfo *target_value;
8964             struct sysinfo value;
8965             ret = get_errno(sysinfo(&value));
8966             if (!is_error(ret) && arg1)
8967             {
8968                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8969                     return -TARGET_EFAULT;
8970                 __put_user(value.uptime, &target_value->uptime);
8971                 __put_user(value.loads[0], &target_value->loads[0]);
8972                 __put_user(value.loads[1], &target_value->loads[1]);
8973                 __put_user(value.loads[2], &target_value->loads[2]);
8974                 __put_user(value.totalram, &target_value->totalram);
8975                 __put_user(value.freeram, &target_value->freeram);
8976                 __put_user(value.sharedram, &target_value->sharedram);
8977                 __put_user(value.bufferram, &target_value->bufferram);
8978                 __put_user(value.totalswap, &target_value->totalswap);
8979                 __put_user(value.freeswap, &target_value->freeswap);
8980                 __put_user(value.procs, &target_value->procs);
8981                 __put_user(value.totalhigh, &target_value->totalhigh);
8982                 __put_user(value.freehigh, &target_value->freehigh);
8983                 __put_user(value.mem_unit, &target_value->mem_unit);
8984                 unlock_user_struct(target_value, arg1, 1);
8985             }
8986         }
8987         return ret;
8988 #ifdef TARGET_NR_ipc
8989     case TARGET_NR_ipc:
8990         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
8991 #endif
8992 #ifdef TARGET_NR_semget
8993     case TARGET_NR_semget:
8994         return get_errno(semget(arg1, arg2, arg3));
8995 #endif
8996 #ifdef TARGET_NR_semop
8997     case TARGET_NR_semop:
8998         return do_semop(arg1, arg2, arg3);
8999 #endif
9000 #ifdef TARGET_NR_semctl
9001     case TARGET_NR_semctl:
9002         return do_semctl(arg1, arg2, arg3, arg4);
9003 #endif
9004 #ifdef TARGET_NR_msgctl
9005     case TARGET_NR_msgctl:
9006         return do_msgctl(arg1, arg2, arg3);
9007 #endif
9008 #ifdef TARGET_NR_msgget
9009     case TARGET_NR_msgget:
9010         return get_errno(msgget(arg1, arg2));
9011 #endif
9012 #ifdef TARGET_NR_msgrcv
9013     case TARGET_NR_msgrcv:
9014         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9015 #endif
9016 #ifdef TARGET_NR_msgsnd
9017     case TARGET_NR_msgsnd:
9018         return do_msgsnd(arg1, arg2, arg3, arg4);
9019 #endif
9020 #ifdef TARGET_NR_shmget
9021     case TARGET_NR_shmget:
9022         return get_errno(shmget(arg1, arg2, arg3));
9023 #endif
9024 #ifdef TARGET_NR_shmctl
9025     case TARGET_NR_shmctl:
9026         return do_shmctl(arg1, arg2, arg3);
9027 #endif
9028 #ifdef TARGET_NR_shmat
9029     case TARGET_NR_shmat:
9030         return do_shmat(cpu_env, arg1, arg2, arg3);
9031 #endif
9032 #ifdef TARGET_NR_shmdt
9033     case TARGET_NR_shmdt:
9034         return do_shmdt(arg1);
9035 #endif
9036     case TARGET_NR_fsync:
9037         return get_errno(fsync(arg1));
9038     case TARGET_NR_clone:
9039         /* Linux manages to have three different orderings for its
9040          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9041          * match the kernel's CONFIG_CLONE_* settings.
9042          * Microblaze is further special in that it uses a sixth
9043          * implicit argument to clone for the TLS pointer.
9044          */
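         /* Assuming do_fork() takes (env, flags, newsp, parent_tidptr, tls,
          * child_tidptr), the guest argument orders handled below are:
          *   default:          flags, newsp, parent_tidptr, child_tidptr, tls
          *   CLONE_BACKWARDS:  flags, newsp, parent_tidptr, tls, child_tidptr
          *   CLONE_BACKWARDS2: newsp, flags, parent_tidptr, child_tidptr, tls
          */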
9045 #if defined(TARGET_MICROBLAZE)
9046         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9047 #elif defined(TARGET_CLONE_BACKWARDS)
9048         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9049 #elif defined(TARGET_CLONE_BACKWARDS2)
9050         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9051 #else
9052         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9053 #endif
9054         return ret;
9055 #ifdef __NR_exit_group
9056         /* new thread calls */
9057     case TARGET_NR_exit_group:
9058         preexit_cleanup(cpu_env, arg1);
9059         return get_errno(exit_group(arg1));
9060 #endif
9061     case TARGET_NR_setdomainname:
9062         if (!(p = lock_user_string(arg1)))
9063             return -TARGET_EFAULT;
9064         ret = get_errno(setdomainname(p, arg2));
9065         unlock_user(p, arg1, 0);
9066         return ret;
9067     case TARGET_NR_uname:
9068         /* No need to transcode because we use the Linux syscall.  */
9069         {
9070             struct new_utsname * buf;
9071 
9072             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9073                 return -TARGET_EFAULT;
9074             ret = get_errno(sys_uname(buf));
9075             if (!is_error(ret)) {
9076                 /* Overwrite the native machine name with whatever is being
9077                    emulated. */
9078                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9079                           sizeof(buf->machine));
9080                 /* Allow the user to override the reported release.  */
9081                 if (qemu_uname_release && *qemu_uname_release) {
9082                     g_strlcpy(buf->release, qemu_uname_release,
9083                               sizeof(buf->release));
9084                 }
9085             }
9086             unlock_user_struct(buf, arg1, 1);
9087         }
9088         return ret;
9089 #ifdef TARGET_I386
9090     case TARGET_NR_modify_ldt:
9091         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9092 #if !defined(TARGET_X86_64)
9093     case TARGET_NR_vm86:
9094         return do_vm86(cpu_env, arg1, arg2);
9095 #endif
9096 #endif
9097     case TARGET_NR_adjtimex:
9098         {
9099             struct timex host_buf;
9100 
9101             if (target_to_host_timex(&host_buf, arg1) != 0) {
9102                 return -TARGET_EFAULT;
9103             }
9104             ret = get_errno(adjtimex(&host_buf));
9105             if (!is_error(ret)) {
9106                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9107                     return -TARGET_EFAULT;
9108                 }
9109             }
9110         }
9111         return ret;
9112 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9113     case TARGET_NR_clock_adjtime:
9114         {
9115             struct timex htx, *phtx = &htx;
9116 
9117             if (target_to_host_timex(phtx, arg2) != 0) {
9118                 return -TARGET_EFAULT;
9119             }
9120             ret = get_errno(clock_adjtime(arg1, phtx));
9121             if (!is_error(ret) && phtx) {
9122                 if (host_to_target_timex(arg2, phtx) != 0) {
9123                     return -TARGET_EFAULT;
9124                 }
9125             }
9126         }
9127         return ret;
9128 #endif
9129     case TARGET_NR_getpgid:
9130         return get_errno(getpgid(arg1));
9131     case TARGET_NR_fchdir:
9132         return get_errno(fchdir(arg1));
9133     case TARGET_NR_personality:
9134         return get_errno(personality(arg1));
9135 #ifdef TARGET_NR__llseek /* Not on alpha */
9136     case TARGET_NR__llseek:
9137         {
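                 /* The 64-bit offset arrives split across arg2 (high) and
                  * arg3 (low); the resulting file position is written back
                  * to the guest through the pointer in arg4. */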
9138             int64_t res;
9139 #if !defined(__NR_llseek)
9140             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9141             if (res == -1) {
9142                 ret = get_errno(res);
9143             } else {
9144                 ret = 0;
9145             }
9146 #else
9147             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9148 #endif
9149             if ((ret == 0) && put_user_s64(res, arg4)) {
9150                 return -TARGET_EFAULT;
9151             }
9152         }
9153         return ret;
9154 #endif
9155 #ifdef TARGET_NR_getdents
9156     case TARGET_NR_getdents:
9157 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9158 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
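             /* Host struct linux_dirent uses 64-bit longs here while the
              * target dirent uses 32-bit abi_longs, so read into a bounce
              * buffer and repack each record into the guest's buffer. */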
9159         {
9160             struct target_dirent *target_dirp;
9161             struct linux_dirent *dirp;
9162             abi_long count = arg3;
9163 
9164             dirp = g_try_malloc(count);
9165             if (!dirp) {
9166                 return -TARGET_ENOMEM;
9167             }
9168 
9169             ret = get_errno(sys_getdents(arg1, dirp, count));
9170             if (!is_error(ret)) {
9171                 struct linux_dirent *de;
9172                 struct target_dirent *tde;
9173                 int len = ret;
9174                 int reclen, treclen;
9175                 int count1, tnamelen;
9176 
9177                 count1 = 0;
9178                 de = dirp;
9179                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) {
                         g_free(dirp);
9180                     return -TARGET_EFAULT;
                     }
9181                 tde = target_dirp;
9182                 while (len > 0) {
9183                     reclen = de->d_reclen;
9184                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9185                     assert(tnamelen >= 0);
9186                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9187                     assert(count1 + treclen <= count);
9188                     tde->d_reclen = tswap16(treclen);
9189                     tde->d_ino = tswapal(de->d_ino);
9190                     tde->d_off = tswapal(de->d_off);
9191                     memcpy(tde->d_name, de->d_name, tnamelen);
9192                     de = (struct linux_dirent *)((char *)de + reclen);
9193                     len -= reclen;
9194                     tde = (struct target_dirent *)((char *)tde + treclen);
9195                     count1 += treclen;
9196                 }
9197                 ret = count1;
9198                 unlock_user(target_dirp, arg2, ret);
9199             }
9200             g_free(dirp);
9201         }
9202 #else
9203         {
9204             struct linux_dirent *dirp;
9205             abi_long count = arg3;
9206 
9207             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9208                 return -TARGET_EFAULT;
9209             ret = get_errno(sys_getdents(arg1, dirp, count));
9210             if (!is_error(ret)) {
9211                 struct linux_dirent *de;
9212                 int len = ret;
9213                 int reclen;
9214                 de = dirp;
9215                 while (len > 0) {
9216                     reclen = de->d_reclen;
9217                     if (reclen > len)
9218                         break;
9219                     de->d_reclen = tswap16(reclen);
9220                     tswapls(&de->d_ino);
9221                     tswapls(&de->d_off);
9222                     de = (struct linux_dirent *)((char *)de + reclen);
9223                     len -= reclen;
9224                 }
9225             }
9226             unlock_user(dirp, arg2, ret);
9227         }
9228 #endif
9229 #else
9230         /* Implement getdents in terms of getdents64 */
9231         {
9232             struct linux_dirent64 *dirp;
9233             abi_long count = arg3;
9234 
9235             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9236             if (!dirp) {
9237                 return -TARGET_EFAULT;
9238             }
9239             ret = get_errno(sys_getdents64(arg1, dirp, count));
9240             if (!is_error(ret)) {
9241                 /* Convert the dirent64 structs to target dirent.  We do this
9242                  * in-place, since we can guarantee that a target_dirent is no
9243                  * larger than a dirent64; however this means we have to be
9244                  * careful to read everything before writing in the new format.
9245                  */
9246                 struct linux_dirent64 *de;
9247                 struct target_dirent *tde;
9248                 int len = ret;
9249                 int tlen = 0;
9250 
9251                 de = dirp;
9252                 tde = (struct target_dirent *)dirp;
9253                 while (len > 0) {
9254                     int namelen, treclen;
9255                     int reclen = de->d_reclen;
9256                     uint64_t ino = de->d_ino;
9257                     int64_t off = de->d_off;
9258                     uint8_t type = de->d_type;
9259 
9260                     namelen = strlen(de->d_name);
9261                     treclen = offsetof(struct target_dirent, d_name)
9262                         + namelen + 2;
9263                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9264 
9265                     memmove(tde->d_name, de->d_name, namelen + 1);
9266                     tde->d_ino = tswapal(ino);
9267                     tde->d_off = tswapal(off);
9268                     tde->d_reclen = tswap16(treclen);
9269                     /* The target_dirent type is in what was formerly a padding
9270                      * byte at the end of the structure:
9271                      */
9272                     *(((char *)tde) + treclen - 1) = type;
9273 
9274                     de = (struct linux_dirent64 *)((char *)de + reclen);
9275                     tde = (struct target_dirent *)((char *)tde + treclen);
9276                     len -= reclen;
9277                     tlen += treclen;
9278                 }
9279                 ret = tlen;
9280             }
9281             unlock_user(dirp, arg2, ret);
9282         }
9283 #endif
9284         return ret;
9285 #endif /* TARGET_NR_getdents */
9286 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9287     case TARGET_NR_getdents64:
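             /* struct linux_dirent64 is laid out the same for host and target
              * here, so the records only need their multi-byte fields
              * byte-swapped in place. */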
9288         {
9289             struct linux_dirent64 *dirp;
9290             abi_long count = arg3;
9291             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9292                 return -TARGET_EFAULT;
9293             ret = get_errno(sys_getdents64(arg1, dirp, count));
9294             if (!is_error(ret)) {
9295                 struct linux_dirent64 *de;
9296                 int len = ret;
9297                 int reclen;
9298                 de = dirp;
9299                 while (len > 0) {
9300                     reclen = de->d_reclen;
9301                     if (reclen > len)
9302                         break;
9303                     de->d_reclen = tswap16(reclen);
9304                     tswap64s((uint64_t *)&de->d_ino);
9305                     tswap64s((uint64_t *)&de->d_off);
9306                     de = (struct linux_dirent64 *)((char *)de + reclen);
9307                     len -= reclen;
9308                 }
9309             }
9310             unlock_user(dirp, arg2, ret);
9311         }
9312         return ret;
9313 #endif /* TARGET_NR_getdents64 */
9314 #if defined(TARGET_NR__newselect)
9315     case TARGET_NR__newselect:
9316         return do_select(arg1, arg2, arg3, arg4, arg5);
9317 #endif
9318 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9319 # ifdef TARGET_NR_poll
9320     case TARGET_NR_poll:
9321 # endif
9322 # ifdef TARGET_NR_ppoll
9323     case TARGET_NR_ppoll:
9324 # endif
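             /* poll and ppoll share the pollfd array conversion below; they
              * differ only in how the timeout and signal mask arguments are
              * handled. */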
9325         {
9326             struct target_pollfd *target_pfd;
9327             unsigned int nfds = arg2;
9328             struct pollfd *pfd;
9329             unsigned int i;
9330 
9331             pfd = NULL;
9332             target_pfd = NULL;
9333             if (nfds) {
9334                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9335                     return -TARGET_EINVAL;
9336                 }
9337 
9338                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9339                                        sizeof(struct target_pollfd) * nfds, 1);
9340                 if (!target_pfd) {
9341                     return -TARGET_EFAULT;
9342                 }
9343 
9344                 pfd = alloca(sizeof(struct pollfd) * nfds);
9345                 for (i = 0; i < nfds; i++) {
9346                     pfd[i].fd = tswap32(target_pfd[i].fd);
9347                     pfd[i].events = tswap16(target_pfd[i].events);
9348                 }
9349             }
9350 
9351             switch (num) {
9352 # ifdef TARGET_NR_ppoll
9353             case TARGET_NR_ppoll:
9354             {
9355                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9356                 target_sigset_t *target_set;
9357                 sigset_t _set, *set = &_set;
9358 
9359                 if (arg3) {
9360                     if (target_to_host_timespec(timeout_ts, arg3)) {
9361                         unlock_user(target_pfd, arg1, 0);
9362                         return -TARGET_EFAULT;
9363                     }
9364                 } else {
9365                     timeout_ts = NULL;
9366                 }
9367 
9368                 if (arg4) {
9369                     if (arg5 != sizeof(target_sigset_t)) {
9370                         unlock_user(target_pfd, arg1, 0);
9371                         return -TARGET_EINVAL;
9372                     }
9373 
9374                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9375                     if (!target_set) {
9376                         unlock_user(target_pfd, arg1, 0);
9377                         return -TARGET_EFAULT;
9378                     }
9379                     target_to_host_sigset(set, target_set);
9380                 } else {
9381                     set = NULL;
9382                 }
9383 
9384                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9385                                            set, SIGSET_T_SIZE));
9386 
9387                 if (!is_error(ret) && arg3) {
9388                     host_to_target_timespec(arg3, timeout_ts);
9389                 }
9390                 if (arg4) {
9391                     unlock_user(target_set, arg4, 0);
9392                 }
9393                 break;
9394             }
9395 # endif
9396 # ifdef TARGET_NR_poll
9397             case TARGET_NR_poll:
9398             {
9399                 struct timespec ts, *pts;
9400 
9401                 if (arg3 >= 0) {
9402                     /* Convert ms to secs, ns */
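                          /* e.g. a 2500 ms timeout becomes { tv_sec = 2,
                           * tv_nsec = 500000000 }. */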
9403                     ts.tv_sec = arg3 / 1000;
9404                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9405                     pts = &ts;
9406                 } else {
9407                     /* -ve poll() timeout means "infinite" */
9408                     pts = NULL;
9409                 }
9410                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9411                 break;
9412             }
9413 # endif
9414             default:
9415                 g_assert_not_reached();
9416             }
9417 
9418             if (!is_error(ret)) {
9419                 for (i = 0; i < nfds; i++) {
9420                     target_pfd[i].revents = tswap16(pfd[i].revents);
9421                 }
9422             }
9423             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9424         }
9425         return ret;
9426 #endif
9427     case TARGET_NR_flock:
9428         /* NOTE: the flock constant seems to be the same for every
9429            Linux platform */
9430         return get_errno(safe_flock(arg1, arg2));
9431     case TARGET_NR_readv:
9432         {
9433             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9434             if (vec != NULL) {
9435                 ret = get_errno(safe_readv(arg1, vec, arg3));
9436                 unlock_iovec(vec, arg2, arg3, 1);
9437             } else {
9438                 ret = -host_to_target_errno(errno);
9439             }
9440         }
9441         return ret;
9442     case TARGET_NR_writev:
9443         {
9444             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9445             if (vec != NULL) {
9446                 ret = get_errno(safe_writev(arg1, vec, arg3));
9447                 unlock_iovec(vec, arg2, arg3, 0);
9448             } else {
9449                 ret = -host_to_target_errno(errno);
9450             }
9451         }
9452         return ret;
9453 #if defined(TARGET_NR_preadv)
9454     case TARGET_NR_preadv:
9455         {
9456             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9457             if (vec != NULL) {
9458                 unsigned long low, high;
9459 
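                     /* arg4/arg5 carry the guest's file offset (possibly as a
                      * 32-bit register pair); target_to_host_low_high()
                      * combines them into the low/high offset words the host
                      * syscall expects. */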
9460                 target_to_host_low_high(arg4, arg5, &low, &high);
9461                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9462                 unlock_iovec(vec, arg2, arg3, 1);
9463             } else {
9464                 ret = -host_to_target_errno(errno);
9465             }
9466         }
9467         return ret;
9468 #endif
9469 #if defined(TARGET_NR_pwritev)
9470     case TARGET_NR_pwritev:
9471         {
9472             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9473             if (vec != NULL) {
9474                 unsigned long low, high;
9475 
9476                 target_to_host_low_high(arg4, arg5, &low, &high);
9477                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9478                 unlock_iovec(vec, arg2, arg3, 0);
9479             } else {
9480                 ret = -host_to_target_errno(errno);
9481             }
9482         }
9483         return ret;
9484 #endif
9485     case TARGET_NR_getsid:
9486         return get_errno(getsid(arg1));
9487 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9488     case TARGET_NR_fdatasync:
9489         return get_errno(fdatasync(arg1));
9490 #endif
9491 #ifdef TARGET_NR__sysctl
9492     case TARGET_NR__sysctl:
9493         /* We don't implement this, but ENOTDIR is always a safe
9494            return value. */
9495         return -TARGET_ENOTDIR;
9496 #endif
9497     case TARGET_NR_sched_getaffinity:
9498         {
9499             unsigned int mask_size;
9500             unsigned long *mask;
9501 
9502             /*
9503              * sched_getaffinity needs multiples of ulong, so need to take
9504              * care of mismatches between target ulong and host ulong sizes.
9505              */
9506             if (arg2 & (sizeof(abi_ulong) - 1)) {
9507                 return -TARGET_EINVAL;
9508             }
9509             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
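                 /* e.g. with 8-byte host longs, a 12-byte request from a
                  * 32-bit guest is rounded up to a 16-byte mask for the host
                  * syscall. */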
9510 
9511             mask = alloca(mask_size);
9512             memset(mask, 0, mask_size);
9513             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9514 
9515             if (!is_error(ret)) {
9516                 if (ret > arg2) {
9517                     /* More data returned than the caller's buffer will fit.
9518                      * This only happens if sizeof(abi_long) < sizeof(long)
9519                      * and the caller passed us a buffer holding an odd number
9520                      * of abi_longs. If the host kernel is actually using the
9521                      * extra 4 bytes then fail EINVAL; otherwise we can just
9522                      * ignore them and only copy the interesting part.
9523                      */
9524                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9525                     if (numcpus > arg2 * 8) {
9526                         return -TARGET_EINVAL;
9527                     }
9528                     ret = arg2;
9529                 }
9530 
9531                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9532                     return -TARGET_EFAULT;
9533                 }
9534             }
9535         }
9536         return ret;
9537     case TARGET_NR_sched_setaffinity:
9538         {
9539             unsigned int mask_size;
9540             unsigned long *mask;
9541 
9542             /*
9543              * sched_setaffinity needs multiples of ulong, so need to take
9544              * care of mismatches between target ulong and host ulong sizes.
9545              */
9546             if (arg2 & (sizeof(abi_ulong) - 1)) {
9547                 return -TARGET_EINVAL;
9548             }
9549             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9550             mask = alloca(mask_size);
9551 
9552             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9553             if (ret) {
9554                 return ret;
9555             }
9556 
9557             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9558         }
9559     case TARGET_NR_getcpu:
9560         {
9561             unsigned cpu, node;
9562             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9563                                        arg2 ? &node : NULL,
9564                                        NULL));
9565             if (is_error(ret)) {
9566                 return ret;
9567             }
9568             if (arg1 && put_user_u32(cpu, arg1)) {
9569                 return -TARGET_EFAULT;
9570             }
9571             if (arg2 && put_user_u32(node, arg2)) {
9572                 return -TARGET_EFAULT;
9573             }
9574         }
9575         return ret;
9576     case TARGET_NR_sched_setparam:
9577         {
9578             struct sched_param *target_schp;
9579             struct sched_param schp;
9580 
9581             if (arg2 == 0) {
9582                 return -TARGET_EINVAL;
9583             }
9584             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9585                 return -TARGET_EFAULT;
9586             schp.sched_priority = tswap32(target_schp->sched_priority);
9587             unlock_user_struct(target_schp, arg2, 0);
9588             return get_errno(sched_setparam(arg1, &schp));
9589         }
9590     case TARGET_NR_sched_getparam:
9591         {
9592             struct sched_param *target_schp;
9593             struct sched_param schp;
9594 
9595             if (arg2 == 0) {
9596                 return -TARGET_EINVAL;
9597             }
9598             ret = get_errno(sched_getparam(arg1, &schp));
9599             if (!is_error(ret)) {
9600                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9601                     return -TARGET_EFAULT;
9602                 target_schp->sched_priority = tswap32(schp.sched_priority);
9603                 unlock_user_struct(target_schp, arg2, 1);
9604             }
9605         }
9606         return ret;
9607     case TARGET_NR_sched_setscheduler:
9608         {
9609             struct sched_param *target_schp;
9610             struct sched_param schp;
9611             if (arg3 == 0) {
9612                 return -TARGET_EINVAL;
9613             }
9614             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9615                 return -TARGET_EFAULT;
9616             schp.sched_priority = tswap32(target_schp->sched_priority);
9617             unlock_user_struct(target_schp, arg3, 0);
9618             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9619         }
9620     case TARGET_NR_sched_getscheduler:
9621         return get_errno(sched_getscheduler(arg1));
9622     case TARGET_NR_sched_yield:
9623         return get_errno(sched_yield());
9624     case TARGET_NR_sched_get_priority_max:
9625         return get_errno(sched_get_priority_max(arg1));
9626     case TARGET_NR_sched_get_priority_min:
9627         return get_errno(sched_get_priority_min(arg1));
9628     case TARGET_NR_sched_rr_get_interval:
9629         {
9630             struct timespec ts;
9631             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9632             if (!is_error(ret)) {
9633                 ret = host_to_target_timespec(arg2, &ts);
9634             }
9635         }
9636         return ret;
9637     case TARGET_NR_nanosleep:
9638         {
9639             struct timespec req, rem;
9640             if (target_to_host_timespec(&req, arg1)) {
                     return -TARGET_EFAULT;
                 }
9641             ret = get_errno(safe_nanosleep(&req, &rem));
9642             if (is_error(ret) && arg2) {
9643                 host_to_target_timespec(arg2, &rem);
9644             }
9645         }
9646         return ret;
9647     case TARGET_NR_prctl:
9648         switch (arg1) {
9649         case PR_GET_PDEATHSIG:
9650         {
9651             int deathsig;
9652             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9653             if (!is_error(ret) && arg2
9654                 && put_user_ual(deathsig, arg2)) {
9655                 return -TARGET_EFAULT;
9656             }
9657             return ret;
9658         }
9659 #ifdef PR_GET_NAME
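             /* The task name buffer is TASK_COMM_LEN (16) bytes, NUL
              * terminator included. */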
9660         case PR_GET_NAME:
9661         {
9662             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9663             if (!name) {
9664                 return -TARGET_EFAULT;
9665             }
9666             ret = get_errno(prctl(arg1, (unsigned long)name,
9667                                   arg3, arg4, arg5));
9668             unlock_user(name, arg2, 16);
9669             return ret;
9670         }
9671         case PR_SET_NAME:
9672         {
9673             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9674             if (!name) {
9675                 return -TARGET_EFAULT;
9676             }
9677             ret = get_errno(prctl(arg1, (unsigned long)name,
9678                                   arg3, arg4, arg5));
9679             unlock_user(name, arg2, 0);
9680             return ret;
9681         }
9682 #endif
9683 #ifdef TARGET_MIPS
9684         case TARGET_PR_GET_FP_MODE:
9685         {
9686             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9687             ret = 0;
9688             if (env->CP0_Status & (1 << CP0St_FR)) {
9689                 ret |= TARGET_PR_FP_MODE_FR;
9690             }
9691             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9692                 ret |= TARGET_PR_FP_MODE_FRE;
9693             }
9694             return ret;
9695         }
9696         case TARGET_PR_SET_FP_MODE:
9697         {
9698             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9699             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9700             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
9701             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9702             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9703 
9704             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
9705                                             TARGET_PR_FP_MODE_FRE;
9706 
9707             /* If nothing to change, return right away, successfully.  */
9708             if (old_fr == new_fr && old_fre == new_fre) {
9709                 return 0;
9710             }
9711             /* Check the value is valid */
9712             if (arg2 & ~known_bits) {
9713                 return -TARGET_EOPNOTSUPP;
9714             }
9715             /* Setting FRE without FR is not supported.  */
9716             if (new_fre && !new_fr) {
9717                 return -TARGET_EOPNOTSUPP;
9718             }
9719             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9720                 /* FR1 is not supported */
9721                 return -TARGET_EOPNOTSUPP;
9722             }
9723             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9724                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9725                 /* cannot set FR=0 */
9726                 return -TARGET_EOPNOTSUPP;
9727             }
9728             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9729                 /* Cannot set FRE=1 */
9730                 return -TARGET_EOPNOTSUPP;
9731             }
9732 
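                 /* Changing FR changes how 64-bit FP values map onto the FPR
                  * array (even/odd pairs vs. full 64-bit registers), so move
                  * the upper halves to preserve register contents across the
                  * switch. */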
9733             int i;
9734             fpr_t *fpr = env->active_fpu.fpr;
9735             for (i = 0; i < 32 ; i += 2) {
9736                 if (!old_fr && new_fr) {
9737                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9738                 } else if (old_fr && !new_fr) {
9739                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
9740                 }
9741             }
9742 
9743             if (new_fr) {
9744                 env->CP0_Status |= (1 << CP0St_FR);
9745                 env->hflags |= MIPS_HFLAG_F64;
9746             } else {
9747                 env->CP0_Status &= ~(1 << CP0St_FR);
9748                 env->hflags &= ~MIPS_HFLAG_F64;
9749             }
9750             if (new_fre) {
9751                 env->CP0_Config5 |= (1 << CP0C5_FRE);
9752                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
9753                     env->hflags |= MIPS_HFLAG_FRE;
9754                 }
9755             } else {
9756                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
9757                 env->hflags &= ~MIPS_HFLAG_FRE;
9758             }
9759 
9760             return 0;
9761         }
9762 #endif /* MIPS */
9763 #ifdef TARGET_AARCH64
9764         case TARGET_PR_SVE_SET_VL:
9765             /*
9766              * We cannot support either PR_SVE_SET_VL_ONEXEC or
9767              * PR_SVE_VL_INHERIT.  Note the kernel definition
9768              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9769              * even though the current architectural maximum is VQ=16.
9770              */
9771             ret = -TARGET_EINVAL;
9772             if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env))
9773                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9774                 CPUARMState *env = cpu_env;
9775                 ARMCPU *cpu = arm_env_get_cpu(env);
9776                 uint32_t vq, old_vq;
9777 
9778                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9779                 vq = MAX(arg2 / 16, 1);
9780                 vq = MIN(vq, cpu->sve_max_vq);
9781 
9782                 if (vq < old_vq) {
9783                     aarch64_sve_narrow_vq(env, vq);
9784                 }
9785                 env->vfp.zcr_el[1] = vq - 1;
9786                 ret = vq * 16;
9787             }
9788             return ret;
9789         case TARGET_PR_SVE_GET_VL:
9790             ret = -TARGET_EINVAL;
9791             {
9792                 ARMCPU *cpu = arm_env_get_cpu(cpu_env);
9793                 if (cpu_isar_feature(aa64_sve, cpu)) {
9794                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9795                 }
9796             }
9797             return ret;
9798         case TARGET_PR_PAC_RESET_KEYS:
9799             {
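                     /* arg2 is a mask selecting which pointer-authentication
                      * keys to regenerate; 0 means reset all of them. */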
9800                 CPUARMState *env = cpu_env;
9801                 ARMCPU *cpu = arm_env_get_cpu(env);
9802 
9803                 if (arg3 || arg4 || arg5) {
9804                     return -TARGET_EINVAL;
9805                 }
9806                 if (cpu_isar_feature(aa64_pauth, cpu)) {
9807                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
9808                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
9809                                TARGET_PR_PAC_APGAKEY);
9810                     if (arg2 == 0) {
9811                         arg2 = all;
9812                     } else if (arg2 & ~all) {
9813                         return -TARGET_EINVAL;
9814                     }
9815                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
9816                         arm_init_pauth_key(&env->apia_key);
9817                     }
9818                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
9819                         arm_init_pauth_key(&env->apib_key);
9820                     }
9821                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
9822                         arm_init_pauth_key(&env->apda_key);
9823                     }
9824                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
9825                         arm_init_pauth_key(&env->apdb_key);
9826                     }
9827                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
9828                         arm_init_pauth_key(&env->apga_key);
9829                     }
9830                     return 0;
9831                 }
9832             }
9833             return -TARGET_EINVAL;
9834 #endif /* AARCH64 */
9835         case PR_GET_SECCOMP:
9836         case PR_SET_SECCOMP:
9837             /* Disable seccomp to prevent the target disabling syscalls we
9838              * need. */
9839             return -TARGET_EINVAL;
9840         default:
9841             /* Most prctl options have no pointer arguments */
9842             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9843         }
9844         break;
9845 #ifdef TARGET_NR_arch_prctl
9846     case TARGET_NR_arch_prctl:
9847 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9848         return do_arch_prctl(cpu_env, arg1, arg2);
9849 #else
9850 #error unreachable
9851 #endif
9852 #endif
9853 #ifdef TARGET_NR_pread64
9854     case TARGET_NR_pread64:
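             /* On ABIs that pass 64-bit syscall arguments in aligned register
              * pairs the offset starts one slot later, so shift the arguments
              * down. */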
9855         if (regpairs_aligned(cpu_env, num)) {
9856             arg4 = arg5;
9857             arg5 = arg6;
9858         }
9859         if (arg2 == 0 && arg3 == 0) {
9860             /* Special-case NULL buffer and zero length, which should succeed */
9861             p = 0;
9862         } else {
9863             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9864             if (!p) {
9865                 return -TARGET_EFAULT;
9866             }
9867         }
9868         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9869         unlock_user(p, arg2, ret);
9870         return ret;
9871     case TARGET_NR_pwrite64:
9872         if (regpairs_aligned(cpu_env, num)) {
9873             arg4 = arg5;
9874             arg5 = arg6;
9875         }
9876         if (arg2 == 0 && arg3 == 0) {
9877             /* Special-case NULL buffer and zero length, which should succeed */
9878             p = 0;
9879         } else {
9880             p = lock_user(VERIFY_READ, arg2, arg3, 1);
9881             if (!p) {
9882                 return -TARGET_EFAULT;
9883             }
9884         }
9885         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9886         unlock_user(p, arg2, 0);
9887         return ret;
9888 #endif
9889     case TARGET_NR_getcwd:
9890         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9891             return -TARGET_EFAULT;
9892         ret = get_errno(sys_getcwd1(p, arg2));
9893         unlock_user(p, arg1, ret);
9894         return ret;
9895     case TARGET_NR_capget:
9896     case TARGET_NR_capset:
9897     {
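             /* capget and capset share conversion of the header and data
              * structures; only the direction of the data copy differs. */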
9898         struct target_user_cap_header *target_header;
9899         struct target_user_cap_data *target_data = NULL;
9900         struct __user_cap_header_struct header;
9901         struct __user_cap_data_struct data[2];
9902         struct __user_cap_data_struct *dataptr = NULL;
9903         int i, target_datalen;
9904         int data_items = 1;
9905 
9906         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9907             return -TARGET_EFAULT;
9908         }
9909         header.version = tswap32(target_header->version);
9910         header.pid = tswap32(target_header->pid);
9911 
9912         if (header.version != _LINUX_CAPABILITY_VERSION) {
9913             /* Version 2 and up takes pointer to two user_data structs */
9914             data_items = 2;
9915         }
9916 
9917         target_datalen = sizeof(*target_data) * data_items;
9918 
9919         if (arg2) {
9920             if (num == TARGET_NR_capget) {
9921                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9922             } else {
9923                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9924             }
9925             if (!target_data) {
9926                 unlock_user_struct(target_header, arg1, 0);
9927                 return -TARGET_EFAULT;
9928             }
9929 
9930             if (num == TARGET_NR_capset) {
9931                 for (i = 0; i < data_items; i++) {
9932                     data[i].effective = tswap32(target_data[i].effective);
9933                     data[i].permitted = tswap32(target_data[i].permitted);
9934                     data[i].inheritable = tswap32(target_data[i].inheritable);
9935                 }
9936             }
9937 
9938             dataptr = data;
9939         }
9940 
9941         if (num == TARGET_NR_capget) {
9942             ret = get_errno(capget(&header, dataptr));
9943         } else {
9944             ret = get_errno(capset(&header, dataptr));
9945         }
9946 
9947         /* The kernel always updates version for both capget and capset */
9948         target_header->version = tswap32(header.version);
9949         unlock_user_struct(target_header, arg1, 1);
9950 
9951         if (arg2) {
9952             if (num == TARGET_NR_capget) {
9953                 for (i = 0; i < data_items; i++) {
9954                     target_data[i].effective = tswap32(data[i].effective);
9955                     target_data[i].permitted = tswap32(data[i].permitted);
9956                     target_data[i].inheritable = tswap32(data[i].inheritable);
9957                 }
9958                 unlock_user(target_data, arg2, target_datalen);
9959             } else {
9960                 unlock_user(target_data, arg2, 0);
9961             }
9962         }
9963         return ret;
9964     }
9965     case TARGET_NR_sigaltstack:
9966         return do_sigaltstack(arg1, arg2,
9967                               get_sp_from_cpustate((CPUArchState *)cpu_env));
9968 
9969 #ifdef CONFIG_SENDFILE
9970 #ifdef TARGET_NR_sendfile
9971     case TARGET_NR_sendfile:
9972     {
9973         off_t *offp = NULL;
9974         off_t off;
9975         if (arg3) {
9976             ret = get_user_sal(off, arg3);
9977             if (is_error(ret)) {
9978                 return ret;
9979             }
9980             offp = &off;
9981         }
9982         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9983         if (!is_error(ret) && arg3) {
9984             abi_long ret2 = put_user_sal(off, arg3);
9985             if (is_error(ret2)) {
9986                 ret = ret2;
9987             }
9988         }
9989         return ret;
9990     }
9991 #endif
9992 #ifdef TARGET_NR_sendfile64
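         /* Like sendfile above, but the offset in guest memory is always
          * 64-bit. */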
9993     case TARGET_NR_sendfile64:
9994     {
9995         off_t *offp = NULL;
9996         off_t off;
9997         if (arg3) {
9998             ret = get_user_s64(off, arg3);
9999             if (is_error(ret)) {
10000                 return ret;
10001             }
10002             offp = &off;
10003         }
10004         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10005         if (!is_error(ret) && arg3) {
10006             abi_long ret2 = put_user_s64(off, arg3);
10007             if (is_error(ret2)) {
10008                 ret = ret2;
10009             }
10010         }
10011         return ret;
10012     }
10013 #endif
10014 #endif
10015 #ifdef TARGET_NR_vfork
10016     case TARGET_NR_vfork:
10017         return get_errno(do_fork(cpu_env,
10018                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10019                          0, 0, 0, 0));
10020 #endif
10021 #ifdef TARGET_NR_ugetrlimit
10022     case TARGET_NR_ugetrlimit:
10023     {
10024         struct rlimit rlim;
10025         int resource = target_to_host_resource(arg1);
10026         ret = get_errno(getrlimit(resource, &rlim));
10027         if (!is_error(ret)) {
10028             struct target_rlimit *target_rlim;
10029             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10030                 return -TARGET_EFAULT;
10031             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10032             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10033             unlock_user_struct(target_rlim, arg2, 1);
10034         }
10035         return ret;
10036     }
10037 #endif
10038 #ifdef TARGET_NR_truncate64
10039     case TARGET_NR_truncate64:
10040         if (!(p = lock_user_string(arg1)))
10041             return -TARGET_EFAULT;
10042         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10043         unlock_user(p, arg1, 0);
10044         return ret;
10045 #endif
10046 #ifdef TARGET_NR_ftruncate64
10047     case TARGET_NR_ftruncate64:
10048         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10049 #endif
10050 #ifdef TARGET_NR_stat64
10051     case TARGET_NR_stat64:
10052         if (!(p = lock_user_string(arg1))) {
10053             return -TARGET_EFAULT;
10054         }
10055         ret = get_errno(stat(path(p), &st));
10056         unlock_user(p, arg1, 0);
10057         if (!is_error(ret))
10058             ret = host_to_target_stat64(cpu_env, arg2, &st);
10059         return ret;
10060 #endif
10061 #ifdef TARGET_NR_lstat64
10062     case TARGET_NR_lstat64:
10063         if (!(p = lock_user_string(arg1))) {
10064             return -TARGET_EFAULT;
10065         }
10066         ret = get_errno(lstat(path(p), &st));
10067         unlock_user(p, arg1, 0);
10068         if (!is_error(ret))
10069             ret = host_to_target_stat64(cpu_env, arg2, &st);
10070         return ret;
10071 #endif
10072 #ifdef TARGET_NR_fstat64
10073     case TARGET_NR_fstat64:
10074         ret = get_errno(fstat(arg1, &st));
10075         if (!is_error(ret))
10076             ret = host_to_target_stat64(cpu_env, arg2, &st);
10077         return ret;
10078 #endif
10079 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10080 #ifdef TARGET_NR_fstatat64
10081     case TARGET_NR_fstatat64:
10082 #endif
10083 #ifdef TARGET_NR_newfstatat
10084     case TARGET_NR_newfstatat:
10085 #endif
10086         if (!(p = lock_user_string(arg2))) {
10087             return -TARGET_EFAULT;
10088         }
10089         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10090         unlock_user(p, arg2, 0);
10091         if (!is_error(ret))
10092             ret = host_to_target_stat64(cpu_env, arg3, &st);
10093         return ret;
10094 #endif
10095 #ifdef TARGET_NR_lchown
10096     case TARGET_NR_lchown:
10097         if (!(p = lock_user_string(arg1)))
10098             return -TARGET_EFAULT;
10099         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10100         unlock_user(p, arg1, 0);
10101         return ret;
10102 #endif
10103 #ifdef TARGET_NR_getuid
10104     case TARGET_NR_getuid:
10105         return get_errno(high2lowuid(getuid()));
10106 #endif
10107 #ifdef TARGET_NR_getgid
10108     case TARGET_NR_getgid:
10109         return get_errno(high2lowgid(getgid()));
10110 #endif
10111 #ifdef TARGET_NR_geteuid
10112     case TARGET_NR_geteuid:
10113         return get_errno(high2lowuid(geteuid()));
10114 #endif
10115 #ifdef TARGET_NR_getegid
10116     case TARGET_NR_getegid:
10117         return get_errno(high2lowgid(getegid()));
10118 #endif
10119     case TARGET_NR_setreuid:
10120         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10121     case TARGET_NR_setregid:
10122         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10123     case TARGET_NR_getgroups:
10124         {
10125             int gidsetsize = arg1;
10126             target_id *target_grouplist;
10127             gid_t *grouplist;
10128             int i;
10129 
10130             grouplist = alloca(gidsetsize * sizeof(gid_t));
10131             ret = get_errno(getgroups(gidsetsize, grouplist));
10132             if (gidsetsize == 0)
10133                 return ret;
10134             if (!is_error(ret)) {
10135                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10136                 if (!target_grouplist)
10137                     return -TARGET_EFAULT;
10138                 for (i = 0; i < ret; i++)
10139                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10140                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10141             }
10142         }
10143         return ret;
10144     case TARGET_NR_setgroups:
10145         {
10146             int gidsetsize = arg1;
10147             target_id *target_grouplist;
10148             gid_t *grouplist = NULL;
10149             int i;
10150             if (gidsetsize) {
10151                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10152                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10153                 if (!target_grouplist) {
10154                     return -TARGET_EFAULT;
10155                 }
10156                 for (i = 0; i < gidsetsize; i++) {
10157                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10158                 }
10159                 unlock_user(target_grouplist, arg2, 0);
10160             }
10161             return get_errno(setgroups(gidsetsize, grouplist));
10162         }
10163     case TARGET_NR_fchown:
10164         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10165 #if defined(TARGET_NR_fchownat)
10166     case TARGET_NR_fchownat:
10167         if (!(p = lock_user_string(arg2)))
10168             return -TARGET_EFAULT;
10169         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10170                                  low2highgid(arg4), arg5));
10171         unlock_user(p, arg2, 0);
10172         return ret;
10173 #endif
10174 #ifdef TARGET_NR_setresuid
10175     case TARGET_NR_setresuid:
10176         return get_errno(sys_setresuid(low2highuid(arg1),
10177                                        low2highuid(arg2),
10178                                        low2highuid(arg3)));
10179 #endif
10180 #ifdef TARGET_NR_getresuid
10181     case TARGET_NR_getresuid:
10182         {
10183             uid_t ruid, euid, suid;
10184             ret = get_errno(getresuid(&ruid, &euid, &suid));
10185             if (!is_error(ret)) {
10186                 if (put_user_id(high2lowuid(ruid), arg1)
10187                     || put_user_id(high2lowuid(euid), arg2)
10188                     || put_user_id(high2lowuid(suid), arg3))
10189                     return -TARGET_EFAULT;
10190             }
10191         }
10192         return ret;
10193 #endif
10194 #ifdef TARGET_NR_getresgid
10195     case TARGET_NR_setresgid:
10196         return get_errno(sys_setresgid(low2highgid(arg1),
10197                                        low2highgid(arg2),
10198                                        low2highgid(arg3)));
10199 #endif
10200 #ifdef TARGET_NR_getresgid
10201     case TARGET_NR_getresgid:
10202         {
10203             gid_t rgid, egid, sgid;
10204             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10205             if (!is_error(ret)) {
10206                 if (put_user_id(high2lowgid(rgid), arg1)
10207                     || put_user_id(high2lowgid(egid), arg2)
10208                     || put_user_id(high2lowgid(sgid), arg3))
10209                     return -TARGET_EFAULT;
10210             }
10211         }
10212         return ret;
10213 #endif
10214 #ifdef TARGET_NR_chown
10215     case TARGET_NR_chown:
10216         if (!(p = lock_user_string(arg1)))
10217             return -TARGET_EFAULT;
10218         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10219         unlock_user(p, arg1, 0);
10220         return ret;
10221 #endif
10222     case TARGET_NR_setuid:
10223         return get_errno(sys_setuid(low2highuid(arg1)));
10224     case TARGET_NR_setgid:
10225         return get_errno(sys_setgid(low2highgid(arg1)));
10226     case TARGET_NR_setfsuid:
10227         return get_errno(setfsuid(arg1));
10228     case TARGET_NR_setfsgid:
10229         return get_errno(setfsgid(arg1));
10230 
10231 #ifdef TARGET_NR_lchown32
10232     case TARGET_NR_lchown32:
10233         if (!(p = lock_user_string(arg1)))
10234             return -TARGET_EFAULT;
10235         ret = get_errno(lchown(p, arg2, arg3));
10236         unlock_user(p, arg1, 0);
10237         return ret;
10238 #endif
10239 #ifdef TARGET_NR_getuid32
10240     case TARGET_NR_getuid32:
10241         return get_errno(getuid());
10242 #endif
10243 
10244 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10245    /* Alpha specific */
10246     case TARGET_NR_getxuid:
10247          {
10248             uid_t euid;
10249             euid = geteuid();
10250             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10251          }
10252         return get_errno(getuid());
10253 #endif
10254 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10255    /* Alpha specific */
10256     case TARGET_NR_getxgid:
10257          {
10258             gid_t egid;
10259             egid = getegid();
10260             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10261          }
10262         return get_errno(getgid());
10263 #endif
10264 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10265     /* Alpha specific */
10266     case TARGET_NR_osf_getsysinfo:
10267         ret = -TARGET_EOPNOTSUPP;
10268         switch (arg1) {
10269           case TARGET_GSI_IEEE_FP_CONTROL:
10270             {
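                      /* Return the software completion control word with its
                       * exception status bits refreshed from the hardware
                       * FPCR. */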
10271                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10272                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10273 
10274                 swcr &= ~SWCR_STATUS_MASK;
10275                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10276 
10277                 if (put_user_u64(swcr, arg2))
10278                     return -TARGET_EFAULT;
10279                 ret = 0;
10280             }
10281             break;
10282 
10283           /* case GSI_IEEE_STATE_AT_SIGNAL:
10284              -- Not implemented in linux kernel.
10285              case GSI_UACPROC:
10286              -- Retrieves current unaligned access state; not much used.
10287              case GSI_PROC_TYPE:
10288              -- Retrieves implver information; surely not used.
10289              case GSI_GET_HWRPB:
10290              -- Grabs a copy of the HWRPB; surely not used.
10291           */
10292         }
10293         return ret;
10294 #endif
10295 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10296     /* Alpha specific */
10297     case TARGET_NR_osf_setsysinfo:
10298         ret = -TARGET_EOPNOTSUPP;
10299         switch (arg1) {
10300           case TARGET_SSI_IEEE_FP_CONTROL:
10301             {
10302                 uint64_t swcr, fpcr;
10303 
10304                 if (get_user_u64(swcr, arg2)) {
10305                     return -TARGET_EFAULT;
10306                 }
10307 
10308                 /*
10309                  * The kernel calls swcr_update_status to update the
10310                  * status bits from the fpcr at every point that it
10311                  * could be queried.  Therefore, we store the status
10312                  * bits only in FPCR.
10313                  */
10314                 ((CPUAlphaState *)cpu_env)->swcr
10315                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10316 
10317                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10318                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10319                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10320                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10321                 ret = 0;
10322             }
10323             break;
10324 
10325           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10326             {
10327                 uint64_t exc, fpcr, fex;
10328 
10329                 if (get_user_u64(exc, arg2)) {
10330                     return -TARGET_EFAULT;
10331                 }
10332                 exc &= SWCR_STATUS_MASK;
10333                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10334 
10335                 /* Old exceptions are not signaled.  */
10336                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10337                 fex = exc & ~fex;
10338                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10339                 fex &= ((CPUArchState *)cpu_env)->swcr;
10340 
10341                 /* Update the hardware fpcr.  */
10342                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10343                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10344 
10345                 if (fex) {
10346                     int si_code = TARGET_FPE_FLTUNK;
10347                     target_siginfo_t info;
10348 
10349                     if (fex & SWCR_TRAP_ENABLE_DNO) {
10350                         si_code = TARGET_FPE_FLTUND;
10351                     }
10352                     if (fex & SWCR_TRAP_ENABLE_INE) {
10353                         si_code = TARGET_FPE_FLTRES;
10354                     }
10355                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10356                         si_code = TARGET_FPE_FLTUND;
10357                     }
10358                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10359                         si_code = TARGET_FPE_FLTOVF;
10360                     }
10361                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10362                         si_code = TARGET_FPE_FLTDIV;
10363                     }
10364                     if (fex & SWCR_TRAP_ENABLE_INV) {
10365                         si_code = TARGET_FPE_FLTINV;
10366                     }
10367 
10368                     info.si_signo = SIGFPE;
10369                     info.si_errno = 0;
10370                     info.si_code = si_code;
10371                     info._sifields._sigfault._addr
10372                         = ((CPUArchState *)cpu_env)->pc;
10373                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10374                                  QEMU_SI_FAULT, &info);
10375                 }
10376                 ret = 0;
10377             }
10378             break;
10379 
10380           /* case SSI_NVPAIRS:
10381              -- Used with SSIN_UACPROC to enable unaligned accesses.
10382              case SSI_IEEE_STATE_AT_SIGNAL:
10383              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10384              -- Not implemented in linux kernel
10385           */
10386         }
10387         return ret;
10388 #endif
10389 #ifdef TARGET_NR_osf_sigprocmask
10390     /* Alpha specific.  */
10391     case TARGET_NR_osf_sigprocmask:
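           /* Unlike the generic sigprocmask, the old mask is handed back in
            * the syscall return value rather than through a pointer
            * argument. */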
10392         {
10393             abi_ulong mask;
10394             int how;
10395             sigset_t set, oldset;
10396 
10397             switch (arg1) {
10398             case TARGET_SIG_BLOCK:
10399                 how = SIG_BLOCK;
10400                 break;
10401             case TARGET_SIG_UNBLOCK:
10402                 how = SIG_UNBLOCK;
10403                 break;
10404             case TARGET_SIG_SETMASK:
10405                 how = SIG_SETMASK;
10406                 break;
10407             default:
10408                 return -TARGET_EINVAL;
10409             }
10410             mask = arg2;
10411             target_to_host_old_sigset(&set, &mask);
10412             ret = do_sigprocmask(how, &set, &oldset);
10413             if (!ret) {
10414                 host_to_target_old_sigset(&mask, &oldset);
10415                 ret = mask;
10416             }
10417         }
10418         return ret;
10419 #endif
10420 
10421 #ifdef TARGET_NR_getgid32
10422     case TARGET_NR_getgid32:
10423         return get_errno(getgid());
10424 #endif
10425 #ifdef TARGET_NR_geteuid32
10426     case TARGET_NR_geteuid32:
10427         return get_errno(geteuid());
10428 #endif
10429 #ifdef TARGET_NR_getegid32
10430     case TARGET_NR_getegid32:
10431         return get_errno(getegid());
10432 #endif
10433 #ifdef TARGET_NR_setreuid32
10434     case TARGET_NR_setreuid32:
10435         return get_errno(setreuid(arg1, arg2));
10436 #endif
10437 #ifdef TARGET_NR_setregid32
10438     case TARGET_NR_setregid32:
10439         return get_errno(setregid(arg1, arg2));
10440 #endif
10441 #ifdef TARGET_NR_getgroups32
10442     case TARGET_NR_getgroups32:
10443         {
10444             int gidsetsize = arg1;
10445             uint32_t *target_grouplist;
10446             gid_t *grouplist;
10447             int i;
10448 
10449             grouplist = alloca(gidsetsize * sizeof(gid_t));
10450             ret = get_errno(getgroups(gidsetsize, grouplist));
10451             if (gidsetsize == 0)
10452                 return ret;
10453             if (!is_error(ret)) {
10454                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10455                 if (!target_grouplist) {
10456                     return -TARGET_EFAULT;
10457                 }
10458                 for (i = 0; i < ret; i++)
10459                     target_grouplist[i] = tswap32(grouplist[i]);
10460                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10461             }
10462         }
10463         return ret;
10464 #endif
10465 #ifdef TARGET_NR_setgroups32
10466     case TARGET_NR_setgroups32:
10467         {
10468             int gidsetsize = arg1;
10469             uint32_t *target_grouplist;
10470             gid_t *grouplist;
10471             int i;
10472 
10473             grouplist = alloca(gidsetsize * sizeof(gid_t));
10474             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10475             if (!target_grouplist) {
10476                 return -TARGET_EFAULT;
10477             }
10478             for (i = 0; i < gidsetsize; i++)
10479                 grouplist[i] = tswap32(target_grouplist[i]);
10480             unlock_user(target_grouplist, arg2, 0);
10481             return get_errno(setgroups(gidsetsize, grouplist));
10482         }
10483 #endif
10484 #ifdef TARGET_NR_fchown32
10485     case TARGET_NR_fchown32:
10486         return get_errno(fchown(arg1, arg2, arg3));
10487 #endif
10488 #ifdef TARGET_NR_setresuid32
10489     case TARGET_NR_setresuid32:
10490         return get_errno(sys_setresuid(arg1, arg2, arg3));
10491 #endif
10492 #ifdef TARGET_NR_getresuid32
10493     case TARGET_NR_getresuid32:
10494         {
10495             uid_t ruid, euid, suid;
10496             ret = get_errno(getresuid(&ruid, &euid, &suid));
10497             if (!is_error(ret)) {
10498                 if (put_user_u32(ruid, arg1)
10499                     || put_user_u32(euid, arg2)
10500                     || put_user_u32(suid, arg3))
10501                     return -TARGET_EFAULT;
10502             }
10503         }
10504         return ret;
10505 #endif
10506 #ifdef TARGET_NR_setresgid32
10507     case TARGET_NR_setresgid32:
10508         return get_errno(sys_setresgid(arg1, arg2, arg3));
10509 #endif
10510 #ifdef TARGET_NR_getresgid32
10511     case TARGET_NR_getresgid32:
10512         {
10513             gid_t rgid, egid, sgid;
10514             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10515             if (!is_error(ret)) {
10516                 if (put_user_u32(rgid, arg1)
10517                     || put_user_u32(egid, arg2)
10518                     || put_user_u32(sgid, arg3))
10519                     return -TARGET_EFAULT;
10520             }
10521         }
10522         return ret;
10523 #endif
10524 #ifdef TARGET_NR_chown32
10525     case TARGET_NR_chown32:
10526         if (!(p = lock_user_string(arg1)))
10527             return -TARGET_EFAULT;
10528         ret = get_errno(chown(p, arg2, arg3));
10529         unlock_user(p, arg1, 0);
10530         return ret;
10531 #endif
10532 #ifdef TARGET_NR_setuid32
10533     case TARGET_NR_setuid32:
10534         return get_errno(sys_setuid(arg1));
10535 #endif
10536 #ifdef TARGET_NR_setgid32
10537     case TARGET_NR_setgid32:
10538         return get_errno(sys_setgid(arg1));
10539 #endif
10540 #ifdef TARGET_NR_setfsuid32
10541     case TARGET_NR_setfsuid32:
10542         return get_errno(setfsuid(arg1));
10543 #endif
10544 #ifdef TARGET_NR_setfsgid32
10545     case TARGET_NR_setfsgid32:
10546         return get_errno(setfsgid(arg1));
10547 #endif
10548 #ifdef TARGET_NR_mincore
10549     case TARGET_NR_mincore:
10550         {
10551             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10552             if (!a) {
10553                 return -TARGET_ENOMEM;
10554             }
10555             p = lock_user_string(arg3);
10556             if (!p) {
10557                 ret = -TARGET_EFAULT;
10558             } else {
10559                 ret = get_errno(mincore(a, arg2, p));
10560                 unlock_user(p, arg3, ret);
10561             }
10562             unlock_user(a, arg1, 0);
10563         }
10564         return ret;
10565 #endif
10566 #ifdef TARGET_NR_arm_fadvise64_64
10567     case TARGET_NR_arm_fadvise64_64:
10568         /* arm_fadvise64_64 looks like fadvise64_64 but
10569          * with different argument order: fd, advice, offset, len
10570          * rather than the usual fd, offset, len, advice.
10571          * Note that offset and len are both 64-bit so appear as
10572          * pairs of 32-bit registers.
10573          */
10574         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10575                             target_offset64(arg5, arg6), arg2);
10576         return -host_to_target_errno(ret);
10577 #endif
10578 
10579 #if TARGET_ABI_BITS == 32
10580 
10581 #ifdef TARGET_NR_fadvise64_64
10582     case TARGET_NR_fadvise64_64:
10583 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10584         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10585         ret = arg2;
10586         arg2 = arg3;
10587         arg3 = arg4;
10588         arg4 = arg5;
10589         arg5 = arg6;
10590         arg6 = ret;
10591 #else
10592         /* 6 args: fd, offset (high, low), len (high, low), advice */
10593         if (regpairs_aligned(cpu_env, num)) {
10594             /* offset is in (3,4), len in (5,6) and advice in 7 */
10595             arg2 = arg3;
10596             arg3 = arg4;
10597             arg4 = arg5;
10598             arg5 = arg6;
10599             arg6 = arg7;
10600         }
10601 #endif
10602         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10603                             target_offset64(arg4, arg5), arg6);
10604         return -host_to_target_errno(ret);
10605 #endif
10606 
10607 #ifdef TARGET_NR_fadvise64
10608     case TARGET_NR_fadvise64:
10609         /* 5 args: fd, offset (high, low), len, advice */
10610         if (regpairs_aligned(cpu_env, num)) {
10611             /* offset is in (3,4), len in 5 and advice in 6 */
10612             arg2 = arg3;
10613             arg3 = arg4;
10614             arg4 = arg5;
10615             arg5 = arg6;
10616         }
10617         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10618         return -host_to_target_errno(ret);
10619 #endif
10620 
10621 #else /* not a 32-bit ABI */
10622 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10623 #ifdef TARGET_NR_fadvise64_64
10624     case TARGET_NR_fadvise64_64:
10625 #endif
10626 #ifdef TARGET_NR_fadvise64
10627     case TARGET_NR_fadvise64:
10628 #endif
10629 #ifdef TARGET_S390X
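        /* s390x uses 6/7 for POSIX_FADV_DONTNEED/NOREUSE instead of the
         * generic 4/5, so remap the guest advice value for the host.
         */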
10630         switch (arg4) {
10631         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10632         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10633         case 6: arg4 = POSIX_FADV_DONTNEED; break;
10634         case 7: arg4 = POSIX_FADV_NOREUSE; break;
10635         default: break;
10636         }
10637 #endif
10638         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10639 #endif
10640 #endif /* end of 64-bit ABI fadvise handling */
10641 
10642 #ifdef TARGET_NR_madvise
10643     case TARGET_NR_madvise:
10644         /* A straight passthrough may not be safe because qemu sometimes
10645            turns private file-backed mappings into anonymous mappings.
10646            This will break MADV_DONTNEED.
10647            This is a hint, so ignoring and returning success is ok.  */
10648         return 0;
10649 #endif
10650 #if TARGET_ABI_BITS == 32
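    /* fcntl64 is only needed on 32-bit ABIs: it provides the 64-bit
     * struct flock64 locking commands; everything else is forwarded to
     * do_fcntl().
     */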
10651     case TARGET_NR_fcntl64:
10652     {
        int cmd;
        struct flock64 fl;
10655         from_flock64_fn *copyfrom = copy_from_user_flock64;
10656         to_flock64_fn *copyto = copy_to_user_flock64;
10657 
10658 #ifdef TARGET_ARM
10659         if (!((CPUARMState *)cpu_env)->eabi) {
10660             copyfrom = copy_from_user_oabi_flock64;
10661             copyto = copy_to_user_oabi_flock64;
10662         }
10663 #endif
10664 
        cmd = target_to_host_fcntl_cmd(arg2);
10666         if (cmd == -TARGET_EINVAL) {
10667             return cmd;
10668         }
10669 
        switch (arg2) {
10671         case TARGET_F_GETLK64:
10672             ret = copyfrom(&fl, arg3);
10673             if (ret) {
10674                 break;
10675             }
10676             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10677             if (ret == 0) {
10678                 ret = copyto(arg3, &fl);
10679             }
            break;
10681 
10682         case TARGET_F_SETLK64:
10683         case TARGET_F_SETLKW64:
10684             ret = copyfrom(&fl, arg3);
10685             if (ret) {
10686                 break;
10687             }
10688             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
10690         default:
10691             ret = do_fcntl(arg1, arg2, arg3);
10692             break;
10693         }
10694         return ret;
10695     }
10696 #endif
10697 #ifdef TARGET_NR_cacheflush
10698     case TARGET_NR_cacheflush:
10699         /* self-modifying code is handled automatically, so nothing needed */
10700         return 0;
10701 #endif
10702 #ifdef TARGET_NR_getpagesize
10703     case TARGET_NR_getpagesize:
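        /* Report the guest's page size, which may differ from the host's. */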
10704         return TARGET_PAGE_SIZE;
10705 #endif
10706     case TARGET_NR_gettid:
10707         return get_errno(sys_gettid());
10708 #ifdef TARGET_NR_readahead
10709     case TARGET_NR_readahead:
10710 #if TARGET_ABI_BITS == 32
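        /* The 64-bit offset is passed in a pair of 32-bit registers; some
         * targets require such pairs to start on an even register, in which
         * case the arguments are shifted up by one.
         */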
10711         if (regpairs_aligned(cpu_env, num)) {
10712             arg2 = arg3;
10713             arg3 = arg4;
10714             arg4 = arg5;
10715         }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
10717 #else
10718         ret = get_errno(readahead(arg1, arg2, arg3));
10719 #endif
10720         return ret;
10721 #endif
10722 #ifdef CONFIG_ATTR
10723 #ifdef TARGET_NR_setxattr
10724     case TARGET_NR_listxattr:
10725     case TARGET_NR_llistxattr:
10726     {
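        /* arg2 may be 0: listxattr() with a NULL buffer just reports the
         * size needed, so only lock guest memory when a buffer is supplied.
         */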
10727         void *p, *b = 0;
10728         if (arg2) {
10729             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10730             if (!b) {
10731                 return -TARGET_EFAULT;
10732             }
10733         }
10734         p = lock_user_string(arg1);
10735         if (p) {
10736             if (num == TARGET_NR_listxattr) {
10737                 ret = get_errno(listxattr(p, b, arg3));
10738             } else {
10739                 ret = get_errno(llistxattr(p, b, arg3));
10740             }
10741         } else {
10742             ret = -TARGET_EFAULT;
10743         }
10744         unlock_user(p, arg1, 0);
10745         unlock_user(b, arg2, arg3);
10746         return ret;
10747     }
10748     case TARGET_NR_flistxattr:
10749     {
10750         void *b = 0;
10751         if (arg2) {
10752             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10753             if (!b) {
10754                 return -TARGET_EFAULT;
10755             }
10756         }
10757         ret = get_errno(flistxattr(arg1, b, arg3));
10758         unlock_user(b, arg2, arg3);
10759         return ret;
10760     }
10761     case TARGET_NR_setxattr:
10762     case TARGET_NR_lsetxattr:
10763         {
10764             void *p, *n, *v = 0;
10765             if (arg3) {
10766                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10767                 if (!v) {
10768                     return -TARGET_EFAULT;
10769                 }
10770             }
10771             p = lock_user_string(arg1);
10772             n = lock_user_string(arg2);
10773             if (p && n) {
10774                 if (num == TARGET_NR_setxattr) {
10775                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
10776                 } else {
10777                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10778                 }
10779             } else {
10780                 ret = -TARGET_EFAULT;
10781             }
10782             unlock_user(p, arg1, 0);
10783             unlock_user(n, arg2, 0);
10784             unlock_user(v, arg3, 0);
10785         }
10786         return ret;
10787     case TARGET_NR_fsetxattr:
10788         {
10789             void *n, *v = 0;
10790             if (arg3) {
10791                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10792                 if (!v) {
10793                     return -TARGET_EFAULT;
10794                 }
10795             }
10796             n = lock_user_string(arg2);
10797             if (n) {
10798                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10799             } else {
10800                 ret = -TARGET_EFAULT;
10801             }
10802             unlock_user(n, arg2, 0);
10803             unlock_user(v, arg3, 0);
10804         }
10805         return ret;
10806     case TARGET_NR_getxattr:
10807     case TARGET_NR_lgetxattr:
10808         {
10809             void *p, *n, *v = 0;
10810             if (arg3) {
10811                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10812                 if (!v) {
10813                     return -TARGET_EFAULT;
10814                 }
10815             }
10816             p = lock_user_string(arg1);
10817             n = lock_user_string(arg2);
10818             if (p && n) {
10819                 if (num == TARGET_NR_getxattr) {
10820                     ret = get_errno(getxattr(p, n, v, arg4));
10821                 } else {
10822                     ret = get_errno(lgetxattr(p, n, v, arg4));
10823                 }
10824             } else {
10825                 ret = -TARGET_EFAULT;
10826             }
10827             unlock_user(p, arg1, 0);
10828             unlock_user(n, arg2, 0);
10829             unlock_user(v, arg3, arg4);
10830         }
10831         return ret;
10832     case TARGET_NR_fgetxattr:
10833         {
10834             void *n, *v = 0;
10835             if (arg3) {
10836                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10837                 if (!v) {
10838                     return -TARGET_EFAULT;
10839                 }
10840             }
10841             n = lock_user_string(arg2);
10842             if (n) {
10843                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10844             } else {
10845                 ret = -TARGET_EFAULT;
10846             }
10847             unlock_user(n, arg2, 0);
10848             unlock_user(v, arg3, arg4);
10849         }
10850         return ret;
10851     case TARGET_NR_removexattr:
10852     case TARGET_NR_lremovexattr:
10853         {
10854             void *p, *n;
10855             p = lock_user_string(arg1);
10856             n = lock_user_string(arg2);
10857             if (p && n) {
10858                 if (num == TARGET_NR_removexattr) {
10859                     ret = get_errno(removexattr(p, n));
10860                 } else {
10861                     ret = get_errno(lremovexattr(p, n));
10862                 }
10863             } else {
10864                 ret = -TARGET_EFAULT;
10865             }
10866             unlock_user(p, arg1, 0);
10867             unlock_user(n, arg2, 0);
10868         }
10869         return ret;
10870     case TARGET_NR_fremovexattr:
10871         {
10872             void *n;
10873             n = lock_user_string(arg2);
10874             if (n) {
10875                 ret = get_errno(fremovexattr(arg1, n));
10876             } else {
10877                 ret = -TARGET_EFAULT;
10878             }
10879             unlock_user(n, arg2, 0);
10880         }
10881         return ret;
10882 #endif
10883 #endif /* CONFIG_ATTR */
10884 #ifdef TARGET_NR_set_thread_area
10885     case TARGET_NR_set_thread_area:
10886 #if defined(TARGET_MIPS)
10887       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10888       return 0;
10889 #elif defined(TARGET_CRIS)
      if (arg1 & 0xff) {
          ret = -TARGET_EINVAL;
      } else {
          ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
          ret = 0;
      }
10896       return ret;
10897 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10898       return do_set_thread_area(cpu_env, arg1);
10899 #elif defined(TARGET_M68K)
10900       {
10901           TaskState *ts = cpu->opaque;
10902           ts->tp_value = arg1;
10903           return 0;
10904       }
10905 #else
10906       return -TARGET_ENOSYS;
10907 #endif
10908 #endif
10909 #ifdef TARGET_NR_get_thread_area
10910     case TARGET_NR_get_thread_area:
10911 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10912         return do_get_thread_area(cpu_env, arg1);
10913 #elif defined(TARGET_M68K)
10914         {
10915             TaskState *ts = cpu->opaque;
10916             return ts->tp_value;
10917         }
10918 #else
10919         return -TARGET_ENOSYS;
10920 #endif
10921 #endif
10922 #ifdef TARGET_NR_getdomainname
10923     case TARGET_NR_getdomainname:
10924         return -TARGET_ENOSYS;
10925 #endif
10926 
10927 #ifdef TARGET_NR_clock_settime
10928     case TARGET_NR_clock_settime:
10929     {
10930         struct timespec ts;
10931 
10932         ret = target_to_host_timespec(&ts, arg2);
10933         if (!is_error(ret)) {
10934             ret = get_errno(clock_settime(arg1, &ts));
10935         }
10936         return ret;
10937     }
10938 #endif
10939 #ifdef TARGET_NR_clock_gettime
10940     case TARGET_NR_clock_gettime:
10941     {
10942         struct timespec ts;
10943         ret = get_errno(clock_gettime(arg1, &ts));
10944         if (!is_error(ret)) {
10945             ret = host_to_target_timespec(arg2, &ts);
10946         }
10947         return ret;
10948     }
10949 #endif
10950 #ifdef TARGET_NR_clock_getres
10951     case TARGET_NR_clock_getres:
10952     {
10953         struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret) && arg2) {
            ret = host_to_target_timespec(arg2, &ts);
        }
10958         return ret;
10959     }
10960 #endif
10961 #ifdef TARGET_NR_clock_nanosleep
10962     case TARGET_NR_clock_nanosleep:
10963     {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        if (arg4) {
            host_to_target_timespec(arg4, &ts);
        }
10970 
10971 #if defined(TARGET_PPC)
10972         /* clock_nanosleep is odd in that it returns positive errno values.
10973          * On PPC, CR0 bit 3 should be set in such a situation. */
10974         if (ret && ret != -TARGET_ERESTARTSYS) {
10975             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
10976         }
10977 #endif
10978         return ret;
10979     }
10980 #endif
10981 
10982 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10983     case TARGET_NR_set_tid_address:
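        /* The kernel stores this address and, on thread exit, clears it and
         * does a futex wake there; pass the host view of the guest address
         * via g2h().
         */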
10984         return get_errno(set_tid_address((int *)g2h(arg1)));
10985 #endif
10986 
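    /* Guest and host signal numbers can differ, so translate them before
     * forwarding tkill/tgkill to the host.
     */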
10987     case TARGET_NR_tkill:
10988         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10989 
10990     case TARGET_NR_tgkill:
10991         return get_errno(safe_tgkill((int)arg1, (int)arg2,
10992                          target_to_host_signal(arg3)));
10993 
10994 #ifdef TARGET_NR_set_robust_list
10995     case TARGET_NR_set_robust_list:
10996     case TARGET_NR_get_robust_list:
10997         /* The ABI for supporting robust futexes has userspace pass
10998          * the kernel a pointer to a linked list which is updated by
10999          * userspace after the syscall; the list is walked by the kernel
11000          * when the thread exits. Since the linked list in QEMU guest
11001          * memory isn't a valid linked list for the host and we have
11002          * no way to reliably intercept the thread-death event, we can't
11003          * support these. Silently return ENOSYS so that guest userspace
11004          * falls back to a non-robust futex implementation (which should
11005          * be OK except in the corner case of the guest crashing while
11006          * holding a mutex that is shared with another process via
11007          * shared memory).
11008          */
11009         return -TARGET_ENOSYS;
11010 #endif
11011 
11012 #if defined(TARGET_NR_utimensat)
11013     case TARGET_NR_utimensat:
11014         {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3) ||
                    target_to_host_timespec(ts + 1,
                                 arg3 + sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2) {
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            } else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
11032         }
11033         return ret;
11034 #endif
11035     case TARGET_NR_futex:
11036         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11037 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11038     case TARGET_NR_inotify_init:
11039         ret = get_errno(sys_inotify_init());
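        /* Register a read-side translator so struct inotify_event data read
         * from this fd is converted to the guest's layout.
         */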
11040         if (ret >= 0) {
11041             fd_trans_register(ret, &target_inotify_trans);
11042         }
11043         return ret;
11044 #endif
11045 #ifdef CONFIG_INOTIFY1
11046 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11047     case TARGET_NR_inotify_init1:
11048         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11049                                           fcntl_flags_tbl)));
11050         if (ret >= 0) {
11051             fd_trans_register(ret, &target_inotify_trans);
11052         }
11053         return ret;
11054 #endif
11055 #endif
11056 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11057     case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
11061         return ret;
11062 #endif
11063 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11064     case TARGET_NR_inotify_rm_watch:
11065         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11066 #endif
11067 
11068 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11069     case TARGET_NR_mq_open:
11070         {
11071             struct mq_attr posix_mq_attr;
11072             struct mq_attr *pposix_mq_attr;
11073             int host_flags;
11074 
11075             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11076             pposix_mq_attr = NULL;
11077             if (arg4) {
11078                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11079                     return -TARGET_EFAULT;
11080                 }
11081                 pposix_mq_attr = &posix_mq_attr;
11082             }
11083             p = lock_user_string(arg1 - 1);
11084             if (!p) {
11085                 return -TARGET_EFAULT;
11086             }
11087             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user(p, arg1, 0);
11089         }
11090         return ret;
11091 
11092     case TARGET_NR_mq_unlink:
11093         p = lock_user_string(arg1 - 1);
11094         if (!p) {
11095             return -TARGET_EFAULT;
11096         }
11097         ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
11099         return ret;
11100 
11101     case TARGET_NR_mq_timedsend:
11102         {
11103             struct timespec ts;
11104 
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
11114         }
11115         return ret;
11116 
11117     case TARGET_NR_mq_timedreceive:
11118         {
11119             struct timespec ts;
11120             unsigned int prio;
11121 
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
11135         }
11136         return ret;
11137 
11138     /* Not implemented for now... */
11139 /*     case TARGET_NR_mq_notify: */
11140 /*         break; */
11141 
11142     case TARGET_NR_mq_getsetattr:
11143         {
11144             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11145             ret = 0;
11146             if (arg2 != 0) {
                if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                    return -TARGET_EFAULT;
                }
11148                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11149                                            &posix_mq_attr_out));
11150             } else if (arg3 != 0) {
11151                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11152             }
11153             if (ret == 0 && arg3 != 0) {
11154                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11155             }
11156         }
11157         return ret;
11158 #endif
11159 
11160 #ifdef CONFIG_SPLICE
11161 #ifdef TARGET_NR_tee
11162     case TARGET_NR_tee:
11163         {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
11165         }
11166         return ret;
11167 #endif
11168 #ifdef TARGET_NR_splice
11169     case TARGET_NR_splice:
11170         {
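            /* splice() can update the offsets it is given, so copy them in
             * from guest memory and write them back afterwards.
             */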
11171             loff_t loff_in, loff_out;
11172             loff_t *ploff_in = NULL, *ploff_out = NULL;
11173             if (arg2) {
11174                 if (get_user_u64(loff_in, arg2)) {
11175                     return -TARGET_EFAULT;
11176                 }
11177                 ploff_in = &loff_in;
11178             }
11179             if (arg4) {
11180                 if (get_user_u64(loff_out, arg4)) {
11181                     return -TARGET_EFAULT;
11182                 }
11183                 ploff_out = &loff_out;
11184             }
11185             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11186             if (arg2) {
11187                 if (put_user_u64(loff_in, arg2)) {
11188                     return -TARGET_EFAULT;
11189                 }
11190             }
11191             if (arg4) {
11192                 if (put_user_u64(loff_out, arg4)) {
11193                     return -TARGET_EFAULT;
11194                 }
11195             }
11196         }
11197         return ret;
11198 #endif
11199 #ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
11201         {
11202             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11203             if (vec != NULL) {
11204                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11205                 unlock_iovec(vec, arg2, arg3, 0);
11206             } else {
11207                 ret = -host_to_target_errno(errno);
11208             }
11209         }
11210         return ret;
11211 #endif
11212 #endif /* CONFIG_SPLICE */
11213 #ifdef CONFIG_EVENTFD
11214 #if defined(TARGET_NR_eventfd)
11215     case TARGET_NR_eventfd:
11216         ret = get_errno(eventfd(arg1, 0));
11217         if (ret >= 0) {
11218             fd_trans_register(ret, &target_eventfd_trans);
11219         }
11220         return ret;
11221 #endif
11222 #if defined(TARGET_NR_eventfd2)
11223     case TARGET_NR_eventfd2:
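        /* O_NONBLOCK and O_CLOEXEC may have different values on the target,
         * so translate those two bits explicitly; other flag bits are passed
         * through unchanged.
         */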
11224     {
11225         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11226         if (arg2 & TARGET_O_NONBLOCK) {
11227             host_flags |= O_NONBLOCK;
11228         }
11229         if (arg2 & TARGET_O_CLOEXEC) {
11230             host_flags |= O_CLOEXEC;
11231         }
11232         ret = get_errno(eventfd(arg1, host_flags));
11233         if (ret >= 0) {
11234             fd_trans_register(ret, &target_eventfd_trans);
11235         }
11236         return ret;
11237     }
11238 #endif
11239 #endif /* CONFIG_EVENTFD  */
11240 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11241     case TARGET_NR_fallocate:
11242 #if TARGET_ABI_BITS == 32
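        /* On 32-bit ABIs, offset and len each arrive as a pair of 32-bit
         * arguments.
         */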
11243         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11244                                   target_offset64(arg5, arg6)));
11245 #else
11246         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11247 #endif
11248         return ret;
11249 #endif
11250 #if defined(CONFIG_SYNC_FILE_RANGE)
11251 #if defined(TARGET_NR_sync_file_range)
11252     case TARGET_NR_sync_file_range:
11253 #if TARGET_ABI_BITS == 32
11254 #if defined(TARGET_MIPS)
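        /* MIPS o32 inserts a padding argument so that the 64-bit offset
         * starts in an aligned register pair, shifting everything up by one.
         */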
11255         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11256                                         target_offset64(arg5, arg6), arg7));
11257 #else
11258         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11259                                         target_offset64(arg4, arg5), arg6));
11260 #endif /* !TARGET_MIPS */
11261 #else
11262         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11263 #endif
11264         return ret;
11265 #endif
11266 #if defined(TARGET_NR_sync_file_range2)
11267     case TARGET_NR_sync_file_range2:
11268         /* This is like sync_file_range but the arguments are reordered */
11269 #if TARGET_ABI_BITS == 32
11270         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11271                                         target_offset64(arg5, arg6), arg2));
11272 #else
11273         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11274 #endif
11275         return ret;
11276 #endif
11277 #endif
11278 #if defined(TARGET_NR_signalfd4)
11279     case TARGET_NR_signalfd4:
11280         return do_signalfd4(arg1, arg2, arg4);
11281 #endif
11282 #if defined(TARGET_NR_signalfd)
11283     case TARGET_NR_signalfd:
11284         return do_signalfd4(arg1, arg2, 0);
11285 #endif
11286 #if defined(CONFIG_EPOLL)
11287 #if defined(TARGET_NR_epoll_create)
11288     case TARGET_NR_epoll_create:
11289         return get_errno(epoll_create(arg1));
11290 #endif
11291 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11292     case TARGET_NR_epoll_create1:
11293         return get_errno(epoll_create1(arg1));
11294 #endif
11295 #if defined(TARGET_NR_epoll_ctl)
11296     case TARGET_NR_epoll_ctl:
11297     {
11298         struct epoll_event ep;
11299         struct epoll_event *epp = 0;
11300         if (arg4) {
11301             struct target_epoll_event *target_ep;
11302             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11303                 return -TARGET_EFAULT;
11304             }
11305             ep.events = tswap32(target_ep->events);
11306             /* The epoll_data_t union is just opaque data to the kernel,
11307              * so we transfer all 64 bits across and need not worry what
11308              * actual data type it is.
11309              */
11310             ep.data.u64 = tswap64(target_ep->data.u64);
11311             unlock_user_struct(target_ep, arg4, 0);
11312             epp = &ep;
11313         }
11314         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11315     }
11316 #endif
11317 
11318 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11319 #if defined(TARGET_NR_epoll_wait)
11320     case TARGET_NR_epoll_wait:
11321 #endif
11322 #if defined(TARGET_NR_epoll_pwait)
11323     case TARGET_NR_epoll_pwait:
11324 #endif
11325     {
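        /* Both syscalls are funnelled through epoll_pwait() on the host;
         * plain epoll_wait is simply the NULL-sigset case.
         */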
11326         struct target_epoll_event *target_ep;
11327         struct epoll_event *ep;
11328         int epfd = arg1;
11329         int maxevents = arg3;
11330         int timeout = arg4;
11331 
11332         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11333             return -TARGET_EINVAL;
11334         }
11335 
11336         target_ep = lock_user(VERIFY_WRITE, arg2,
11337                               maxevents * sizeof(struct target_epoll_event), 1);
11338         if (!target_ep) {
11339             return -TARGET_EFAULT;
11340         }
11341 
11342         ep = g_try_new(struct epoll_event, maxevents);
11343         if (!ep) {
11344             unlock_user(target_ep, arg2, 0);
11345             return -TARGET_ENOMEM;
11346         }
11347 
11348         switch (num) {
11349 #if defined(TARGET_NR_epoll_pwait)
11350         case TARGET_NR_epoll_pwait:
11351         {
11352             target_sigset_t *target_set;
11353             sigset_t _set, *set = &_set;
11354 
11355             if (arg5) {
11356                 if (arg6 != sizeof(target_sigset_t)) {
11357                     ret = -TARGET_EINVAL;
11358                     break;
11359                 }
11360 
11361                 target_set = lock_user(VERIFY_READ, arg5,
11362                                        sizeof(target_sigset_t), 1);
11363                 if (!target_set) {
11364                     ret = -TARGET_EFAULT;
11365                     break;
11366                 }
11367                 target_to_host_sigset(set, target_set);
11368                 unlock_user(target_set, arg5, 0);
11369             } else {
11370                 set = NULL;
11371             }
11372 
11373             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11374                                              set, SIGSET_T_SIZE));
11375             break;
11376         }
11377 #endif
11378 #if defined(TARGET_NR_epoll_wait)
11379         case TARGET_NR_epoll_wait:
11380             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11381                                              NULL, 0));
11382             break;
11383 #endif
11384         default:
11385             ret = -TARGET_ENOSYS;
11386         }
11387         if (!is_error(ret)) {
11388             int i;
11389             for (i = 0; i < ret; i++) {
11390                 target_ep[i].events = tswap32(ep[i].events);
11391                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11392             }
11393             unlock_user(target_ep, arg2,
11394                         ret * sizeof(struct target_epoll_event));
11395         } else {
11396             unlock_user(target_ep, arg2, 0);
11397         }
11398         g_free(ep);
11399         return ret;
11400     }
11401 #endif
11402 #endif
11403 #ifdef TARGET_NR_prlimit64
11404     case TARGET_NR_prlimit64:
11405     {
11406         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11407         struct target_rlimit64 *target_rnew, *target_rold;
11408         struct host_rlimit64 rnew, rold, *rnewp = 0;
11409         int resource = target_to_host_resource(arg2);
11410         if (arg3) {
11411             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11412                 return -TARGET_EFAULT;
11413             }
11414             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11415             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11416             unlock_user_struct(target_rnew, arg3, 0);
11417             rnewp = &rnew;
11418         }
11419 
11420         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11421         if (!is_error(ret) && arg4) {
11422             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11423                 return -TARGET_EFAULT;
11424             }
11425             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11426             target_rold->rlim_max = tswap64(rold.rlim_max);
11427             unlock_user_struct(target_rold, arg4, 1);
11428         }
11429         return ret;
11430     }
11431 #endif
11432 #ifdef TARGET_NR_gethostname
11433     case TARGET_NR_gethostname:
11434     {
11435         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11436         if (name) {
11437             ret = get_errno(gethostname(name, arg2));
11438             unlock_user(name, arg1, arg2);
11439         } else {
11440             ret = -TARGET_EFAULT;
11441         }
11442         return ret;
11443     }
11444 #endif
11445 #ifdef TARGET_NR_atomic_cmpxchg_32
11446     case TARGET_NR_atomic_cmpxchg_32:
11447     {
11448         /* should use start_exclusive from main.c */
11449         abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            /* mem_value was not read from guest memory; don't fall through
             * and use it. */
            return 0xdeadbeef;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        return mem_value;
11464     }
11465 #endif
11466 #ifdef TARGET_NR_atomic_barrier
11467     case TARGET_NR_atomic_barrier:
11468         /* Like the kernel implementation and the
11469            qemu arm barrier, no-op this? */
11470         return 0;
11471 #endif
11472 
11473 #ifdef TARGET_NR_timer_create
11474     case TARGET_NR_timer_create:
11475     {
11476         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11477 
11478         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11479 
11480         int clkid = arg1;
11481         int timer_index = next_free_host_timer();
11482 
11483         if (timer_index < 0) {
11484             ret = -TARGET_EAGAIN;
11485         } else {
            timer_t *phtimer = g_posix_timers + timer_index;
11487 
11488             if (arg2) {
11489                 phost_sevp = &host_sevp;
11490                 ret = target_to_host_sigevent(phost_sevp, arg2);
11491                 if (ret != 0) {
11492                     return ret;
11493                 }
11494             }
11495 
11496             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11497             if (ret) {
11498                 phtimer = NULL;
11499             } else {
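                /* The guest-visible timer id is the index into
                 * g_posix_timers tagged with TIMER_MAGIC, so that stale or
                 * forged ids can be rejected later by get_timer_id().
                 */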
11500                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11501                     return -TARGET_EFAULT;
11502                 }
11503             }
11504         }
11505         return ret;
11506     }
11507 #endif
11508 
11509 #ifdef TARGET_NR_timer_settime
11510     case TARGET_NR_timer_settime:
11511     {
11512         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11513          * struct itimerspec * old_value */
11514         target_timer_t timerid = get_timer_id(arg1);
11515 
11516         if (timerid < 0) {
11517             ret = timerid;
11518         } else if (arg3 == 0) {
11519             ret = -TARGET_EINVAL;
11520         } else {
11521             timer_t htimer = g_posix_timers[timerid];
11522             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11523 
11524             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11525                 return -TARGET_EFAULT;
11526             }
11527             ret = get_errno(
11528                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11529             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11530                 return -TARGET_EFAULT;
11531             }
11532         }
11533         return ret;
11534     }
11535 #endif
11536 
11537 #ifdef TARGET_NR_timer_gettime
11538     case TARGET_NR_timer_gettime:
11539     {
11540         /* args: timer_t timerid, struct itimerspec *curr_value */
11541         target_timer_t timerid = get_timer_id(arg1);
11542 
11543         if (timerid < 0) {
11544             ret = timerid;
11545         } else if (!arg2) {
11546             ret = -TARGET_EFAULT;
11547         } else {
11548             timer_t htimer = g_posix_timers[timerid];
11549             struct itimerspec hspec;
11550             ret = get_errno(timer_gettime(htimer, &hspec));
11551 
11552             if (host_to_target_itimerspec(arg2, &hspec)) {
11553                 ret = -TARGET_EFAULT;
11554             }
11555         }
11556         return ret;
11557     }
11558 #endif
11559 
11560 #ifdef TARGET_NR_timer_getoverrun
11561     case TARGET_NR_timer_getoverrun:
11562     {
11563         /* args: timer_t timerid */
11564         target_timer_t timerid = get_timer_id(arg1);
11565 
11566         if (timerid < 0) {
11567             ret = timerid;
11568         } else {
11569             timer_t htimer = g_posix_timers[timerid];
11570             ret = get_errno(timer_getoverrun(htimer));
11571         }
11572         fd_trans_unregister(ret);
11573         return ret;
11574     }
11575 #endif
11576 
11577 #ifdef TARGET_NR_timer_delete
11578     case TARGET_NR_timer_delete:
11579     {
11580         /* args: timer_t timerid */
11581         target_timer_t timerid = get_timer_id(arg1);
11582 
11583         if (timerid < 0) {
11584             ret = timerid;
11585         } else {
11586             timer_t htimer = g_posix_timers[timerid];
11587             ret = get_errno(timer_delete(htimer));
11588             g_posix_timers[timerid] = 0;
11589         }
11590         return ret;
11591     }
11592 #endif
11593 
11594 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11595     case TARGET_NR_timerfd_create:
11596         return get_errno(timerfd_create(arg1,
11597                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11598 #endif
11599 
11600 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11601     case TARGET_NR_timerfd_gettime:
11602         {
11603             struct itimerspec its_curr;
11604 
11605             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11606 
11607             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11608                 return -TARGET_EFAULT;
11609             }
11610         }
11611         return ret;
11612 #endif
11613 
11614 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11615     case TARGET_NR_timerfd_settime:
11616         {
11617             struct itimerspec its_new, its_old, *p_new;
11618 
11619             if (arg3) {
11620                 if (target_to_host_itimerspec(&its_new, arg3)) {
11621                     return -TARGET_EFAULT;
11622                 }
11623                 p_new = &its_new;
11624             } else {
11625                 p_new = NULL;
11626             }
11627 
11628             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11629 
11630             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11631                 return -TARGET_EFAULT;
11632             }
11633         }
11634         return ret;
11635 #endif
11636 
11637 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11638     case TARGET_NR_ioprio_get:
11639         return get_errno(ioprio_get(arg1, arg2));
11640 #endif
11641 
11642 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11643     case TARGET_NR_ioprio_set:
11644         return get_errno(ioprio_set(arg1, arg2, arg3));
11645 #endif
11646 
11647 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11648     case TARGET_NR_setns:
11649         return get_errno(setns(arg1, arg2));
11650 #endif
11651 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11652     case TARGET_NR_unshare:
11653         return get_errno(unshare(arg1));
11654 #endif
11655 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11656     case TARGET_NR_kcmp:
11657         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11658 #endif
11659 #ifdef TARGET_NR_swapcontext
11660     case TARGET_NR_swapcontext:
11661         /* PowerPC specific.  */
11662         return do_swapcontext(cpu_env, arg1, arg2, arg3);
11663 #endif
11664 
11665     default:
11666         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11667         return -TARGET_ENOSYS;
11668     }
11669     return ret;
11670 }
11671 
11672 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11673                     abi_long arg2, abi_long arg3, abi_long arg4,
11674                     abi_long arg5, abi_long arg6, abi_long arg7,
11675                     abi_long arg8)
11676 {
11677     CPUState *cpu = ENV_GET_CPU(cpu_env);
11678     abi_long ret;
11679 
11680 #ifdef DEBUG_ERESTARTSYS
11681     /* Debug-only code for exercising the syscall-restart code paths
11682      * in the per-architecture cpu main loops: restart every syscall
11683      * the guest makes once before letting it through.
11684      */
11685     {
11686         static bool flag;
11687         flag = !flag;
11688         if (flag) {
11689             return -TARGET_ERESTARTSYS;
11690         }
11691     }
11692 #endif
11693 
11694     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11695                              arg5, arg6, arg7, arg8);
11696 
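    /* With -strace, print the syscall and its return value around the
     * actual dispatch.
     */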
11697     if (unlikely(do_strace)) {
11698         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11699         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11700                           arg5, arg6, arg7, arg8);
11701         print_syscall_ret(num, ret);
11702     } else {
11703         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11704                           arg5, arg6, arg7, arg8);
11705     }
11706 
11707     trace_guest_user_syscall_ret(cpu, num, ret);
11708     return ret;
11709 }
11710