xref: /openbmc/qemu/linux-user/syscall.c (revision fff69382)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef CONFIG_EVENTFD
63 #include <sys/eventfd.h>
64 #endif
65 #ifdef CONFIG_EPOLL
66 #include <sys/epoll.h>
67 #endif
68 #ifdef CONFIG_ATTR
69 #include "qemu/xattr.h"
70 #endif
71 #ifdef CONFIG_SENDFILE
72 #include <sys/sendfile.h>
73 #endif
74 
75 #define termios host_termios
76 #define winsize host_winsize
77 #define termio host_termio
78 #define sgttyb host_sgttyb /* same as target */
79 #define tchars host_tchars /* same as target */
80 #define ltchars host_ltchars /* same as target */
81 
82 #include <linux/termios.h>
83 #include <linux/unistd.h>
84 #include <linux/cdrom.h>
85 #include <linux/hdreg.h>
86 #include <linux/soundcard.h>
87 #include <linux/kd.h>
88 #include <linux/mtio.h>
89 #include <linux/fs.h>
90 #if defined(CONFIG_FIEMAP)
91 #include <linux/fiemap.h>
92 #endif
93 #include <linux/fb.h>
94 #if defined(CONFIG_USBFS)
95 #include <linux/usbdevice_fs.h>
96 #include <linux/usb/ch9.h>
97 #endif
98 #include <linux/vt.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <netpacket/packet.h>
105 #include <linux/netlink.h>
106 #include "linux_loop.h"
107 #include "uname.h"
108 
109 #include "qemu.h"
110 #include "fd-trans.h"
111 
112 #ifndef CLONE_IO
113 #define CLONE_IO                0x80000000      /* Clone io context */
114 #endif
115 
116 /* We can't directly call the host clone syscall, because this will
117  * badly confuse libc (breaking mutexes, for example). So we must
118  * divide clone flags into:
119  *  * flag combinations that look like pthread_create()
120  *  * flag combinations that look like fork()
121  *  * flags we can implement within QEMU itself
122  *  * flags we can't support and will return an error for
123  */
124 /* For thread creation, all these flags must be present; for
125  * fork, none must be present.
126  */
127 #define CLONE_THREAD_FLAGS                              \
128     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
129      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
130 
131 /* These flags are ignored:
132  * CLONE_DETACHED is now ignored by the kernel;
133  * CLONE_IO is just an optimisation hint to the I/O scheduler
134  */
135 #define CLONE_IGNORED_FLAGS                     \
136     (CLONE_DETACHED | CLONE_IO)
137 
138 /* Flags for fork which we can implement within QEMU itself */
139 #define CLONE_OPTIONAL_FORK_FLAGS               \
140     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
141      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
142 
143 /* Flags for thread creation which we can implement within QEMU itself */
144 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
145     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
146      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
147 
148 #define CLONE_INVALID_FORK_FLAGS                                        \
149     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
150 
151 #define CLONE_INVALID_THREAD_FLAGS                                      \
152     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
153        CLONE_IGNORED_FLAGS))
154 
155 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
156  * have almost all been allocated. We cannot support any of
157  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
158  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
159  * The checks against the invalid thread masks above will catch these.
160  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
161  */
162 
163 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
164  * once. This exercises the codepaths for restart.
165  */
166 //#define DEBUG_ERESTARTSYS
167 
168 //#include <linux/msdos_fs.h>
169 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
170 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
171 
172 #undef _syscall0
173 #undef _syscall1
174 #undef _syscall2
175 #undef _syscall3
176 #undef _syscall4
177 #undef _syscall5
178 #undef _syscall6
179 
180 #define _syscall0(type,name)		\
181 static type name (void)			\
182 {					\
183 	return syscall(__NR_##name);	\
184 }
185 
186 #define _syscall1(type,name,type1,arg1)		\
187 static type name (type1 arg1)			\
188 {						\
189 	return syscall(__NR_##name, arg1);	\
190 }
191 
192 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
193 static type name (type1 arg1,type2 arg2)		\
194 {							\
195 	return syscall(__NR_##name, arg1, arg2);	\
196 }
197 
198 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
199 static type name (type1 arg1,type2 arg2,type3 arg3)		\
200 {								\
201 	return syscall(__NR_##name, arg1, arg2, arg3);		\
202 }
203 
204 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
205 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
206 {										\
207 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
208 }
209 
210 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
211 		  type5,arg5)							\
212 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
213 {										\
214 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
215 }
216 
217 
218 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
219 		  type5,arg5,type6,arg6)					\
220 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
221                   type6 arg6)							\
222 {										\
223 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
224 }
225 
226 
227 #define __NR_sys_uname __NR_uname
228 #define __NR_sys_getcwd1 __NR_getcwd
229 #define __NR_sys_getdents __NR_getdents
230 #define __NR_sys_getdents64 __NR_getdents64
231 #define __NR_sys_getpriority __NR_getpriority
232 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
233 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
234 #define __NR_sys_syslog __NR_syslog
235 #define __NR_sys_futex __NR_futex
236 #define __NR_sys_inotify_init __NR_inotify_init
237 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
238 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
239 
240 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
241 #define __NR__llseek __NR_lseek
242 #endif
243 
244 /* Newer kernel ports have llseek() instead of _llseek() */
245 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
246 #define TARGET_NR__llseek TARGET_NR_llseek
247 #endif
248 
249 #define __NR_sys_gettid __NR_gettid
250 _syscall0(int, sys_gettid)
251 
252 /* For the 64-bit guest on 32-bit host case we must emulate
253  * getdents using getdents64, because otherwise the host
254  * might hand us back more dirent records than we can fit
255  * into the guest buffer after structure format conversion.
256  * Otherwise we emulate getdents with getdents if the host has it.
257  */
258 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
259 #define EMULATE_GETDENTS_WITH_GETDENTS
260 #endif
261 
262 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
263 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
264 #endif
265 #if (defined(TARGET_NR_getdents) && \
266       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
267     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
268 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
269 #endif
270 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
271 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
272           loff_t *, res, uint, wh);
273 #endif
274 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
275 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
276           siginfo_t *, uinfo)
277 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
278 #ifdef __NR_exit_group
279 _syscall1(int,exit_group,int,error_code)
280 #endif
281 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
282 _syscall1(int,set_tid_address,int *,tidptr)
283 #endif
284 #if defined(TARGET_NR_futex) && defined(__NR_futex)
285 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
286           const struct timespec *,timeout,int *,uaddr2,int,val3)
287 #endif
288 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
289 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
290           unsigned long *, user_mask_ptr);
291 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
292 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
293           unsigned long *, user_mask_ptr);
294 #define __NR_sys_getcpu __NR_getcpu
295 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
296 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
297           void *, arg);
298 _syscall2(int, capget, struct __user_cap_header_struct *, header,
299           struct __user_cap_data_struct *, data);
300 _syscall2(int, capset, struct __user_cap_header_struct *, header,
301           struct __user_cap_data_struct *, data);
302 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
303 _syscall2(int, ioprio_get, int, which, int, who)
304 #endif
305 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
306 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
307 #endif
308 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
309 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
310 #endif
311 
312 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
313 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
314           unsigned long, idx1, unsigned long, idx2)
315 #endif
316 
/* Translation table for open()/fcntl() file status flags between the
 * target and host encodings.  NOTE(review): row layout is presumably
 * { target_mask, target_bits, host_mask, host_bits } as consumed by the
 * bitmask translation helpers declared in qemu.h -- confirm against
 * target_to_host_bitmask()/host_to_target_bitmask().
 * The list is terminated by the all-zero row.
 */
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
352 
/* Fetch the current working directory into buf (at most size bytes).
 * Returns the string length including the trailing NUL on success,
 * or -1 on failure with errno already set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    char *cwd = getcwd(buf, size);

    if (cwd == NULL) {
        /* errno has been set by getcwd() */
        return -1;
    }
    return strlen(cwd) + 1;
}
361 
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
/* Host kernel provides utimensat: call it directly. */
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Host headers lack utimensat: stub that fails with ENOSYS so the
 * guest syscall is reported as unsupported rather than crashing. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
376 
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
/* Host kernel provides renameat2: call it directly. */
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
/* Fallback for hosts without renameat2: flags == 0 is equivalent to
 * plain renameat(); any non-zero flag cannot be emulated, so report
 * ENOSYS. */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
394 
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin wrappers exposing libc's inotify helpers under the sys_*
 * naming convention used by the syscall dispatch below. */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
/* Undefining the TARGET_NR_* numbers removes the corresponding cases
 * from the syscall switch, so the guest sees ENOSYS. */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
431 
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
/* Host lacks prlimit64: use an invalid syscall number so the raw
 * syscall() call fails at runtime (typically with ENOSYS). */
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
446 
447 
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Reserve the first unused slot in g_posix_timers and return its
 * index, or -1 if all 32 slots are busy.  The slot is claimed by
 * storing a non-zero placeholder handle into it; the caller is
 * expected to overwrite it with the real timer_t later.
 */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
465 
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
/* regpairs_aligned() returns non-zero when the target ABI passes a
 * 64-bit syscall argument in an aligned (even/odd) register pair,
 * which inserts a padding argument the dispatch code must skip.
 * 'num' is the target syscall number, consulted only where alignment
 * is per-syscall (SH4). */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    /* Only the EABI variant of the ARM ABI aligns register pairs. */
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
497 
498 #define ERRNO_TABLE_SIZE 1200
499 
500 /* target_to_host_errno_table[] is initialized from
501  * host_to_target_errno_table[] in syscall_init(). */
502 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
503 };
504 
505 /*
506  * This list is the union of errno values overridden in asm-<arch>/errno.h
507  * minus the errnos that are not actually generic to all archs.
508  */
509 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
510     [EAGAIN]		= TARGET_EAGAIN,
511     [EIDRM]		= TARGET_EIDRM,
512     [ECHRNG]		= TARGET_ECHRNG,
513     [EL2NSYNC]		= TARGET_EL2NSYNC,
514     [EL3HLT]		= TARGET_EL3HLT,
515     [EL3RST]		= TARGET_EL3RST,
516     [ELNRNG]		= TARGET_ELNRNG,
517     [EUNATCH]		= TARGET_EUNATCH,
518     [ENOCSI]		= TARGET_ENOCSI,
519     [EL2HLT]		= TARGET_EL2HLT,
520     [EDEADLK]		= TARGET_EDEADLK,
521     [ENOLCK]		= TARGET_ENOLCK,
522     [EBADE]		= TARGET_EBADE,
523     [EBADR]		= TARGET_EBADR,
524     [EXFULL]		= TARGET_EXFULL,
525     [ENOANO]		= TARGET_ENOANO,
526     [EBADRQC]		= TARGET_EBADRQC,
527     [EBADSLT]		= TARGET_EBADSLT,
528     [EBFONT]		= TARGET_EBFONT,
529     [ENOSTR]		= TARGET_ENOSTR,
530     [ENODATA]		= TARGET_ENODATA,
531     [ETIME]		= TARGET_ETIME,
532     [ENOSR]		= TARGET_ENOSR,
533     [ENONET]		= TARGET_ENONET,
534     [ENOPKG]		= TARGET_ENOPKG,
535     [EREMOTE]		= TARGET_EREMOTE,
536     [ENOLINK]		= TARGET_ENOLINK,
537     [EADV]		= TARGET_EADV,
538     [ESRMNT]		= TARGET_ESRMNT,
539     [ECOMM]		= TARGET_ECOMM,
540     [EPROTO]		= TARGET_EPROTO,
541     [EDOTDOT]		= TARGET_EDOTDOT,
542     [EMULTIHOP]		= TARGET_EMULTIHOP,
543     [EBADMSG]		= TARGET_EBADMSG,
544     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
545     [EOVERFLOW]		= TARGET_EOVERFLOW,
546     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
547     [EBADFD]		= TARGET_EBADFD,
548     [EREMCHG]		= TARGET_EREMCHG,
549     [ELIBACC]		= TARGET_ELIBACC,
550     [ELIBBAD]		= TARGET_ELIBBAD,
551     [ELIBSCN]		= TARGET_ELIBSCN,
552     [ELIBMAX]		= TARGET_ELIBMAX,
553     [ELIBEXEC]		= TARGET_ELIBEXEC,
554     [EILSEQ]		= TARGET_EILSEQ,
555     [ENOSYS]		= TARGET_ENOSYS,
556     [ELOOP]		= TARGET_ELOOP,
557     [ERESTART]		= TARGET_ERESTART,
558     [ESTRPIPE]		= TARGET_ESTRPIPE,
559     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
560     [EUSERS]		= TARGET_EUSERS,
561     [ENOTSOCK]		= TARGET_ENOTSOCK,
562     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
563     [EMSGSIZE]		= TARGET_EMSGSIZE,
564     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
565     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
566     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
567     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
568     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
569     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
570     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
571     [EADDRINUSE]	= TARGET_EADDRINUSE,
572     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
573     [ENETDOWN]		= TARGET_ENETDOWN,
574     [ENETUNREACH]	= TARGET_ENETUNREACH,
575     [ENETRESET]		= TARGET_ENETRESET,
576     [ECONNABORTED]	= TARGET_ECONNABORTED,
577     [ECONNRESET]	= TARGET_ECONNRESET,
578     [ENOBUFS]		= TARGET_ENOBUFS,
579     [EISCONN]		= TARGET_EISCONN,
580     [ENOTCONN]		= TARGET_ENOTCONN,
581     [EUCLEAN]		= TARGET_EUCLEAN,
582     [ENOTNAM]		= TARGET_ENOTNAM,
583     [ENAVAIL]		= TARGET_ENAVAIL,
584     [EISNAM]		= TARGET_EISNAM,
585     [EREMOTEIO]		= TARGET_EREMOTEIO,
586     [EDQUOT]            = TARGET_EDQUOT,
587     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
588     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
589     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
590     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
591     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
592     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
593     [EALREADY]		= TARGET_EALREADY,
594     [EINPROGRESS]	= TARGET_EINPROGRESS,
595     [ESTALE]		= TARGET_ESTALE,
596     [ECANCELED]		= TARGET_ECANCELED,
597     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
598     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
599 #ifdef ENOKEY
600     [ENOKEY]		= TARGET_ENOKEY,
601 #endif
602 #ifdef EKEYEXPIRED
603     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
604 #endif
605 #ifdef EKEYREVOKED
606     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
607 #endif
608 #ifdef EKEYREJECTED
609     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
610 #endif
611 #ifdef EOWNERDEAD
612     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
613 #endif
614 #ifdef ENOTRECOVERABLE
615     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
616 #endif
617 #ifdef ENOMSG
618     [ENOMSG]            = TARGET_ENOMSG,
619 #endif
620 #ifdef ERKFILL
621     [ERFKILL]           = TARGET_ERFKILL,
622 #endif
623 #ifdef EHWPOISON
624     [EHWPOISON]         = TARGET_EHWPOISON,
625 #endif
626 };
627 
628 static inline int host_to_target_errno(int err)
629 {
630     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
631         host_to_target_errno_table[err]) {
632         return host_to_target_errno_table[err];
633     }
634     return err;
635 }
636 
637 static inline int target_to_host_errno(int err)
638 {
639     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
640         target_to_host_errno_table[err]) {
641         return target_to_host_errno_table[err];
642     }
643     return err;
644 }
645 
646 static inline abi_long get_errno(abi_long ret)
647 {
648     if (ret == -1)
649         return -host_to_target_errno(errno);
650     else
651         return ret;
652 }
653 
654 const char *target_strerror(int err)
655 {
656     if (err == TARGET_ERESTARTSYS) {
657         return "To be restarted";
658     }
659     if (err == TARGET_QEMU_ESIGRETURN) {
660         return "Successful exit from sigreturn";
661     }
662 
663     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
664         return NULL;
665     }
666     return strerror(target_to_host_errno(err));
667 }
668 
/* Generators for safe_<name>() wrappers taking 0..6 arguments, each
 * forwarding to safe_syscall() (defined elsewhere; see qemu.h).
 * NOTE(review): safe_syscall() presumably implements the
 * signal-race-free / restartable syscall protocol alluded to by
 * DEBUG_ERESTARTSYS above -- confirm against its definition.
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
715 
/* Instantiate safe_*() wrappers for every potentially-blocking host
 * syscall used by the emulation below. */
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_msgsnd
/* Host has direct SysV IPC syscalls. */
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#else
/* This host kernel architecture uses a single ipc syscall; fake up
 * wrappers for the sub-operations to hide this implementation detail.
 * Annoyingly we can't include linux/ipc.h to get the constant definitions
 * for the call parameter because some structs in there conflict with the
 * sys/ipc.h ones. So we just define them here, and rely on them being
 * the same for all host architectures.
 */
#define Q_SEMTIMEDOP 4
#define Q_MSGSND 11
#define Q_MSGRCV 12
#define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))

safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
{
    return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
}
/* Version 1 of MSGRCV passes msgp and msgtyp directly rather than via
 * an indirection struct -- see ipc(2). */
static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
{
    return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
}
static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
                           const struct timespec *timeout)
{
    return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
                    (long)timeout);
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
822 
823 static inline int host_to_target_sock_type(int host_type)
824 {
825     int target_type;
826 
827     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
828     case SOCK_DGRAM:
829         target_type = TARGET_SOCK_DGRAM;
830         break;
831     case SOCK_STREAM:
832         target_type = TARGET_SOCK_STREAM;
833         break;
834     default:
835         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
836         break;
837     }
838 
839 #if defined(SOCK_CLOEXEC)
840     if (host_type & SOCK_CLOEXEC) {
841         target_type |= TARGET_SOCK_CLOEXEC;
842     }
843 #endif
844 
845 #if defined(SOCK_NONBLOCK)
846     if (host_type & SOCK_NONBLOCK) {
847         target_type |= TARGET_SOCK_NONBLOCK;
848     }
849 #endif
850 
851     return target_type;
852 }
853 
/* Guest program-break state (all values are guest addresses):
 *   target_brk          - current break
 *   target_original_brk - initial break set at process start; the break
 *                         is never allowed below this
 *   brk_page            - host-page-aligned top of the reserved heap area
 */
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;
857 
/* Record the initial program break for a freshly-loaded guest image.
 * Both the break and the top-of-heap tracker start host-page aligned.
 */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
863 
/* Swap which definition is commented out to trace do_brk() on stderr. */
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
866 
867 /* do_brk() must return target values and target errnos. */
868 abi_long do_brk(abi_ulong new_brk)
869 {
870     abi_long mapped_addr;
871     abi_ulong new_alloc_size;
872 
873     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
874 
875     if (!new_brk) {
876         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
877         return target_brk;
878     }
879     if (new_brk < target_original_brk) {
880         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
881                    target_brk);
882         return target_brk;
883     }
884 
885     /* If the new brk is less than the highest page reserved to the
886      * target heap allocation, set it and we're almost done...  */
887     if (new_brk <= brk_page) {
888         /* Heap contents are initialized to zero, as for anonymous
889          * mapped pages.  */
890         if (new_brk > target_brk) {
891             memset(g2h(target_brk), 0, new_brk - target_brk);
892         }
893 	target_brk = new_brk;
894         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
895 	return target_brk;
896     }
897 
898     /* We need to allocate more memory after the brk... Note that
899      * we don't use MAP_FIXED because that will map over the top of
900      * any existing mapping (like the one with the host libc or qemu
901      * itself); instead we treat "mapped but at wrong address" as
902      * a failure and unmap again.
903      */
904     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
905     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
906                                         PROT_READ|PROT_WRITE,
907                                         MAP_ANON|MAP_PRIVATE, 0, 0));
908 
909     if (mapped_addr == brk_page) {
910         /* Heap contents are initialized to zero, as for anonymous
911          * mapped pages.  Technically the new pages are already
912          * initialized to zero since they *are* anonymous mapped
913          * pages, however we have to take care with the contents that
914          * come from the remaining part of the previous page: it may
915          * contains garbage data due to a previous heap usage (grown
916          * then shrunken).  */
917         memset(g2h(target_brk), 0, brk_page - target_brk);
918 
919         target_brk = new_brk;
920         brk_page = HOST_PAGE_ALIGN(target_brk);
921         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
922             target_brk);
923         return target_brk;
924     } else if (mapped_addr != -1) {
925         /* Mapped but at wrong address, meaning there wasn't actually
926          * enough space for this brk.
927          */
928         target_munmap(mapped_addr, new_alloc_size);
929         mapped_addr = -1;
930         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
931     }
932     else {
933         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
934     }
935 
936 #if defined(TARGET_ALPHA)
937     /* We (partially) emulate OSF/1 on Alpha, which requires we
938        return a proper errno, not an unchanged brk value.  */
939     return -TARGET_ENOMEM;
940 #endif
941     /* For everything else, return the previous break. */
942     return target_brk;
943 }
944 
945 static inline abi_long copy_from_user_fdset(fd_set *fds,
946                                             abi_ulong target_fds_addr,
947                                             int n)
948 {
949     int i, nw, j, k;
950     abi_ulong b, *target_fds;
951 
952     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
953     if (!(target_fds = lock_user(VERIFY_READ,
954                                  target_fds_addr,
955                                  sizeof(abi_ulong) * nw,
956                                  1)))
957         return -TARGET_EFAULT;
958 
959     FD_ZERO(fds);
960     k = 0;
961     for (i = 0; i < nw; i++) {
962         /* grab the abi_ulong */
963         __get_user(b, &target_fds[i]);
964         for (j = 0; j < TARGET_ABI_BITS; j++) {
965             /* check the bit inside the abi_ulong */
966             if ((b >> j) & 1)
967                 FD_SET(k, fds);
968             k++;
969         }
970     }
971 
972     unlock_user(target_fds, target_fds_addr, 0);
973 
974     return 0;
975 }
976 
977 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
978                                                  abi_ulong target_fds_addr,
979                                                  int n)
980 {
981     if (target_fds_addr) {
982         if (copy_from_user_fdset(fds, target_fds_addr, n))
983             return -TARGET_EFAULT;
984         *fds_ptr = fds;
985     } else {
986         *fds_ptr = NULL;
987     }
988     return 0;
989 }
990 
991 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
992                                           const fd_set *fds,
993                                           int n)
994 {
995     int i, nw, j, k;
996     abi_long v;
997     abi_ulong *target_fds;
998 
999     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1000     if (!(target_fds = lock_user(VERIFY_WRITE,
1001                                  target_fds_addr,
1002                                  sizeof(abi_ulong) * nw,
1003                                  0)))
1004         return -TARGET_EFAULT;
1005 
1006     k = 0;
1007     for (i = 0; i < nw; i++) {
1008         v = 0;
1009         for (j = 0; j < TARGET_ABI_BITS; j++) {
1010             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1011             k++;
1012         }
1013         __put_user(v, &target_fds[i]);
1014     }
1015 
1016     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1017 
1018     return 0;
1019 }
1020 
/* Clock tick rate (HZ) assumed for the host kernel's clock_t values. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a host clock_t tick count to the target's HZ. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    /* Widen before multiplying to avoid overflow on 32-bit longs. */
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
1035 
/* Copy a host struct rusage out to the guest at target_addr, byte-swapping
 * every field.  Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1065 
1066 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1067 {
1068     abi_ulong target_rlim_swap;
1069     rlim_t result;
1070 
1071     target_rlim_swap = tswapal(target_rlim);
1072     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1073         return RLIM_INFINITY;
1074 
1075     result = target_rlim_swap;
1076     if (target_rlim_swap != (rlim_t)result)
1077         return RLIM_INFINITY;
1078 
1079     return result;
1080 }
1081 
1082 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1083 {
1084     abi_ulong target_rlim_swap;
1085     abi_ulong result;
1086 
1087     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1088         target_rlim_swap = TARGET_RLIM_INFINITY;
1089     else
1090         target_rlim_swap = rlim;
1091     result = tswapal(target_rlim_swap);
1092 
1093     return result;
1094 }
1095 
/* Map a target RLIMIT_* resource code to the host constant.  Unknown
 * codes are passed through unchanged and left for the host syscall to
 * reject.
 */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
1133 
1134 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1135                                               abi_ulong target_tv_addr)
1136 {
1137     struct target_timeval *target_tv;
1138 
1139     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1140         return -TARGET_EFAULT;
1141 
1142     __get_user(tv->tv_sec, &target_tv->tv_sec);
1143     __get_user(tv->tv_usec, &target_tv->tv_usec);
1144 
1145     unlock_user_struct(target_tv, target_tv_addr, 0);
1146 
1147     return 0;
1148 }
1149 
1150 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1151                                             const struct timeval *tv)
1152 {
1153     struct target_timeval *target_tv;
1154 
1155     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1156         return -TARGET_EFAULT;
1157 
1158     __put_user(tv->tv_sec, &target_tv->tv_sec);
1159     __put_user(tv->tv_usec, &target_tv->tv_usec);
1160 
1161     unlock_user_struct(target_tv, target_tv_addr, 1);
1162 
1163     return 0;
1164 }
1165 
1166 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1167                                                abi_ulong target_tz_addr)
1168 {
1169     struct target_timezone *target_tz;
1170 
1171     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1172         return -TARGET_EFAULT;
1173     }
1174 
1175     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1176     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1177 
1178     unlock_user_struct(target_tz, target_tz_addr, 0);
1179 
1180     return 0;
1181 }
1182 
1183 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1184 #include <mqueue.h>
1185 
1186 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1187                                               abi_ulong target_mq_attr_addr)
1188 {
1189     struct target_mq_attr *target_mq_attr;
1190 
1191     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1192                           target_mq_attr_addr, 1))
1193         return -TARGET_EFAULT;
1194 
1195     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1196     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1197     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1198     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1199 
1200     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1201 
1202     return 0;
1203 }
1204 
1205 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1206                                             const struct mq_attr *attr)
1207 {
1208     struct target_mq_attr *target_mq_attr;
1209 
1210     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1211                           target_mq_attr_addr, 0))
1212         return -TARGET_EFAULT;
1213 
1214     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1215     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1216     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1217     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1218 
1219     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1220 
1221     return 0;
1222 }
1223 #endif
1224 
1225 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 *
 * Emulates select(2) on top of the host pselect6: the three guest fd
 * sets (any of which may be absent) and the optional timeval are read
 * in, the host syscall is run, and on success the modified sets and
 * the remaining timeout are copied back to the guest.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* A NULL guest address leaves the corresponding *_ptr NULL. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* select takes a timeval, but pselect6 wants a timespec. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        /* Copy the (possibly modified) sets and remaining time back. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1282 
1283 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1284 static abi_long do_old_select(abi_ulong arg1)
1285 {
1286     struct target_sel_arg_struct *sel;
1287     abi_ulong inp, outp, exp, tvp;
1288     long nsel;
1289 
1290     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1291         return -TARGET_EFAULT;
1292     }
1293 
1294     nsel = tswapal(sel->n);
1295     inp = tswapal(sel->inp);
1296     outp = tswapal(sel->outp);
1297     exp = tswapal(sel->exp);
1298     tvp = tswapal(sel->tvp);
1299 
1300     unlock_user_struct(sel, arg1, 0);
1301 
1302     return do_select(nsel, inp, outp, exp, tvp);
1303 }
1304 #endif
1305 #endif
1306 
/* Thin wrapper for pipe2(); returns -ENOSYS when the host libc was
 * built without pipe2 support.
 */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
1315 
/* Common implementation of the pipe and pipe2 syscalls.  Creates the
 * host pipe, then delivers the two descriptors to the guest either via
 * the target's special pipe calling convention (second fd in a second
 * return register on Alpha/MIPS/SH4/SPARC, original pipe only) or by
 * writing both into the guest array at pipedes.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1349 
1350 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1351                                               abi_ulong target_addr,
1352                                               socklen_t len)
1353 {
1354     struct target_ip_mreqn *target_smreqn;
1355 
1356     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1357     if (!target_smreqn)
1358         return -TARGET_EFAULT;
1359     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1360     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1361     if (len == sizeof(struct target_ip_mreqn))
1362         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1363     unlock_user(target_smreqn, target_addr, 0);
1364 
1365     return 0;
1366 }
1367 
1368 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1369                                                abi_ulong target_addr,
1370                                                socklen_t len)
1371 {
1372     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1373     sa_family_t sa_family;
1374     struct target_sockaddr *target_saddr;
1375 
1376     if (fd_trans_target_to_host_addr(fd)) {
1377         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1378     }
1379 
1380     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1381     if (!target_saddr)
1382         return -TARGET_EFAULT;
1383 
1384     sa_family = tswap16(target_saddr->sa_family);
1385 
1386     /* Oops. The caller might send a incomplete sun_path; sun_path
1387      * must be terminated by \0 (see the manual page), but
1388      * unfortunately it is quite common to specify sockaddr_un
1389      * length as "strlen(x->sun_path)" while it should be
1390      * "strlen(...) + 1". We'll fix that here if needed.
1391      * Linux kernel has a similar feature.
1392      */
1393 
1394     if (sa_family == AF_UNIX) {
1395         if (len < unix_maxlen && len > 0) {
1396             char *cp = (char*)target_saddr;
1397 
1398             if ( cp[len-1] && !cp[len] )
1399                 len++;
1400         }
1401         if (len > unix_maxlen)
1402             len = unix_maxlen;
1403     }
1404 
1405     memcpy(addr, target_saddr, len);
1406     addr->sa_family = sa_family;
1407     if (sa_family == AF_NETLINK) {
1408         struct sockaddr_nl *nladdr;
1409 
1410         nladdr = (struct sockaddr_nl *)addr;
1411         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1412         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1413     } else if (sa_family == AF_PACKET) {
1414 	struct target_sockaddr_ll *lladdr;
1415 
1416 	lladdr = (struct target_sockaddr_ll *)addr;
1417 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1418 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1419     }
1420     unlock_user(target_saddr, target_addr, 0);
1421 
1422     return 0;
1423 }
1424 
/* Copy a host sockaddr out to the guest at target_addr, byte-swapping
 * the family field (when len covers it) and family-specific members:
 * netlink pid/groups, packet ifindex/hatype, IPv6 scope id.
 * A zero len is a successful no-op.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only swap sa_family if the copied region actually contains it. */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        /* NOTE(review): this casts the target buffer to the *host*
         * sockaddr_ll layout - assumes host and target layouts agree
         * for these fields; verify for new targets.
         */
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
               (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
1462 
/* Convert the control-message (ancillary data) chain of a guest msghdr
 * into the host msghdr's pre-allocated control buffer.  Walks both
 * chains in lock step, converting SCM_RIGHTS fd arrays and
 * SCM_CREDENTIALS; other payload types are copied verbatim with a
 * warning.  On return msgh->msg_controllen holds the space actually
 * used.  Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    /* No room for even one header means no ancillary data. */
    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length as declared by the guest. */
        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* Passed file descriptors: swap each int individually. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
               &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* Unknown payload: pass the bytes through untranslated. */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                                        cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1544 
1545 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1546                                            struct msghdr *msgh)
1547 {
1548     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1549     abi_long msg_controllen;
1550     abi_ulong target_cmsg_addr;
1551     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1552     socklen_t space = 0;
1553 
1554     msg_controllen = tswapal(target_msgh->msg_controllen);
1555     if (msg_controllen < sizeof (struct target_cmsghdr))
1556         goto the_end;
1557     target_cmsg_addr = tswapal(target_msgh->msg_control);
1558     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1559     target_cmsg_start = target_cmsg;
1560     if (!target_cmsg)
1561         return -TARGET_EFAULT;
1562 
1563     while (cmsg && target_cmsg) {
1564         void *data = CMSG_DATA(cmsg);
1565         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1566 
1567         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1568         int tgt_len, tgt_space;
1569 
1570         /* We never copy a half-header but may copy half-data;
1571          * this is Linux's behaviour in put_cmsg(). Note that
1572          * truncation here is a guest problem (which we report
1573          * to the guest via the CTRUNC bit), unlike truncation
1574          * in target_to_host_cmsg, which is a QEMU bug.
1575          */
1576         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1577             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1578             break;
1579         }
1580 
1581         if (cmsg->cmsg_level == SOL_SOCKET) {
1582             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1583         } else {
1584             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1585         }
1586         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1587 
1588         /* Payload types which need a different size of payload on
1589          * the target must adjust tgt_len here.
1590          */
1591         tgt_len = len;
1592         switch (cmsg->cmsg_level) {
1593         case SOL_SOCKET:
1594             switch (cmsg->cmsg_type) {
1595             case SO_TIMESTAMP:
1596                 tgt_len = sizeof(struct target_timeval);
1597                 break;
1598             default:
1599                 break;
1600             }
1601             break;
1602         default:
1603             break;
1604         }
1605 
1606         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1607             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1608             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1609         }
1610 
1611         /* We must now copy-and-convert len bytes of payload
1612          * into tgt_len bytes of destination space. Bear in mind
1613          * that in both source and destination we may be dealing
1614          * with a truncated value!
1615          */
1616         switch (cmsg->cmsg_level) {
1617         case SOL_SOCKET:
1618             switch (cmsg->cmsg_type) {
1619             case SCM_RIGHTS:
1620             {
1621                 int *fd = (int *)data;
1622                 int *target_fd = (int *)target_data;
1623                 int i, numfds = tgt_len / sizeof(int);
1624 
1625                 for (i = 0; i < numfds; i++) {
1626                     __put_user(fd[i], target_fd + i);
1627                 }
1628                 break;
1629             }
1630             case SO_TIMESTAMP:
1631             {
1632                 struct timeval *tv = (struct timeval *)data;
1633                 struct target_timeval *target_tv =
1634                     (struct target_timeval *)target_data;
1635 
1636                 if (len != sizeof(struct timeval) ||
1637                     tgt_len != sizeof(struct target_timeval)) {
1638                     goto unimplemented;
1639                 }
1640 
1641                 /* copy struct timeval to target */
1642                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1643                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1644                 break;
1645             }
1646             case SCM_CREDENTIALS:
1647             {
1648                 struct ucred *cred = (struct ucred *)data;
1649                 struct target_ucred *target_cred =
1650                     (struct target_ucred *)target_data;
1651 
1652                 __put_user(cred->pid, &target_cred->pid);
1653                 __put_user(cred->uid, &target_cred->uid);
1654                 __put_user(cred->gid, &target_cred->gid);
1655                 break;
1656             }
1657             default:
1658                 goto unimplemented;
1659             }
1660             break;
1661 
1662         case SOL_IP:
1663             switch (cmsg->cmsg_type) {
1664             case IP_TTL:
1665             {
1666                 uint32_t *v = (uint32_t *)data;
1667                 uint32_t *t_int = (uint32_t *)target_data;
1668 
1669                 if (len != sizeof(uint32_t) ||
1670                     tgt_len != sizeof(uint32_t)) {
1671                     goto unimplemented;
1672                 }
1673                 __put_user(*v, t_int);
1674                 break;
1675             }
1676             case IP_RECVERR:
1677             {
1678                 struct errhdr_t {
1679                    struct sock_extended_err ee;
1680                    struct sockaddr_in offender;
1681                 };
1682                 struct errhdr_t *errh = (struct errhdr_t *)data;
1683                 struct errhdr_t *target_errh =
1684                     (struct errhdr_t *)target_data;
1685 
1686                 if (len != sizeof(struct errhdr_t) ||
1687                     tgt_len != sizeof(struct errhdr_t)) {
1688                     goto unimplemented;
1689                 }
1690                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1691                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1692                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1693                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1694                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1695                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1696                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1697                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1698                     (void *) &errh->offender, sizeof(errh->offender));
1699                 break;
1700             }
1701             default:
1702                 goto unimplemented;
1703             }
1704             break;
1705 
1706         case SOL_IPV6:
1707             switch (cmsg->cmsg_type) {
1708             case IPV6_HOPLIMIT:
1709             {
1710                 uint32_t *v = (uint32_t *)data;
1711                 uint32_t *t_int = (uint32_t *)target_data;
1712 
1713                 if (len != sizeof(uint32_t) ||
1714                     tgt_len != sizeof(uint32_t)) {
1715                     goto unimplemented;
1716                 }
1717                 __put_user(*v, t_int);
1718                 break;
1719             }
1720             case IPV6_RECVERR:
1721             {
1722                 struct errhdr6_t {
1723                    struct sock_extended_err ee;
1724                    struct sockaddr_in6 offender;
1725                 };
1726                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1727                 struct errhdr6_t *target_errh =
1728                     (struct errhdr6_t *)target_data;
1729 
1730                 if (len != sizeof(struct errhdr6_t) ||
1731                     tgt_len != sizeof(struct errhdr6_t)) {
1732                     goto unimplemented;
1733                 }
1734                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1735                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1736                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1737                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1738                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1739                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1740                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1741                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1742                     (void *) &errh->offender, sizeof(errh->offender));
1743                 break;
1744             }
1745             default:
1746                 goto unimplemented;
1747             }
1748             break;
1749 
1750         default:
1751         unimplemented:
1752             gemu_log("Unsupported ancillary data: %d/%d\n",
1753                                         cmsg->cmsg_level, cmsg->cmsg_type);
1754             memcpy(target_data, data, MIN(len, tgt_len));
1755             if (tgt_len > len) {
1756                 memset(target_data + len, 0, tgt_len - len);
1757             }
1758         }
1759 
1760         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1761         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1762         if (msg_controllen < tgt_space) {
1763             tgt_space = msg_controllen;
1764         }
1765         msg_controllen -= tgt_space;
1766         space += tgt_space;
1767         cmsg = CMSG_NXTHDR(msgh, cmsg);
1768         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1769                                          target_cmsg_start);
1770     }
1771     unlock_user(target_cmsg, target_cmsg_addr, space);
1772  the_end:
1773     target_msgh->msg_controllen = tswapal(space);
1774     return 0;
1775 }
1776 
1777 /* do_setsockopt() Must return target values and target errnos. */
1778 static abi_long do_setsockopt(int sockfd, int level, int optname,
1779                               abi_ulong optval_addr, socklen_t optlen)
1780 {
1781     abi_long ret;
1782     int val;
1783     struct ip_mreqn *ip_mreq;
1784     struct ip_mreq_source *ip_mreq_source;
1785 
1786     switch(level) {
1787     case SOL_TCP:
1788         /* TCP options all take an 'int' value.  */
1789         if (optlen < sizeof(uint32_t))
1790             return -TARGET_EINVAL;
1791 
1792         if (get_user_u32(val, optval_addr))
1793             return -TARGET_EFAULT;
1794         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1795         break;
1796     case SOL_IP:
1797         switch(optname) {
1798         case IP_TOS:
1799         case IP_TTL:
1800         case IP_HDRINCL:
1801         case IP_ROUTER_ALERT:
1802         case IP_RECVOPTS:
1803         case IP_RETOPTS:
1804         case IP_PKTINFO:
1805         case IP_MTU_DISCOVER:
1806         case IP_RECVERR:
1807         case IP_RECVTTL:
1808         case IP_RECVTOS:
1809 #ifdef IP_FREEBIND
1810         case IP_FREEBIND:
1811 #endif
1812         case IP_MULTICAST_TTL:
1813         case IP_MULTICAST_LOOP:
1814             val = 0;
1815             if (optlen >= sizeof(uint32_t)) {
1816                 if (get_user_u32(val, optval_addr))
1817                     return -TARGET_EFAULT;
1818             } else if (optlen >= 1) {
1819                 if (get_user_u8(val, optval_addr))
1820                     return -TARGET_EFAULT;
1821             }
1822             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1823             break;
1824         case IP_ADD_MEMBERSHIP:
1825         case IP_DROP_MEMBERSHIP:
1826             if (optlen < sizeof (struct target_ip_mreq) ||
1827                 optlen > sizeof (struct target_ip_mreqn))
1828                 return -TARGET_EINVAL;
1829 
1830             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1831             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1832             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1833             break;
1834 
1835         case IP_BLOCK_SOURCE:
1836         case IP_UNBLOCK_SOURCE:
1837         case IP_ADD_SOURCE_MEMBERSHIP:
1838         case IP_DROP_SOURCE_MEMBERSHIP:
1839             if (optlen != sizeof (struct target_ip_mreq_source))
1840                 return -TARGET_EINVAL;
1841 
1842             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1843             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1844             unlock_user (ip_mreq_source, optval_addr, 0);
1845             break;
1846 
1847         default:
1848             goto unimplemented;
1849         }
1850         break;
1851     case SOL_IPV6:
1852         switch (optname) {
1853         case IPV6_MTU_DISCOVER:
1854         case IPV6_MTU:
1855         case IPV6_V6ONLY:
1856         case IPV6_RECVPKTINFO:
1857         case IPV6_UNICAST_HOPS:
1858         case IPV6_MULTICAST_HOPS:
1859         case IPV6_MULTICAST_LOOP:
1860         case IPV6_RECVERR:
1861         case IPV6_RECVHOPLIMIT:
1862         case IPV6_2292HOPLIMIT:
1863         case IPV6_CHECKSUM:
1864         case IPV6_ADDRFORM:
1865         case IPV6_2292PKTINFO:
1866         case IPV6_RECVTCLASS:
1867         case IPV6_RECVRTHDR:
1868         case IPV6_2292RTHDR:
1869         case IPV6_RECVHOPOPTS:
1870         case IPV6_2292HOPOPTS:
1871         case IPV6_RECVDSTOPTS:
1872         case IPV6_2292DSTOPTS:
1873         case IPV6_TCLASS:
1874 #ifdef IPV6_RECVPATHMTU
1875         case IPV6_RECVPATHMTU:
1876 #endif
1877 #ifdef IPV6_TRANSPARENT
1878         case IPV6_TRANSPARENT:
1879 #endif
1880 #ifdef IPV6_FREEBIND
1881         case IPV6_FREEBIND:
1882 #endif
1883 #ifdef IPV6_RECVORIGDSTADDR
1884         case IPV6_RECVORIGDSTADDR:
1885 #endif
1886             val = 0;
1887             if (optlen < sizeof(uint32_t)) {
1888                 return -TARGET_EINVAL;
1889             }
1890             if (get_user_u32(val, optval_addr)) {
1891                 return -TARGET_EFAULT;
1892             }
1893             ret = get_errno(setsockopt(sockfd, level, optname,
1894                                        &val, sizeof(val)));
1895             break;
1896         case IPV6_PKTINFO:
1897         {
1898             struct in6_pktinfo pki;
1899 
1900             if (optlen < sizeof(pki)) {
1901                 return -TARGET_EINVAL;
1902             }
1903 
1904             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1905                 return -TARGET_EFAULT;
1906             }
1907 
1908             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1909 
1910             ret = get_errno(setsockopt(sockfd, level, optname,
1911                                        &pki, sizeof(pki)));
1912             break;
1913         }
1914         default:
1915             goto unimplemented;
1916         }
1917         break;
1918     case SOL_ICMPV6:
1919         switch (optname) {
1920         case ICMPV6_FILTER:
1921         {
1922             struct icmp6_filter icmp6f;
1923 
1924             if (optlen > sizeof(icmp6f)) {
1925                 optlen = sizeof(icmp6f);
1926             }
1927 
1928             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
1929                 return -TARGET_EFAULT;
1930             }
1931 
1932             for (val = 0; val < 8; val++) {
1933                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
1934             }
1935 
1936             ret = get_errno(setsockopt(sockfd, level, optname,
1937                                        &icmp6f, optlen));
1938             break;
1939         }
1940         default:
1941             goto unimplemented;
1942         }
1943         break;
1944     case SOL_RAW:
1945         switch (optname) {
1946         case ICMP_FILTER:
1947         case IPV6_CHECKSUM:
1948             /* those take an u32 value */
1949             if (optlen < sizeof(uint32_t)) {
1950                 return -TARGET_EINVAL;
1951             }
1952 
1953             if (get_user_u32(val, optval_addr)) {
1954                 return -TARGET_EFAULT;
1955             }
1956             ret = get_errno(setsockopt(sockfd, level, optname,
1957                                        &val, sizeof(val)));
1958             break;
1959 
1960         default:
1961             goto unimplemented;
1962         }
1963         break;
1964     case TARGET_SOL_SOCKET:
1965         switch (optname) {
1966         case TARGET_SO_RCVTIMEO:
1967         {
1968                 struct timeval tv;
1969 
1970                 optname = SO_RCVTIMEO;
1971 
1972 set_timeout:
1973                 if (optlen != sizeof(struct target_timeval)) {
1974                     return -TARGET_EINVAL;
1975                 }
1976 
1977                 if (copy_from_user_timeval(&tv, optval_addr)) {
1978                     return -TARGET_EFAULT;
1979                 }
1980 
1981                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1982                                 &tv, sizeof(tv)));
1983                 return ret;
1984         }
1985         case TARGET_SO_SNDTIMEO:
1986                 optname = SO_SNDTIMEO;
1987                 goto set_timeout;
1988         case TARGET_SO_ATTACH_FILTER:
1989         {
1990                 struct target_sock_fprog *tfprog;
1991                 struct target_sock_filter *tfilter;
1992                 struct sock_fprog fprog;
1993                 struct sock_filter *filter;
1994                 int i;
1995 
1996                 if (optlen != sizeof(*tfprog)) {
1997                     return -TARGET_EINVAL;
1998                 }
1999                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2000                     return -TARGET_EFAULT;
2001                 }
2002                 if (!lock_user_struct(VERIFY_READ, tfilter,
2003                                       tswapal(tfprog->filter), 0)) {
2004                     unlock_user_struct(tfprog, optval_addr, 1);
2005                     return -TARGET_EFAULT;
2006                 }
2007 
2008                 fprog.len = tswap16(tfprog->len);
2009                 filter = g_try_new(struct sock_filter, fprog.len);
2010                 if (filter == NULL) {
2011                     unlock_user_struct(tfilter, tfprog->filter, 1);
2012                     unlock_user_struct(tfprog, optval_addr, 1);
2013                     return -TARGET_ENOMEM;
2014                 }
2015                 for (i = 0; i < fprog.len; i++) {
2016                     filter[i].code = tswap16(tfilter[i].code);
2017                     filter[i].jt = tfilter[i].jt;
2018                     filter[i].jf = tfilter[i].jf;
2019                     filter[i].k = tswap32(tfilter[i].k);
2020                 }
2021                 fprog.filter = filter;
2022 
2023                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2024                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2025                 g_free(filter);
2026 
2027                 unlock_user_struct(tfilter, tfprog->filter, 1);
2028                 unlock_user_struct(tfprog, optval_addr, 1);
2029                 return ret;
2030         }
2031 	case TARGET_SO_BINDTODEVICE:
2032 	{
2033 		char *dev_ifname, *addr_ifname;
2034 
2035 		if (optlen > IFNAMSIZ - 1) {
2036 		    optlen = IFNAMSIZ - 1;
2037 		}
2038 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2039 		if (!dev_ifname) {
2040 		    return -TARGET_EFAULT;
2041 		}
2042 		optname = SO_BINDTODEVICE;
2043 		addr_ifname = alloca(IFNAMSIZ);
2044 		memcpy(addr_ifname, dev_ifname, optlen);
2045 		addr_ifname[optlen] = 0;
2046 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2047                                            addr_ifname, optlen));
2048 		unlock_user (dev_ifname, optval_addr, 0);
2049 		return ret;
2050 	}
2051         case TARGET_SO_LINGER:
2052         {
2053                 struct linger lg;
2054                 struct target_linger *tlg;
2055 
2056                 if (optlen != sizeof(struct target_linger)) {
2057                     return -TARGET_EINVAL;
2058                 }
2059                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2060                     return -TARGET_EFAULT;
2061                 }
2062                 __get_user(lg.l_onoff, &tlg->l_onoff);
2063                 __get_user(lg.l_linger, &tlg->l_linger);
2064                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2065                                 &lg, sizeof(lg)));
2066                 unlock_user_struct(tlg, optval_addr, 0);
2067                 return ret;
2068         }
2069             /* Options with 'int' argument.  */
2070         case TARGET_SO_DEBUG:
2071 		optname = SO_DEBUG;
2072 		break;
2073         case TARGET_SO_REUSEADDR:
2074 		optname = SO_REUSEADDR;
2075 		break;
2076 #ifdef SO_REUSEPORT
2077         case TARGET_SO_REUSEPORT:
2078                 optname = SO_REUSEPORT;
2079                 break;
2080 #endif
2081         case TARGET_SO_TYPE:
2082 		optname = SO_TYPE;
2083 		break;
2084         case TARGET_SO_ERROR:
2085 		optname = SO_ERROR;
2086 		break;
2087         case TARGET_SO_DONTROUTE:
2088 		optname = SO_DONTROUTE;
2089 		break;
2090         case TARGET_SO_BROADCAST:
2091 		optname = SO_BROADCAST;
2092 		break;
2093         case TARGET_SO_SNDBUF:
2094 		optname = SO_SNDBUF;
2095 		break;
2096         case TARGET_SO_SNDBUFFORCE:
2097                 optname = SO_SNDBUFFORCE;
2098                 break;
2099         case TARGET_SO_RCVBUF:
2100 		optname = SO_RCVBUF;
2101 		break;
2102         case TARGET_SO_RCVBUFFORCE:
2103                 optname = SO_RCVBUFFORCE;
2104                 break;
2105         case TARGET_SO_KEEPALIVE:
2106 		optname = SO_KEEPALIVE;
2107 		break;
2108         case TARGET_SO_OOBINLINE:
2109 		optname = SO_OOBINLINE;
2110 		break;
2111         case TARGET_SO_NO_CHECK:
2112 		optname = SO_NO_CHECK;
2113 		break;
2114         case TARGET_SO_PRIORITY:
2115 		optname = SO_PRIORITY;
2116 		break;
2117 #ifdef SO_BSDCOMPAT
2118         case TARGET_SO_BSDCOMPAT:
2119 		optname = SO_BSDCOMPAT;
2120 		break;
2121 #endif
2122         case TARGET_SO_PASSCRED:
2123 		optname = SO_PASSCRED;
2124 		break;
2125         case TARGET_SO_PASSSEC:
2126                 optname = SO_PASSSEC;
2127                 break;
2128         case TARGET_SO_TIMESTAMP:
2129 		optname = SO_TIMESTAMP;
2130 		break;
2131         case TARGET_SO_RCVLOWAT:
2132 		optname = SO_RCVLOWAT;
2133 		break;
2134         default:
2135             goto unimplemented;
2136         }
2137 	if (optlen < sizeof(uint32_t))
2138             return -TARGET_EINVAL;
2139 
2140 	if (get_user_u32(val, optval_addr))
2141             return -TARGET_EFAULT;
2142 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2143         break;
2144     default:
2145     unimplemented:
2146         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2147         ret = -TARGET_ENOPROTOOPT;
2148     }
2149     return ret;
2150 }
2151 
/* do_getsockopt() Must return target values and target errnos.
 *
 * Translates a guest getsockopt(2): runs the host syscall and copies the
 * result back to guest memory at optval_addr, byte-swapping as needed.
 * optlen is a guest pointer to the in/out length word, not a length.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            /* Fetch the peer's credentials and convert the ucred struct
             * field by field, truncating to the guest-supplied length.  */
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_LINGER:
        {
            /* struct linger is converted field by field like ucred above. */
            struct linger lg;
            socklen_t lglen;
            struct target_linger *tlg;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            lglen = sizeof(lg);
            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                                       &lg, &lglen));
            if (ret < 0) {
                return ret;
            }
            if (len > lglen) {
                len = lglen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(lg.l_onoff, &tlg->l_onoff);
            __put_user(lg.l_linger, &tlg->l_linger);
            unlock_user_struct(tlg, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            goto int_case;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            /* Unknown SOL_SOCKET options are passed through untranslated
             * as plain ints; the host kernel decides whether to reject.  */
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): this passes sizeof(lv), not sizeof(val), as the
         * buffer length; they are the same size on supported hosts.  */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        /* Write back as a u32 if the guest gave us room, else one byte. */
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* Small buffer and a byte-sized value: return a single byte,
             * matching the kernel's historical behaviour for IP options.  */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* Same byte-vs-int writeback rule as the SOL_IP path above. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
2437 
2438 /* Convert target low/high pair representing file offset into the host
2439  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2440  * as the kernel doesn't handle them either.
2441  */
2442 static void target_to_host_low_high(abi_ulong tlow,
2443                                     abi_ulong thigh,
2444                                     unsigned long *hlow,
2445                                     unsigned long *hhigh)
2446 {
2447     uint64_t off = tlow |
2448         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2449         TARGET_LONG_BITS / 2;
2450 
2451     *hlow = off;
2452     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2453 }
2454 
/* Build a host iovec array from a guest target_iovec array at target_addr,
 * locking each guest buffer into host memory.  On failure returns NULL with
 * errno set (EINVAL, ENOMEM or EFAULT); on success the result must later be
 * released with unlock_iovec().  A fault on the *first* buffer is an error,
 * but faults on subsequent buffers degrade to zero-length entries so the
 * caller can realize a partial transfer, matching kernel semantics.  */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        /* Zero iovecs is valid; signal it with a NULL result and errno 0. */
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp the running total so the overall transfer never
             * exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: unlock every buffer locked before entry i failed.  */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
2542 
2543 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2544                          abi_ulong count, int copy)
2545 {
2546     struct target_iovec *target_vec;
2547     int i;
2548 
2549     target_vec = lock_user(VERIFY_READ, target_addr,
2550                            count * sizeof(struct target_iovec), 1);
2551     if (target_vec) {
2552         for (i = 0; i < count; i++) {
2553             abi_ulong base = tswapal(target_vec[i].iov_base);
2554             abi_long len = tswapal(target_vec[i].iov_len);
2555             if (len < 0) {
2556                 break;
2557             }
2558             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2559         }
2560         unlock_user(target_vec, target_addr, 0);
2561     }
2562 
2563     g_free(vec);
2564 }
2565 
2566 static inline int target_to_host_sock_type(int *type)
2567 {
2568     int host_type = 0;
2569     int target_type = *type;
2570 
2571     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2572     case TARGET_SOCK_DGRAM:
2573         host_type = SOCK_DGRAM;
2574         break;
2575     case TARGET_SOCK_STREAM:
2576         host_type = SOCK_STREAM;
2577         break;
2578     default:
2579         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2580         break;
2581     }
2582     if (target_type & TARGET_SOCK_CLOEXEC) {
2583 #if defined(SOCK_CLOEXEC)
2584         host_type |= SOCK_CLOEXEC;
2585 #else
2586         return -TARGET_EINVAL;
2587 #endif
2588     }
2589     if (target_type & TARGET_SOCK_NONBLOCK) {
2590 #if defined(SOCK_NONBLOCK)
2591         host_type |= SOCK_NONBLOCK;
2592 #elif !defined(O_NONBLOCK)
2593         return -TARGET_EINVAL;
2594 #endif
2595     }
2596     *type = host_type;
2597     return 0;
2598 }
2599 
2600 /* Try to emulate socket type flags after socket creation.  */
2601 static int sock_flags_fixup(int fd, int target_type)
2602 {
2603 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2604     if (target_type & TARGET_SOCK_NONBLOCK) {
2605         int flags = fcntl(fd, F_GETFL);
2606         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2607             close(fd);
2608             return -TARGET_EINVAL;
2609         }
2610     }
2611 #endif
2612     return fd;
2613 }
2614 
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
    /* Keep the guest's original type word: sock_flags_fixup() needs the
     * TARGET_SOCK_* flag bits after they are translated away below. */
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only netlink protocols with a data translator (or none needed)
     * are emulated; refuse anything else up front. */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -EPFNOSUPPORT;
    }

    /* Packet sockets carry a 16-bit value in "protocol"; convert it
     * from guest byte order. */
    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            /* Attach the fd translator matching the protocol; any other
             * value was already rejected above. */
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
2668 
2669 /* do_bind() Must return target values and target errnos. */
2670 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2671                         socklen_t addrlen)
2672 {
2673     void *addr;
2674     abi_long ret;
2675 
2676     if ((int)addrlen < 0) {
2677         return -TARGET_EINVAL;
2678     }
2679 
2680     addr = alloca(addrlen+1);
2681 
2682     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2683     if (ret)
2684         return ret;
2685 
2686     return get_errno(bind(sockfd, addr, addrlen));
2687 }
2688 
2689 /* do_connect() Must return target values and target errnos. */
2690 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2691                            socklen_t addrlen)
2692 {
2693     void *addr;
2694     abi_long ret;
2695 
2696     if ((int)addrlen < 0) {
2697         return -TARGET_EINVAL;
2698     }
2699 
2700     addr = alloca(addrlen+1);
2701 
2702     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2703     if (ret)
2704         return ret;
2705 
2706     return get_errno(safe_connect(sockfd, addr, addrlen));
2707 }
2708 
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation: converts the already-locked guest
 * target_msghdr (*msgp) into a host struct msghdr, performs the host
 * syscall, and for receives copies results (name, flags, control data)
 * back into *msgp.  'send' selects the direction.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Double the guest control length — presumably headroom for host
     * cmsg headers that can be larger than the target's; TODO confirm. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        /* lock_iovec() reports failure via host errno. */
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* Translate outbound data in a private copy so the guest's
             * buffer is left untouched.  NOTE(review): only the first
             * iovec entry is translated here. */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            /* Remember byte count; 'ret' is reused for error checks. */
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                               MIN(msg.msg_iov->iov_len, len));
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                /* (void *)-1 marks the deliberately-bad name set above. */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
2815 
2816 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2817                                int flags, int send)
2818 {
2819     abi_long ret;
2820     struct target_msghdr *msgp;
2821 
2822     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2823                           msgp,
2824                           target_msg,
2825                           send ? 1 : 0)) {
2826         return -TARGET_EFAULT;
2827     }
2828     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2829     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2830     return ret;
2831 }
2832 
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Emulate sendmmsg/recvmmsg by looping do_sendrecvmsg_locked() over the
 * guest's mmsghdr vector.  Returns the number of datagrams processed,
 * or a target errno if the very first one fails.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* Silently clamp oversized vectors, as the kernel does. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        /* Per-message byte count goes into the guest's msg_len field. */
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Only the first i entries were written. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
2879 
/* do_accept4() Must return target values and target errnos. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    /* Translate guest SOCK_* flag bits via the generic fcntl table. */
    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    /* Guest passed no address buffer: plain accept, nothing to copy. */
    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    /* NOTE(review): invalid target_addr yields EINVAL here, matching
     * the existing behaviour of this emulation; verify against kernel
     * accept(2) semantics if this is ever revisited. */
    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
    if (!is_error(ret)) {
        /* Copy out at most what the guest asked for, but report the
         * kernel's full address length. */
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
2918 
2919 /* do_getpeername() Must return target values and target errnos. */
2920 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2921                                abi_ulong target_addrlen_addr)
2922 {
2923     socklen_t addrlen, ret_addrlen;
2924     void *addr;
2925     abi_long ret;
2926 
2927     if (get_user_u32(addrlen, target_addrlen_addr))
2928         return -TARGET_EFAULT;
2929 
2930     if ((int)addrlen < 0) {
2931         return -TARGET_EINVAL;
2932     }
2933 
2934     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2935         return -TARGET_EFAULT;
2936 
2937     addr = alloca(addrlen);
2938 
2939     ret_addrlen = addrlen;
2940     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
2941     if (!is_error(ret)) {
2942         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2943         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2944             ret = -TARGET_EFAULT;
2945         }
2946     }
2947     return ret;
2948 }
2949 
2950 /* do_getsockname() Must return target values and target errnos. */
2951 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2952                                abi_ulong target_addrlen_addr)
2953 {
2954     socklen_t addrlen, ret_addrlen;
2955     void *addr;
2956     abi_long ret;
2957 
2958     if (get_user_u32(addrlen, target_addrlen_addr))
2959         return -TARGET_EFAULT;
2960 
2961     if ((int)addrlen < 0) {
2962         return -TARGET_EINVAL;
2963     }
2964 
2965     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2966         return -TARGET_EFAULT;
2967 
2968     addr = alloca(addrlen);
2969 
2970     ret_addrlen = addrlen;
2971     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
2972     if (!is_error(ret)) {
2973         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2974         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2975             ret = -TARGET_EFAULT;
2976         }
2977     }
2978     return ret;
2979 }
2980 
2981 /* do_socketpair() Must return target values and target errnos. */
2982 static abi_long do_socketpair(int domain, int type, int protocol,
2983                               abi_ulong target_tab_addr)
2984 {
2985     int tab[2];
2986     abi_long ret;
2987 
2988     target_to_host_sock_type(&type);
2989 
2990     ret = get_errno(socketpair(domain, type, protocol, tab));
2991     if (!is_error(ret)) {
2992         if (put_user_s32(tab[0], target_tab_addr)
2993             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2994             ret = -TARGET_EFAULT;
2995     }
2996     return ret;
2997 }
2998 
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    /* Non-NULL means host_msg is a translated heap copy and copy_msg
     * holds the original locked guest pointer for cleanup. */
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* Translate the payload in a private copy so the guest buffer
         * is not modified. */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* +1 byte — presumably so target_to_host_sockaddr() can
         * NUL-terminate the address; confirm against its implementation. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    /* Free the translated copy and restore the locked guest pointer so
     * unlock_user() releases the right buffer. */
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
3042 
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        /* Let an fd translator rewrite the received payload in place
         * before it is unlocked back to the guest. */
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            /* Copy out at most what the guest asked for; report the
             * kernel's full address length. */
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: write the received bytes back to guest memory. */
        unlock_user(host_msg, msg, len);
    } else {
        /* Error paths unlock without copying anything back. */
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
3098 
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos.
 *
 * Multiplexed socket syscall: 'num' selects the operation and 'vptr'
 * points at the operation's argument array in guest memory.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    /* (defensive: no nargs[] entry currently exceeds 6) */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3191 
#define N_SHM_REGIONS	32

/* Fixed-size table of guest shared-memory mappings; each slot records
 * one mapped region's guest address range. */
static struct shm_region {
    abi_ulong start;   /* guest virtual address of the region */
    abi_ulong size;    /* region length in bytes */
    bool in_use;       /* slot currently allocated? */
} shm_regions[N_SHM_REGIONS];
3199 
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
/* Guest-visible semid64_ds layout, used when the target does not
 * provide its own definition. */
struct target_semid64_ds
{
  struct target_ipc_perm sem_perm;
  abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
  abi_ulong __unused1;   /* pads otime to 64 bits on 32-bit ABIs */
#endif
  abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
  abi_ulong __unused2;   /* pads ctime to 64 bits on 32-bit ABIs */
#endif
  abi_ulong sem_nsems;
  abi_ulong __unused3;
  abi_ulong __unused4;
};
#endif
3218 
/* Convert a guest ipc_perm (embedded in a semid64_ds at target_addr)
 * into host form.  Returns 0 on success, -TARGET_EFAULT on bad guest
 * memory. */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* mode and __seq are 32-bit on some targets, 16-bit elsewhere. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3246 
/* Convert a host ipc_perm into the guest's semid64_ds at target_addr
 * (mirror of target_to_host_ipc_perm).  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* mode and __seq are 32-bit on some targets, 16-bit elsewhere. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3274 
3275 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3276                                                abi_ulong target_addr)
3277 {
3278     struct target_semid64_ds *target_sd;
3279 
3280     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3281         return -TARGET_EFAULT;
3282     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3283         return -TARGET_EFAULT;
3284     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3285     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3286     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3287     unlock_user_struct(target_sd, target_addr, 0);
3288     return 0;
3289 }
3290 
3291 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3292                                                struct semid_ds *host_sd)
3293 {
3294     struct target_semid64_ds *target_sd;
3295 
3296     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3297         return -TARGET_EFAULT;
3298     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3299         return -TARGET_EFAULT;
3300     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3301     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3302     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3303     unlock_user_struct(target_sd, target_addr, 1);
3304     return 0;
3305 }
3306 
/* Guest-visible layout of struct seminfo (IPC_INFO/SEM_INFO results). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3319 
3320 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3321                                               struct seminfo *host_seminfo)
3322 {
3323     struct target_seminfo *target_seminfo;
3324     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3325         return -TARGET_EFAULT;
3326     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3327     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3328     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3329     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3330     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3331     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3332     __put_user(host_seminfo->semume, &target_seminfo->semume);
3333     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3334     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3335     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3336     unlock_user_struct(target_seminfo, target_addr, 1);
3337     return 0;
3338 }
3339 
/* Host-side semctl() argument union; glibc requires callers to define
 * this themselves (see semctl(2)). */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
	struct seminfo *__buf;
};

/* Guest-side view of the same union: the pointer members are guest
 * addresses, hence abi_ulong. */
union target_semun {
	int val;
	abi_ulong buf;
	abi_ulong array;
	abi_ulong __buf;
};
3353 
/* Allocate a host array for SETALL and fill it from the guest array at
 * target_addr.  The semaphore count is obtained via IPC_STAT.  On
 * success *host_array owns a g_new'd buffer the caller must free
 * (host_to_target_semarray() does so). */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    /* Ask the kernel how many semaphores are in this set. */
    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        /* __get_user byte-swaps each element from guest order. */
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
3389 
3390 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3391                                                unsigned short **host_array)
3392 {
3393     int nsems;
3394     unsigned short *array;
3395     union semun semun;
3396     struct semid_ds semid_ds;
3397     int i, ret;
3398 
3399     semun.buf = &semid_ds;
3400 
3401     ret = semctl(semid, 0, IPC_STAT, semun);
3402     if (ret == -1)
3403         return get_errno(ret);
3404 
3405     nsems = semid_ds.sem_nsems;
3406 
3407     array = lock_user(VERIFY_WRITE, target_addr,
3408                       nsems*sizeof(unsigned short), 0);
3409     if (!array)
3410         return -TARGET_EFAULT;
3411 
3412     for(i=0; i<nsems; i++) {
3413         __put_user((*host_array)[i], &array[i]);
3414     }
3415     g_free(*host_array);
3416     unlock_user(array, target_addr, 1);
3417 
3418     return 0;
3419 }
3420 
3421 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3422                                  abi_ulong target_arg)
3423 {
3424     union target_semun target_su = { .buf = target_arg };
3425     union semun arg;
3426     struct semid_ds dsarg;
3427     unsigned short *array = NULL;
3428     struct seminfo seminfo;
3429     abi_long ret = -TARGET_EINVAL;
3430     abi_long err;
3431     cmd &= 0xff;
3432 
3433     switch( cmd ) {
3434 	case GETVAL:
3435 	case SETVAL:
3436             /* In 64 bit cross-endian situations, we will erroneously pick up
3437              * the wrong half of the union for the "val" element.  To rectify
3438              * this, the entire 8-byte structure is byteswapped, followed by
3439 	     * a swap of the 4 byte val field. In other cases, the data is
3440 	     * already in proper host byte order. */
3441 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3442 		target_su.buf = tswapal(target_su.buf);
3443 		arg.val = tswap32(target_su.val);
3444 	    } else {
3445 		arg.val = target_su.val;
3446 	    }
3447             ret = get_errno(semctl(semid, semnum, cmd, arg));
3448             break;
3449 	case GETALL:
3450 	case SETALL:
3451             err = target_to_host_semarray(semid, &array, target_su.array);
3452             if (err)
3453                 return err;
3454             arg.array = array;
3455             ret = get_errno(semctl(semid, semnum, cmd, arg));
3456             err = host_to_target_semarray(semid, target_su.array, &array);
3457             if (err)
3458                 return err;
3459             break;
3460 	case IPC_STAT:
3461 	case IPC_SET:
3462 	case SEM_STAT:
3463             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3464             if (err)
3465                 return err;
3466             arg.buf = &dsarg;
3467             ret = get_errno(semctl(semid, semnum, cmd, arg));
3468             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3469             if (err)
3470                 return err;
3471             break;
3472 	case IPC_INFO:
3473 	case SEM_INFO:
3474             arg.__buf = &seminfo;
3475             ret = get_errno(semctl(semid, semnum, cmd, arg));
3476             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3477             if (err)
3478                 return err;
3479             break;
3480 	case IPC_RMID:
3481 	case GETPID:
3482 	case GETNCNT:
3483 	case GETZCNT:
3484             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3485             break;
3486     }
3487 
3488     return ret;
3489 }
3490 
/* Guest-layout equivalent of the kernel's struct sembuf; fields are
 * byte-swapped into host order by target_to_host_sembuf() below. */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index within the set */
    short sem_op;            /* operation value */
    short sem_flg;           /* operation flags (e.g. IPC_NOWAIT, SEM_UNDO) */
};
3496 
3497 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3498                                              abi_ulong target_addr,
3499                                              unsigned nsops)
3500 {
3501     struct target_sembuf *target_sembuf;
3502     int i;
3503 
3504     target_sembuf = lock_user(VERIFY_READ, target_addr,
3505                               nsops*sizeof(struct target_sembuf), 1);
3506     if (!target_sembuf)
3507         return -TARGET_EFAULT;
3508 
3509     for(i=0; i<nsops; i++) {
3510         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3511         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3512         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3513     }
3514 
3515     unlock_user(target_sembuf, target_addr, 0);
3516 
3517     return 0;
3518 }
3519 
/* Emulate semop(2): convert the guest operation array and forward it to
 * the kernel via semtimedop with a NULL timeout (plain semop semantics).
 * NOTE(review): 'nsops' comes from the guest and sizes a VLA here; a very
 * large value could exhaust the stack before the kernel rejects it —
 * confirm the caller bounds it. */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
}
3529 
/* Guest-layout msqid_ds.  On 32-bit ABIs each time field is followed by
 * a padding word, matching the kernel's 32-bit layout. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;        /* last msgsnd time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;        /* last msgrcv time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;        /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;     /* current number of bytes on queue */
    abi_ulong msg_qnum;         /* number of messages on queue */
    abi_ulong msg_qbytes;       /* max bytes allowed on queue */
    abi_ulong msg_lspid;        /* pid of last msgsnd */
    abi_ulong msg_lrpid;        /* pid of last msgrcv */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
3553 
3554 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3555                                                abi_ulong target_addr)
3556 {
3557     struct target_msqid_ds *target_md;
3558 
3559     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3560         return -TARGET_EFAULT;
3561     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3562         return -TARGET_EFAULT;
3563     host_md->msg_stime = tswapal(target_md->msg_stime);
3564     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3565     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3566     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3567     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3568     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3569     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3570     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3571     unlock_user_struct(target_md, target_addr, 0);
3572     return 0;
3573 }
3574 
3575 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3576                                                struct msqid_ds *host_md)
3577 {
3578     struct target_msqid_ds *target_md;
3579 
3580     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3581         return -TARGET_EFAULT;
3582     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3583         return -TARGET_EFAULT;
3584     target_md->msg_stime = tswapal(host_md->msg_stime);
3585     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3586     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3587     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3588     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3589     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3590     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3591     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3592     unlock_user_struct(target_md, target_addr, 1);
3593     return 0;
3594 }
3595 
/* Guest-layout msginfo, as returned by msgctl(IPC_INFO/MSG_INFO).
 * All fields are plain ints apart from the trailing msgseg. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
3606 
3607 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3608                                               struct msginfo *host_msginfo)
3609 {
3610     struct target_msginfo *target_msginfo;
3611     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3612         return -TARGET_EFAULT;
3613     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3614     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3615     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3616     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3617     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3618     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3619     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3620     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3621     unlock_user_struct(target_msginfo, target_addr, 1);
3622     return 0;
3623 }
3624 
3625 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3626 {
3627     struct msqid_ds dsarg;
3628     struct msginfo msginfo;
3629     abi_long ret = -TARGET_EINVAL;
3630 
3631     cmd &= 0xff;
3632 
3633     switch (cmd) {
3634     case IPC_STAT:
3635     case IPC_SET:
3636     case MSG_STAT:
3637         if (target_to_host_msqid_ds(&dsarg,ptr))
3638             return -TARGET_EFAULT;
3639         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3640         if (host_to_target_msqid_ds(ptr,&dsarg))
3641             return -TARGET_EFAULT;
3642         break;
3643     case IPC_RMID:
3644         ret = get_errno(msgctl(msgid, cmd, NULL));
3645         break;
3646     case IPC_INFO:
3647     case MSG_INFO:
3648         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3649         if (host_to_target_msginfo(ptr, &msginfo))
3650             return -TARGET_EFAULT;
3651         break;
3652     }
3653 
3654     return ret;
3655 }
3656 
/* Guest-layout msgbuf: abi_long mtype followed by the message text.
 * mtext is declared [1] but addressed as a variable-length payload. */
struct target_msgbuf {
    abi_long mtype;
    char	mtext[1];
};
3661 
/* Emulate msgsnd(2): copy the guest message at 'msgp' into a host msgbuf
 * (byte-swapping mtype) and forward it to the kernel.
 * Returns 0 or a negative target errno. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    /* Reject negative sizes before they become huge allocations. */
    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* Host buffer: native 'long' mtype followed by msgsz bytes of text.
     * NOTE(review): target_mb->mtext is read msgsz bytes deep, beyond
     * sizeof(struct target_msgbuf) that lock_user_struct mapped —
     * relies on a linear guest mapping; confirm this is intended. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
3688 
/* Emulate msgrcv(2): receive into a host buffer, then copy the text and
 * the byte-swapped mtype back to the guest msgbuf at 'msgp'.
 * Returns the number of bytes received or a negative target errno. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    /* Reject negative sizes before they become huge allocations. */
    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* Host buffer: native 'long' mtype followed by up to msgsz bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* mtext starts right after the abi_long mtype in target_msgbuf. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        /* Lock the received length for writing so access is verified;
         * the actual copy goes through target_mb->mtext. */
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* target_mb is always valid here (early return above on failure);
     * the check is kept for safety. */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
3731 
3732 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3733                                                abi_ulong target_addr)
3734 {
3735     struct target_shmid_ds *target_sd;
3736 
3737     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3738         return -TARGET_EFAULT;
3739     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3740         return -TARGET_EFAULT;
3741     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3742     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3743     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3744     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3745     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3746     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3747     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3748     unlock_user_struct(target_sd, target_addr, 0);
3749     return 0;
3750 }
3751 
3752 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3753                                                struct shmid_ds *host_sd)
3754 {
3755     struct target_shmid_ds *target_sd;
3756 
3757     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3758         return -TARGET_EFAULT;
3759     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3760         return -TARGET_EFAULT;
3761     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3762     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3763     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3764     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3765     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3766     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3767     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3768     unlock_user_struct(target_sd, target_addr, 1);
3769     return 0;
3770 }
3771 
/* Guest-layout shminfo, as returned by shmctl(IPC_INFO). */
struct  target_shminfo {
    abi_ulong shmmax;   /* max segment size */
    abi_ulong shmmin;   /* min segment size */
    abi_ulong shmmni;   /* max number of segments */
    abi_ulong shmseg;   /* max segments per process */
    abi_ulong shmall;   /* max total shared memory (pages) */
};
3779 
3780 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3781                                               struct shminfo *host_shminfo)
3782 {
3783     struct target_shminfo *target_shminfo;
3784     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3785         return -TARGET_EFAULT;
3786     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3787     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3788     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3789     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3790     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3791     unlock_user_struct(target_shminfo, target_addr, 1);
3792     return 0;
3793 }
3794 
/* Guest-layout shm_info, as returned by shmctl(SHM_INFO). */
struct target_shm_info {
    int used_ids;               /* number of existing segments */
    abi_ulong shm_tot;          /* total allocated shm (pages) */
    abi_ulong shm_rss;          /* resident shm (pages) */
    abi_ulong shm_swp;          /* swapped shm (pages) */
    abi_ulong swap_attempts;    /* historical, unused by modern kernels */
    abi_ulong swap_successes;   /* historical, unused by modern kernels */
};
3803 
3804 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3805                                                struct shm_info *host_shm_info)
3806 {
3807     struct target_shm_info *target_shm_info;
3808     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3809         return -TARGET_EFAULT;
3810     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3811     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3812     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3813     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3814     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3815     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3816     unlock_user_struct(target_shm_info, target_addr, 1);
3817     return 0;
3818 }
3819 
/* Emulate shmctl(2): convert the command-specific structure in both
 * directions around the host syscall.  Unknown commands yield
 * -TARGET_EINVAL. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        /* Round-trip the shmid_ds through the host call. */
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* The kernel returns a struct shminfo through the shmid_ds arg. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* Likewise a struct shm_info for SHM_INFO. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* These take no argument worth converting. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
3858 
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    /* Default: shared memory attach addresses need only be page-aligned. */
    return TARGET_PAGE_SIZE;
}
#endif
3878 
/* Emulate shmat(2): attach the segment at a guest address (aligned per
 * target_shmlba), update QEMU's page flags, and record the region so
 * do_shmdt() can undo the bookkeeping.  Returns the guest address or a
 * negative target errno. */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Enforce SHMLBA alignment: round down with SHM_RND, else reject. */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    /* The segment must fit within the guest address space. */
    if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* No address given: pick a free guest range ourselves. */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP lets shmat replace the placeholder mapping. */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Mark the attached range valid/readable (writable unless RDONLY). */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Record the region in a free slot so do_shmdt can find it.
     * NOTE(review): if all N_SHM_REGIONS slots are busy the attach still
     * succeeds but is not tracked — confirm that is acceptable. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}
3947 
3948 static inline abi_long do_shmdt(abi_ulong shmaddr)
3949 {
3950     int i;
3951     abi_long rv;
3952 
3953     mmap_lock();
3954 
3955     for (i = 0; i < N_SHM_REGIONS; ++i) {
3956         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3957             shm_regions[i].in_use = false;
3958             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3959             break;
3960         }
3961     }
3962     rv = get_errno(shmdt(g2h(shmaddr)));
3963 
3964     mmap_unlock();
3965 
3966     return rv;
3967 }
3968 
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
/* Demultiplexer for the legacy ipc(2) syscall: the low 16 bits of 'call'
 * select the SysV IPC operation, the high 16 bits carry a 'version' that
 * selects historical calling conventions for some operations. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        /* NOTE(review): the return of get_user_ual is not checked; a bad
         * 'ptr' silently yields an undefined atptr — confirm intended. */
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Historical (version 0) convention: ptr points to a
                 * kludge struct bundling msgp and msgtyp. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* Old iBCS2 shmat interface is not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
	break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
	break;

    case IPCOP_shmget:
	/* IPC_* flag values are the same on all linux platforms */
	ret = get_errno(shmget(first, second, third));
	break;

	/* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
	gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
	ret = -TARGET_ENOSYS;
	break;
    }
    return ret;
}
#endif
4075 
4076 /* kernel structure types definitions */
4077 
/* First expansion of syscall_types.h: build an enum of STRUCT_* ids,
 * one per structure description (STRUCT_MAX terminates the list). */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second expansion: emit one thunk type-description array per regular
 * struct; STRUCT_SPECIAL entries get hand-written handling elsewhere. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
4092 
typedef struct IOCTLEntry IOCTLEntry;

/* Signature of a custom ioctl handler, used when an ioctl's argument
 * needs marshalling beyond the generic thunk conversion. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One row of the guest-to-host ioctl translation table. */
struct IOCTLEntry {
    int target_cmd;             /* ioctl request number as the guest sees it */
    unsigned int host_cmd;      /* corresponding host request number */
    const char *name;           /* human-readable name (logging/strace) */
    int access;                 /* data direction: IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;      /* custom handler, or NULL for generic path */
    const argtype arg_type[5];  /* thunk description of the argument */
};

/* Data-direction flags for IOCTLEntry.access. */
#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Upper bound for structures converted through the fixed buf_temp. */
#define MAX_STRUCT_SIZE 4096
4112 
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/* Custom handler for FS_IOC_FIEMAP: the argument is a struct fiemap with
 * a trailing variable-length extent array, which the generic thunk path
 * cannot size.  Returns the ioctl result or a negative target errno. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;    /* set when fm was heap-allocated below */

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Convert the fixed struct fiemap header from the guest. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Guard the size computation below against 32-bit overflow. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4201 
4202 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4203                                 int fd, int cmd, abi_long arg)
4204 {
4205     const argtype *arg_type = ie->arg_type;
4206     int target_size;
4207     void *argptr;
4208     int ret;
4209     struct ifconf *host_ifconf;
4210     uint32_t outbufsz;
4211     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4212     int target_ifreq_size;
4213     int nb_ifreq;
4214     int free_buf = 0;
4215     int i;
4216     int target_ifc_len;
4217     abi_long target_ifc_buf;
4218     int host_ifc_len;
4219     char *host_ifc_buf;
4220 
4221     assert(arg_type[0] == TYPE_PTR);
4222     assert(ie->access == IOC_RW);
4223 
4224     arg_type++;
4225     target_size = thunk_type_size(arg_type, 0);
4226 
4227     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4228     if (!argptr)
4229         return -TARGET_EFAULT;
4230     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4231     unlock_user(argptr, arg, 0);
4232 
4233     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4234     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4235     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4236 
4237     if (target_ifc_buf != 0) {
4238         target_ifc_len = host_ifconf->ifc_len;
4239         nb_ifreq = target_ifc_len / target_ifreq_size;
4240         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4241 
4242         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4243         if (outbufsz > MAX_STRUCT_SIZE) {
4244             /*
4245              * We can't fit all the extents into the fixed size buffer.
4246              * Allocate one that is large enough and use it instead.
4247              */
4248             host_ifconf = malloc(outbufsz);
4249             if (!host_ifconf) {
4250                 return -TARGET_ENOMEM;
4251             }
4252             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4253             free_buf = 1;
4254         }
4255         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4256 
4257         host_ifconf->ifc_len = host_ifc_len;
4258     } else {
4259       host_ifc_buf = NULL;
4260     }
4261     host_ifconf->ifc_buf = host_ifc_buf;
4262 
4263     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4264     if (!is_error(ret)) {
4265 	/* convert host ifc_len to target ifc_len */
4266 
4267         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4268         target_ifc_len = nb_ifreq * target_ifreq_size;
4269         host_ifconf->ifc_len = target_ifc_len;
4270 
4271 	/* restore target ifc_buf */
4272 
4273         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4274 
4275 	/* copy struct ifconf to target user */
4276 
4277         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4278         if (!argptr)
4279             return -TARGET_EFAULT;
4280         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4281         unlock_user(argptr, arg, target_size);
4282 
4283         if (target_ifc_buf != 0) {
4284             /* copy ifreq[] to target user */
4285             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4286             for (i = 0; i < nb_ifreq ; i++) {
4287                 thunk_convert(argptr + i * target_ifreq_size,
4288                               host_ifc_buf + i * sizeof(struct ifreq),
4289                               ifreq_arg_type, THUNK_TARGET);
4290             }
4291             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4292         }
4293     }
4294 
4295     if (free_buf) {
4296         free(host_ifconf);
4297     }
4298 
4299     return ret;
4300 }
4301 
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
/* Tracking record for an in-flight USB request: guest addresses of the
 * urb and its data buffer, the locked host pointer for that buffer, and
 * the host-side urb actually submitted to the kernel.  The reap handler
 * recovers this record from &host_urb via offsetof(). */
struct live_urb {
    uint64_t target_urb_adr;    /* guest address of the urb (hash key) */
    uint64_t target_buf_adr;    /* guest address of the data buffer */
    char *target_buf_ptr;       /* locked host view of the data buffer */
    struct usbdevfs_urb host_urb;
};
4312 
4313 static GHashTable *usbdevfs_urb_hashtable(void)
4314 {
4315     static GHashTable *urb_hashtable;
4316 
4317     if (!urb_hashtable) {
4318         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4319     }
4320     return urb_hashtable;
4321 }
4322 
/* Track an in-flight urb; the record doubles as its own key since the
 * hash/equal callbacks read its leading target_urb_adr field. */
static void urb_hashtable_insert(struct live_urb *urb)
{
    g_hash_table_insert(usbdevfs_urb_hashtable(), urb, urb);
}
4328 
4329 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4330 {
4331     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4332     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4333 }
4334 
/* Stop tracking a urb (called when it is reaped or discarded). */
static void urb_hashtable_remove(struct live_urb *urb)
{
    g_hash_table_remove(usbdevfs_urb_hashtable(), urb);
}
4340 
/* Custom handler for USBDEVFS_REAPURB(NDELAY): the kernel returns a
 * pointer to the host urb we submitted; recover our live_urb wrapper
 * from it, write the converted urb back to the guest, and store the
 * guest urb address through 'arg'. */
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    /* The kernel fills buf_temp with a host pointer to the reaped urb. */
    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    /* Step back from &host_urb to the enclosing live_urb record. */
    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    /* Release the guest data buffer, writing back what the device read in. */
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
        lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
4400 
4401 static abi_long
4402 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4403                              uint8_t *buf_temp __attribute__((unused)),
4404                              int fd, int cmd, abi_long arg)
4405 {
4406     struct live_urb *lurb;
4407 
4408     /* map target address back to host URB with metadata. */
4409     lurb = urb_hashtable_lookup(arg);
4410     if (!lurb) {
4411         return -TARGET_EFAULT;
4412     }
4413     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4414 }
4415 
/*
 * USBDEVFS_SUBMITURB: build a host copy of the guest's usbdevfs_urb,
 * lock the guest data buffer into host memory, and hand the host URB to
 * the kernel.  The live_urb stays allocated (and hashed) until it is
 * reaped or the submit fails.
 */
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory.  hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_malloc0(sizeof(struct live_urb));
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    /* Convert the guest's usbdevfs_urb into lurb->host_urb. */
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Remember guest addresses so reap/discard can find this URB again. */
    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
        lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    /* On success the kernel holds a reference to host_urb until the URB
     * is reaped, so keep lurb alive and indexed by guest URB address;
     * on failure undo the buffer lock and free the tracking struct. */
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
4476 #endif /* CONFIG_USBFS */
4477 
4478 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4479                             int cmd, abi_long arg)
4480 {
4481     void *argptr;
4482     struct dm_ioctl *host_dm;
4483     abi_long guest_data;
4484     uint32_t guest_data_size;
4485     int target_size;
4486     const argtype *arg_type = ie->arg_type;
4487     abi_long ret;
4488     void *big_buf = NULL;
4489     char *host_data;
4490 
4491     arg_type++;
4492     target_size = thunk_type_size(arg_type, 0);
4493     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4494     if (!argptr) {
4495         ret = -TARGET_EFAULT;
4496         goto out;
4497     }
4498     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4499     unlock_user(argptr, arg, 0);
4500 
4501     /* buf_temp is too small, so fetch things into a bigger buffer */
4502     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4503     memcpy(big_buf, buf_temp, target_size);
4504     buf_temp = big_buf;
4505     host_dm = big_buf;
4506 
4507     guest_data = arg + host_dm->data_start;
4508     if ((guest_data - arg) < 0) {
4509         ret = -TARGET_EINVAL;
4510         goto out;
4511     }
4512     guest_data_size = host_dm->data_size - host_dm->data_start;
4513     host_data = (char*)host_dm + host_dm->data_start;
4514 
4515     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4516     if (!argptr) {
4517         ret = -TARGET_EFAULT;
4518         goto out;
4519     }
4520 
4521     switch (ie->host_cmd) {
4522     case DM_REMOVE_ALL:
4523     case DM_LIST_DEVICES:
4524     case DM_DEV_CREATE:
4525     case DM_DEV_REMOVE:
4526     case DM_DEV_SUSPEND:
4527     case DM_DEV_STATUS:
4528     case DM_DEV_WAIT:
4529     case DM_TABLE_STATUS:
4530     case DM_TABLE_CLEAR:
4531     case DM_TABLE_DEPS:
4532     case DM_LIST_VERSIONS:
4533         /* no input data */
4534         break;
4535     case DM_DEV_RENAME:
4536     case DM_DEV_SET_GEOMETRY:
4537         /* data contains only strings */
4538         memcpy(host_data, argptr, guest_data_size);
4539         break;
4540     case DM_TARGET_MSG:
4541         memcpy(host_data, argptr, guest_data_size);
4542         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4543         break;
4544     case DM_TABLE_LOAD:
4545     {
4546         void *gspec = argptr;
4547         void *cur_data = host_data;
4548         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4549         int spec_size = thunk_type_size(arg_type, 0);
4550         int i;
4551 
4552         for (i = 0; i < host_dm->target_count; i++) {
4553             struct dm_target_spec *spec = cur_data;
4554             uint32_t next;
4555             int slen;
4556 
4557             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4558             slen = strlen((char*)gspec + spec_size) + 1;
4559             next = spec->next;
4560             spec->next = sizeof(*spec) + slen;
4561             strcpy((char*)&spec[1], gspec + spec_size);
4562             gspec += next;
4563             cur_data += spec->next;
4564         }
4565         break;
4566     }
4567     default:
4568         ret = -TARGET_EINVAL;
4569         unlock_user(argptr, guest_data, 0);
4570         goto out;
4571     }
4572     unlock_user(argptr, guest_data, 0);
4573 
4574     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4575     if (!is_error(ret)) {
4576         guest_data = arg + host_dm->data_start;
4577         guest_data_size = host_dm->data_size - host_dm->data_start;
4578         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4579         switch (ie->host_cmd) {
4580         case DM_REMOVE_ALL:
4581         case DM_DEV_CREATE:
4582         case DM_DEV_REMOVE:
4583         case DM_DEV_RENAME:
4584         case DM_DEV_SUSPEND:
4585         case DM_DEV_STATUS:
4586         case DM_TABLE_LOAD:
4587         case DM_TABLE_CLEAR:
4588         case DM_TARGET_MSG:
4589         case DM_DEV_SET_GEOMETRY:
4590             /* no return data */
4591             break;
4592         case DM_LIST_DEVICES:
4593         {
4594             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4595             uint32_t remaining_data = guest_data_size;
4596             void *cur_data = argptr;
4597             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4598             int nl_size = 12; /* can't use thunk_size due to alignment */
4599 
4600             while (1) {
4601                 uint32_t next = nl->next;
4602                 if (next) {
4603                     nl->next = nl_size + (strlen(nl->name) + 1);
4604                 }
4605                 if (remaining_data < nl->next) {
4606                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4607                     break;
4608                 }
4609                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4610                 strcpy(cur_data + nl_size, nl->name);
4611                 cur_data += nl->next;
4612                 remaining_data -= nl->next;
4613                 if (!next) {
4614                     break;
4615                 }
4616                 nl = (void*)nl + next;
4617             }
4618             break;
4619         }
4620         case DM_DEV_WAIT:
4621         case DM_TABLE_STATUS:
4622         {
4623             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4624             void *cur_data = argptr;
4625             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4626             int spec_size = thunk_type_size(arg_type, 0);
4627             int i;
4628 
4629             for (i = 0; i < host_dm->target_count; i++) {
4630                 uint32_t next = spec->next;
4631                 int slen = strlen((char*)&spec[1]) + 1;
4632                 spec->next = (cur_data - argptr) + spec_size + slen;
4633                 if (guest_data_size < spec->next) {
4634                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4635                     break;
4636                 }
4637                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4638                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4639                 cur_data = argptr + spec->next;
4640                 spec = (void*)host_dm + host_dm->data_start + next;
4641             }
4642             break;
4643         }
4644         case DM_TABLE_DEPS:
4645         {
4646             void *hdata = (void*)host_dm + host_dm->data_start;
4647             int count = *(uint32_t*)hdata;
4648             uint64_t *hdev = hdata + 8;
4649             uint64_t *gdev = argptr + 8;
4650             int i;
4651 
4652             *(uint32_t*)argptr = tswap32(count);
4653             for (i = 0; i < count; i++) {
4654                 *gdev = tswap64(*hdev);
4655                 gdev++;
4656                 hdev++;
4657             }
4658             break;
4659         }
4660         case DM_LIST_VERSIONS:
4661         {
4662             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4663             uint32_t remaining_data = guest_data_size;
4664             void *cur_data = argptr;
4665             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4666             int vers_size = thunk_type_size(arg_type, 0);
4667 
4668             while (1) {
4669                 uint32_t next = vers->next;
4670                 if (next) {
4671                     vers->next = vers_size + (strlen(vers->name) + 1);
4672                 }
4673                 if (remaining_data < vers->next) {
4674                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4675                     break;
4676                 }
4677                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4678                 strcpy(cur_data + vers_size, vers->name);
4679                 cur_data += vers->next;
4680                 remaining_data -= vers->next;
4681                 if (!next) {
4682                     break;
4683                 }
4684                 vers = (void*)vers + next;
4685             }
4686             break;
4687         }
4688         default:
4689             unlock_user(argptr, guest_data, 0);
4690             ret = -TARGET_EINVAL;
4691             goto out;
4692         }
4693         unlock_user(argptr, guest_data, guest_data_size);
4694 
4695         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4696         if (!argptr) {
4697             ret = -TARGET_EFAULT;
4698             goto out;
4699         }
4700         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4701         unlock_user(argptr, arg, target_size);
4702     }
4703 out:
4704     g_free(big_buf);
4705     return ret;
4706 }
4707 
4708 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4709                                int cmd, abi_long arg)
4710 {
4711     void *argptr;
4712     int target_size;
4713     const argtype *arg_type = ie->arg_type;
4714     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4715     abi_long ret;
4716 
4717     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4718     struct blkpg_partition host_part;
4719 
4720     /* Read and convert blkpg */
4721     arg_type++;
4722     target_size = thunk_type_size(arg_type, 0);
4723     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4724     if (!argptr) {
4725         ret = -TARGET_EFAULT;
4726         goto out;
4727     }
4728     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4729     unlock_user(argptr, arg, 0);
4730 
4731     switch (host_blkpg->op) {
4732     case BLKPG_ADD_PARTITION:
4733     case BLKPG_DEL_PARTITION:
4734         /* payload is struct blkpg_partition */
4735         break;
4736     default:
4737         /* Unknown opcode */
4738         ret = -TARGET_EINVAL;
4739         goto out;
4740     }
4741 
4742     /* Read and convert blkpg->data */
4743     arg = (abi_long)(uintptr_t)host_blkpg->data;
4744     target_size = thunk_type_size(part_arg_type, 0);
4745     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4746     if (!argptr) {
4747         ret = -TARGET_EFAULT;
4748         goto out;
4749     }
4750     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4751     unlock_user(argptr, arg, 0);
4752 
4753     /* Swizzle the data pointer to our local copy and call! */
4754     host_blkpg->data = &host_part;
4755     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4756 
4757 out:
4758     return ret;
4759 }
4760 
/*
 * Routing-table ioctls on struct rtentry: the rt_dev field is a string
 * pointer that the generic thunk cannot follow, so the struct is
 * converted field by field and the device-name string is locked into
 * host memory separately.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                /* Lock the guest's device-name string into host memory;
                 * it is unlocked again after the ioctl below. */
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    /* The loop above must have visited rt_dev for a valid rtentry. */
    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
4826 
4827 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4828                                      int fd, int cmd, abi_long arg)
4829 {
4830     int sig = target_to_host_signal(arg);
4831     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4832 }
4833 
#ifdef TIOCGPTPEER
/* TIOCGPTPEER takes open(2)-style flags; translate the guest's flag
 * bits to host values before issuing the ioctl. */
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    return get_errno(safe_ioctl(fd, ie->host_cmd,
                                target_to_host_bitmask(arg, fcntl_flags_tbl)));
}
#endif
4842 
/* Table of supported ioctls, generated from ioctls.h.  Each entry maps
 * a target command to a host command plus argument-conversion info;
 * IOCTL_SPECIAL entries supply a custom do_fn conversion routine and
 * IOCTL_IGNORE entries have host_cmd == 0 (not implemented on Linux). */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, }, /* table terminator */
};
4853 
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear scan of the ioctl table for the target command. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        /* Commands too irregular for the generic thunking below are
         * dispatched to their custom handler. */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux.  */
        return -TARGET_ENOSYS;
    }

    /* Generic path: convert the argument per its declared type and the
     * command's access direction. */
    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* scalar argument passed through unchanged */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* kernel writes: run the ioctl, then copy the result out to
             * guest memory */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* kernel reads: copy the argument in from guest memory first */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* both directions: copy in, run the ioctl, copy back out */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
4941 
/* Translation table for termios c_iflag bits (target <-> host). */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};
4959 
4960 static const bitmask_transtbl oflag_tbl[] = {
4961 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4962 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4963 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4964 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4965 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4966 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4967 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4968 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4969 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4970 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4971 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4972 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4973 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4974 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4975 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4976 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4977 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4978 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4979 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4980 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4981 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4982 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4983 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4984 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4985 	{ 0, 0, 0, 0 }
4986 };
4987 
4988 static const bitmask_transtbl cflag_tbl[] = {
4989 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4990 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4991 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4992 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4993 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4994 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4995 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4996 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4997 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4998 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4999 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5000 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5001 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5002 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5003 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5004 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5005 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5006 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5007 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5008 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5009 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5010 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5011 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5012 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5013 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5014 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5015 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5016 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5017 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5018 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5019 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5020 	{ 0, 0, 0, 0 }
5021 };
5022 
5023 static const bitmask_transtbl lflag_tbl[] = {
5024 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5025 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5026 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5027 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5028 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5029 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5030 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5031 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5032 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5033 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5034 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5035 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5036 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5037 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5038 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5039 	{ 0, 0, 0, 0 }
5040 };
5041 
/* Convert a guest struct target_termios (src) into a host_termios (dst):
 * flag words go through the bitmask tables above, control characters are
 * remapped index by index since the target/host layouts differ. */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* clear first: host slots without a target counterpart stay zero */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
5076 
/* Inverse of target_to_host_termios(): convert a host_termios (src)
 * into the guest's struct target_termios (dst). */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* clear first: target slots without a host counterpart stay zero */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
5111 
/* Thunk descriptor for termios: uses the custom convert functions above
 * instead of per-field offset tables. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
5117 
5118 static bitmask_transtbl mmap_flags_tbl[] = {
5119     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5120     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5121     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5122     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5123       MAP_ANONYMOUS, MAP_ANONYMOUS },
5124     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5125       MAP_GROWSDOWN, MAP_GROWSDOWN },
5126     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5127       MAP_DENYWRITE, MAP_DENYWRITE },
5128     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5129       MAP_EXECUTABLE, MAP_EXECUTABLE },
5130     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5131     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5132       MAP_NORESERVE, MAP_NORESERVE },
5133     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5134     /* MAP_STACK had been ignored by the kernel for quite some time.
5135        Recognize it for the target insofar as we do not want to pass
5136        it through to the host.  */
5137     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5138     { 0, 0, 0, 0 }
5139 };
5140 
5141 #if defined(TARGET_I386)
5142 
5143 /* NOTE: there is really one LDT for all the threads */
5144 static uint8_t *ldt_table;
5145 
5146 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5147 {
5148     int size;
5149     void *p;
5150 
5151     if (!ldt_table)
5152         return 0;
5153     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5154     if (size > bytecount)
5155         size = bytecount;
5156     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5157     if (!p)
5158         return -TARGET_EFAULT;
5159     /* ??? Should this by byteswapped?  */
5160     memcpy(p, ldt_table, size);
5161     unlock_user(p, ptr, size);
5162     return size;
5163 }
5164 
/* XXX: add locking support */
/*
 * Write half of the i386 modify_ldt emulation: decode a guest
 * target_modify_ldt_ldt_s descriptor request and install the resulting
 * 8-byte segment descriptor into the emulated LDT.
 *
 * 'oldmode' selects the legacy modify_ldt(func=1) semantics: the
 * 'useable' bit is ignored and contents==3 is rejected outright.
 * Returns 0 on success or a negative TARGET_* errno.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    /* Copy the request in from guest memory, byte-swapping each field. */
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flag bits (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    /* contents==3 is only accepted in new mode, and only for entries
       marked not-present. */
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        /* Lazily allocate the single process-wide LDT (see note above)
           inside guest memory so g2h() can address it. */
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Assemble the two descriptor words: entry_1 carries base[15:0] and
       limit[15:0]; entry_2 the remaining base/limit bits plus access
       bits (0x7000 sets the S bit and DPL=3). */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5256 
5257 /* specific and weird i386 syscalls */
5258 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5259                               unsigned long bytecount)
5260 {
5261     abi_long ret;
5262 
5263     switch (func) {
5264     case 0:
5265         ret = read_ldt(ptr, bytecount);
5266         break;
5267     case 1:
5268         ret = write_ldt(env, ptr, bytecount, 1);
5269         break;
5270     case 0x11:
5271         ret = write_ldt(env, ptr, bytecount, 0);
5272         break;
5273     default:
5274         ret = -TARGET_ENOSYS;
5275         break;
5276     }
5277     return ret;
5278 }
5279 
5280 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * Emulate set_thread_area(2) for 32-bit i386 guests: install a TLS
 * descriptor into one of the GDT's TLS slots.  If the guest passes
 * entry_number == -1, pick the first free TLS slot and write the chosen
 * index back into the guest struct.  Returns 0 or a -TARGET_* errno.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    /* VERIFY_WRITE because we may store the allocated entry_number back. */
    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Find a free TLS slot (an all-zero GDT entry). */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
           return -TARGET_EINVAL;
    /* Unpack the flag bits (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Assemble the descriptor words: entry_1 = base[15:0] | limit[15:0];
       entry_2 = remaining base/limit bits plus access/flag bits
       (0x7000 sets the S bit and DPL=3). */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5365 
/*
 * Emulate get_thread_area(2): read the GDT TLS descriptor selected by
 * the guest's entry_number, decode it back into user_desc fields, and
 * store them into the guest struct.  Returns 0 or a -TARGET_* errno.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Reverse the bit packing done in do_set_thread_area(). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    /* Repack the flags in user_desc bit order. */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
5412 #endif /* TARGET_I386 && TARGET_ABI32 */
5413 
5414 #ifndef TARGET_ABI32
5415 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5416 {
5417     abi_long ret = 0;
5418     abi_ulong val;
5419     int idx;
5420 
5421     switch(code) {
5422     case TARGET_ARCH_SET_GS:
5423     case TARGET_ARCH_SET_FS:
5424         if (code == TARGET_ARCH_SET_GS)
5425             idx = R_GS;
5426         else
5427             idx = R_FS;
5428         cpu_x86_load_seg(env, idx, 0);
5429         env->segs[idx].base = addr;
5430         break;
5431     case TARGET_ARCH_GET_GS:
5432     case TARGET_ARCH_GET_FS:
5433         if (code == TARGET_ARCH_GET_GS)
5434             idx = R_GS;
5435         else
5436             idx = R_FS;
5437         val = env->segs[idx].base;
5438         if (put_user(val, addr, abi_ulong))
5439             ret = -TARGET_EFAULT;
5440         break;
5441     default:
5442         ret = -TARGET_EINVAL;
5443         break;
5444     }
5445     return ret;
5446 }
5447 #endif
5448 
5449 #endif /* defined(TARGET_I386) */
5450 
5451 #define NEW_STACK_SIZE 0x40000
5452 
5453 
/* Serializes thread creation in do_fork(); the new thread also briefly
   takes it in clone_func() to wait for the parent to finish setup. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;

/* Startup parameters handed from do_fork() to clone_func() when a
   CLONE_VM clone is emulated with a host thread. */
typedef struct {
    CPUArchState *env;      /* CPU state the new thread will run */
    pthread_mutex_t mutex;  /* protects the startup handshake below */
    pthread_cond_t cond;    /* signalled by the child once it is ready */
    pthread_t thread;
    uint32_t tid;           /* filled in by the child via sys_gettid() */
    abi_ulong child_tidptr;   /* guest address to store the TID at, or 0 */
    abi_ulong parent_tidptr;  /* likewise, for CLONE_PARENT_SETTID */
    sigset_t sigmask;       /* signal mask the child should install */
} new_thread_info;
5465 
/* Entry point of host threads created by do_fork() for CLONE_VM clones:
   finish per-thread setup, publish the TID, hand-shake with the parent,
   then enter the guest CPU loop.  Never returns. */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    /* Store the TID wherever CLONE_CHILD_SETTID / CLONE_PARENT_SETTID
       requested (do_fork() filled these in, or left them 0). */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
5498 
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/*
 * Emulate clone(2)/fork(2)/vfork(2).  CLONE_VM requests become a host
 * pthread sharing this process's address space; everything else becomes
 * a host fork().  Returns the child TID/PID on success, -1 on pthread
 * failure, or a -TARGET_* errno for unsupported flag combinations.
 */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    /* Drop clone flags the emulation can safely ignore. */
    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        /* Shared address space requested: run the child as a host thread
           on a fresh copy of the CPU state. */
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        /* Only the pthread-compatible combination of thread flags is
           supported. */
        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        /* NOTE(review): the results of these pthread_attr_* calls are
           assigned but never checked before being overwritten. */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        /* Restore our signal mask; the child installs its own copy from
           info.sigmask inside clone_func(). */
        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr)
;
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
5638 
/* warning : doesn't handle linux specific flags... */
/* Map a guest fcntl command to the host's.  Record-lock commands are
   mapped to the F_*LK64 variants so do_fcntl() can use struct flock64
   uniformly.  Returns -TARGET_EINVAL for unsupported commands. */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    /* These commands share numbering with the host. */
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
        ret = cmd;
        break;
    /* Lock commands always use the 64-bit host variants. */
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    /* 32-bit guests have separate 64-bit lock commands. */
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
5734 
5735 #define FLOCK_TRANSTBL \
5736     switch (type) { \
5737     TRANSTBL_CONVERT(F_RDLCK); \
5738     TRANSTBL_CONVERT(F_WRLCK); \
5739     TRANSTBL_CONVERT(F_UNLCK); \
5740     TRANSTBL_CONVERT(F_EXLCK); \
5741     TRANSTBL_CONVERT(F_SHLCK); \
5742     }
5743 
/* Translate a guest lock type (TARGET_F_RDLCK etc.) to the host value.
   Unknown types are rejected with -TARGET_EINVAL. */
static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}
5751 
/* Translate a host lock type (F_RDLCK etc.) to the guest value. */
static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
5762 
5763 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5764                                             abi_ulong target_flock_addr)
5765 {
5766     struct target_flock *target_fl;
5767     int l_type;
5768 
5769     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5770         return -TARGET_EFAULT;
5771     }
5772 
5773     __get_user(l_type, &target_fl->l_type);
5774     l_type = target_to_host_flock(l_type);
5775     if (l_type < 0) {
5776         return l_type;
5777     }
5778     fl->l_type = l_type;
5779     __get_user(fl->l_whence, &target_fl->l_whence);
5780     __get_user(fl->l_start, &target_fl->l_start);
5781     __get_user(fl->l_len, &target_fl->l_len);
5782     __get_user(fl->l_pid, &target_fl->l_pid);
5783     unlock_user_struct(target_fl, target_flock_addr, 0);
5784     return 0;
5785 }
5786 
5787 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5788                                           const struct flock64 *fl)
5789 {
5790     struct target_flock *target_fl;
5791     short l_type;
5792 
5793     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5794         return -TARGET_EFAULT;
5795     }
5796 
5797     l_type = host_to_target_flock(fl->l_type);
5798     __put_user(l_type, &target_fl->l_type);
5799     __put_user(fl->l_whence, &target_fl->l_whence);
5800     __put_user(fl->l_start, &target_fl->l_start);
5801     __put_user(fl->l_len, &target_fl->l_len);
5802     __put_user(fl->l_pid, &target_fl->l_pid);
5803     unlock_user_struct(target_fl, target_flock_addr, 1);
5804     return 0;
5805 }
5806 
5807 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5808 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5809 
5810 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5811 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5812                                                    abi_ulong target_flock_addr)
5813 {
5814     struct target_oabi_flock64 *target_fl;
5815     int l_type;
5816 
5817     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5818         return -TARGET_EFAULT;
5819     }
5820 
5821     __get_user(l_type, &target_fl->l_type);
5822     l_type = target_to_host_flock(l_type);
5823     if (l_type < 0) {
5824         return l_type;
5825     }
5826     fl->l_type = l_type;
5827     __get_user(fl->l_whence, &target_fl->l_whence);
5828     __get_user(fl->l_start, &target_fl->l_start);
5829     __get_user(fl->l_len, &target_fl->l_len);
5830     __get_user(fl->l_pid, &target_fl->l_pid);
5831     unlock_user_struct(target_fl, target_flock_addr, 0);
5832     return 0;
5833 }
5834 
/* Write a host struct flock64 back to the guest's OABI (old ARM ABI)
   struct flock64.  Returns 0 or -TARGET_EFAULT on a bad address. */
static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    /* Translate the lock type; all other fields copy through unchanged. */
    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
5854 #endif
5855 
5856 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
5857                                               abi_ulong target_flock_addr)
5858 {
5859     struct target_flock64 *target_fl;
5860     int l_type;
5861 
5862     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5863         return -TARGET_EFAULT;
5864     }
5865 
5866     __get_user(l_type, &target_fl->l_type);
5867     l_type = target_to_host_flock(l_type);
5868     if (l_type < 0) {
5869         return l_type;
5870     }
5871     fl->l_type = l_type;
5872     __get_user(fl->l_whence, &target_fl->l_whence);
5873     __get_user(fl->l_start, &target_fl->l_start);
5874     __get_user(fl->l_len, &target_fl->l_len);
5875     __get_user(fl->l_pid, &target_fl->l_pid);
5876     unlock_user_struct(target_fl, target_flock_addr, 0);
5877     return 0;
5878 }
5879 
/* Write a host struct flock64 back to the guest's struct flock64.
   Returns 0 or -TARGET_EFAULT on a bad guest address. */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    /* Translate the lock type; all other fields copy through unchanged. */
    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
5899 
/* Emulate the guest fcntl(2): translate the command number, marshal any
   struct arguments between guest and host layouts, and forward to the
   host via safe_fcntl().  Returns the host result or a -TARGET_* errno. */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    /* Unsupported command: propagate -TARGET_EINVAL. */
    if (host_cmd == -TARGET_EINVAL)
	    return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            /* The kernel filled in the conflicting lock; copy it back. */
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            /* Translate the returned O_* flag bits to guest values. */
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    /* Integer-argument commands pass straight through. */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Unknown but mappable command: forward the guest value as-is. */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
6007 
6008 #ifdef USE_UID16
6009 
/* Clamp a 32-bit uid into the 16-bit range; values that do not fit map
   to the overflow uid 65534. */
static inline int high2lowuid(int uid)
{
    return uid > 65535 ? 65534 : uid;
}
6017 
/* Clamp a 32-bit gid into the 16-bit range; values that do not fit map
   to the overflow gid 65534. */
static inline int high2lowgid(int gid)
{
    return gid > 65535 ? 65534 : gid;
}
6025 
/* Widen a 16-bit uid; the 16-bit "unchanged" sentinel 0xffff becomes
   the 32-bit sentinel -1. */
static inline int low2highuid(int uid)
{
    return (uint16_t)uid == 0xffff ? -1 : uid;
}
6033 
/* Widen a 16-bit gid; the 16-bit "unchanged" sentinel 0xffff becomes
   the 32-bit sentinel -1. */
static inline int low2highgid(int gid)
{
    return (uint16_t)gid == 0xffff ? -1 : gid;
}
/* Byte-swap a 16-bit uid/gid between host and target order. */
static inline int tswapid(int id)
{
    return tswap16(id);
}
6045 
6046 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6047 
6048 #else /* !USE_UID16 */
/* Without USE_UID16 the target uses 32-bit IDs like the host, so the
   high/low conversions are identity functions. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
/* Byte-swap a 32-bit uid/gid between host and target order. */
static inline int tswapid(int id)
{
    return tswap32(id);
}
6069 
6070 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6071 
6072 #endif /* USE_UID16 */
6073 
6074 /* We must do direct syscalls for setting UID/GID, because we want to
6075  * implement the Linux system call semantics of "change only for this thread",
6076  * not the libc/POSIX semantics of "change for all threads in process".
6077  * (See http://ewontfix.com/17/ for more details.)
6078  * We use the 32-bit version of the syscalls if present; if it is not
6079  * then either the host architecture supports 32-bit UIDs natively with
6080  * the standard syscall, or the 16-bit UID is the best we can do.
6081  */
6082 #ifdef __NR_setuid32
6083 #define __NR_sys_setuid __NR_setuid32
6084 #else
6085 #define __NR_sys_setuid __NR_setuid
6086 #endif
6087 #ifdef __NR_setgid32
6088 #define __NR_sys_setgid __NR_setgid32
6089 #else
6090 #define __NR_sys_setgid __NR_setgid
6091 #endif
6092 #ifdef __NR_setresuid32
6093 #define __NR_sys_setresuid __NR_setresuid32
6094 #else
6095 #define __NR_sys_setresuid __NR_setresuid
6096 #endif
6097 #ifdef __NR_setresgid32
6098 #define __NR_sys_setresgid __NR_setresgid32
6099 #else
6100 #define __NR_sys_setresgid __NR_setresgid
6101 #endif
6102 
/* Raw syscall wrappers: invoking the kernel directly changes the
 * credentials of the calling thread only (Linux semantics), unlike the
 * glibc wrappers which broadcast the change to all threads -- see the
 * comment above. */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6107 
/* One-time start-up initialisation for the syscall layer: register all
 * struct layouts with the thunk conversion machinery, build the
 * target-to-host errno reverse map, and patch target ioctl numbers
 * whose size field must be computed from the argument layout.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

/* Each STRUCT()/STRUCT_SPECIAL() entry in syscall_types.h expands to a
 * thunk registration call here. */
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            /* Replace the all-ones placeholder size with the real
             * (target-layout) size of the pointed-to argument. */
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
6159 
6160 #if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit offset from the two guest registers carrying its
 * halves; which register holds the high word depends on the target's
 * endianness.
 */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const uint32_t high = word0, low = word1;
#else
    const uint32_t high = word1, low = word0;
#endif
    return ((uint64_t)high << 32) | low;
}
6169 #else /* TARGET_ABI_BITS == 32 */
/* On 64-bit ABIs the offset arrives in a single register, so the
 * second word is ignored. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
6174 #endif /* TARGET_ABI_BITS != 32 */
6175 
6176 #ifdef TARGET_NR_truncate64
/* truncate64(2): reassemble the 64-bit length from two guest argument
 * registers and return the host result as a target errno. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    /* Some ABIs align 64-bit register pairs to an even register, so
     * the offset words are shifted one slot up (arg3/arg4). */
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
6188 #endif
6189 
6190 #ifdef TARGET_NR_ftruncate64
/* ftruncate64(2): same register-pair reassembly as target_truncate64()
 * but operating on a file descriptor. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    /* Even-register pair alignment shifts the offset words to
     * arg3/arg4 on some ABIs. */
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
6202 #endif
6203 
6204 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6205                                                abi_ulong target_addr)
6206 {
6207     struct target_timespec *target_ts;
6208 
6209     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6210         return -TARGET_EFAULT;
6211     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6212     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6213     unlock_user_struct(target_ts, target_addr, 0);
6214     return 0;
6215 }
6216 
6217 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6218                                                struct timespec *host_ts)
6219 {
6220     struct target_timespec *target_ts;
6221 
6222     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6223         return -TARGET_EFAULT;
6224     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6225     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6226     unlock_user_struct(target_ts, target_addr, 1);
6227     return 0;
6228 }
6229 
6230 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6231                                                  abi_ulong target_addr)
6232 {
6233     struct target_itimerspec *target_itspec;
6234 
6235     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6236         return -TARGET_EFAULT;
6237     }
6238 
6239     host_itspec->it_interval.tv_sec =
6240                             tswapal(target_itspec->it_interval.tv_sec);
6241     host_itspec->it_interval.tv_nsec =
6242                             tswapal(target_itspec->it_interval.tv_nsec);
6243     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6244     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6245 
6246     unlock_user_struct(target_itspec, target_addr, 1);
6247     return 0;
6248 }
6249 
6250 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6251                                                struct itimerspec *host_its)
6252 {
6253     struct target_itimerspec *target_itspec;
6254 
6255     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6256         return -TARGET_EFAULT;
6257     }
6258 
6259     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6260     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6261 
6262     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6263     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6264 
6265     unlock_user_struct(target_itspec, target_addr, 0);
6266     return 0;
6267 }
6268 
/* Copy a struct timex from guest memory at @target_addr into *host_tx.
 * Returns 0 on success or -TARGET_EFAULT if the guest pointer cannot
 * be read.  Pure field-by-field marshalling; no value validation is
 * done here (the kernel does that in adjtimex).
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
6303 
/* Copy *host_tx back into the guest struct timex at @target_addr
 * (the kernel updates the struct on return from adjtimex).
 * Returns 0 on success or -TARGET_EFAULT on a bad guest pointer.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
6338 
6339 
6340 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6341                                                abi_ulong target_addr)
6342 {
6343     struct target_sigevent *target_sevp;
6344 
6345     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6346         return -TARGET_EFAULT;
6347     }
6348 
6349     /* This union is awkward on 64 bit systems because it has a 32 bit
6350      * integer and a pointer in it; we follow the conversion approach
6351      * used for handling sigval types in signal.c so the guest should get
6352      * the correct value back even if we did a 64 bit byteswap and it's
6353      * using the 32 bit integer.
6354      */
6355     host_sevp->sigev_value.sival_ptr =
6356         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6357     host_sevp->sigev_signo =
6358         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6359     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6360     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6361 
6362     unlock_user_struct(target_sevp, target_addr, 1);
6363     return 0;
6364 }
6365 
6366 #if defined(TARGET_NR_mlockall)
6367 static inline int target_to_host_mlockall_arg(int arg)
6368 {
6369     int result = 0;
6370 
6371     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6372         result |= MCL_CURRENT;
6373     }
6374     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6375         result |= MCL_FUTURE;
6376     }
6377     return result;
6378 }
6379 #endif
6380 
6381 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6382      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6383      defined(TARGET_NR_newfstatat))
/* Write host struct stat data back to the guest in the target's
 * stat64 layout at @target_addr.  Returns 0 on success, -TARGET_EFAULT
 * on a bad guest pointer.  ARM EABI32 has its own layout (different
 * padding/alignment), handled by the first branch.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first so fields absent from the host struct read as 0. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
6446 #endif
6447 
6448 /* ??? Using host futex calls even when target atomic operations
6449    are not really atomic probably breaks things.  However implementing
6450    futexes locally would make futexes shared between multiple processes
6451    tricky.  However they're probably useless because guest atomic
6452    operations won't work either.  */
6453 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6454                     target_ulong uaddr2, int val3)
6455 {
6456     struct timespec ts, *pts;
6457     int base_op;
6458 
6459     /* ??? We assume FUTEX_* constants are the same on both host
6460        and target.  */
6461 #ifdef FUTEX_CMD_MASK
6462     base_op = op & FUTEX_CMD_MASK;
6463 #else
6464     base_op = op;
6465 #endif
6466     switch (base_op) {
6467     case FUTEX_WAIT:
6468     case FUTEX_WAIT_BITSET:
6469         if (timeout) {
6470             pts = &ts;
6471             target_to_host_timespec(pts, timeout);
6472         } else {
6473             pts = NULL;
6474         }
6475         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6476                          pts, NULL, val3));
6477     case FUTEX_WAKE:
6478         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6479     case FUTEX_FD:
6480         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6481     case FUTEX_REQUEUE:
6482     case FUTEX_CMP_REQUEUE:
6483     case FUTEX_WAKE_OP:
6484         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6485            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6486            But the prototype takes a `struct timespec *'; insert casts
6487            to satisfy the compiler.  We do not need to tswap TIMEOUT
6488            since it's not compared to guest memory.  */
6489         pts = (struct timespec *)(uintptr_t) timeout;
6490         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6491                                     g2h(uaddr2),
6492                                     (base_op == FUTEX_CMP_REQUEUE
6493                                      ? tswap32(val3)
6494                                      : val3)));
6495     default:
6496         return -TARGET_ENOSYS;
6497     }
6498 }
6499 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): read the guest's handle_bytes, run the
 * host syscall into a scratch handle, then copy the (opaque) handle
 * data and byteswapped header fields back to the guest, along with the
 * mount id.  Returns the host result or -TARGET_EFAULT.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    /* NOTE(review): sizeof() + size can wrap for a huge guest-supplied
     * size; lock_user below would then map too little -- verify the
     * guest cannot reach this with values near UINT_MAX. */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;

}
6551 #endif
6552 
6553 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): copy the guest's file_handle into a
 * host buffer (byteswapping the header fields), convert the open flags
 * and run the host syscall.  Returns the new fd or a target errno.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    /* NOTE(review): sizeof() + size can wrap for a huge guest size --
     * same caveat as in do_name_to_handle_at. */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
6585 #endif
6586 
6587 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6588 
/* Emulate signalfd4(2): validate the flag bits, convert the guest
 * sigset and flags to host values, create the signalfd and register a
 * translator so reads convert siginfo back to the target layout.
 * Returns the new fd or a target errno.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only these two flag bits are defined for signalfd4. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Reads from this fd must convert signalfd_siginfo layout. */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
6616 #endif
6617 
6618 /* Map host to target signal numbers for the wait family of syscalls.
6619    Assume all other status bits are the same.  */
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    int ret = status;

    if (WIFSIGNALED(status)) {
        /* terminating signal lives in the low 7 bits */
        ret = host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    } else if (WIFSTOPPED(status)) {
        /* stop signal lives in bits 8..15 */
        ret = (host_to_target_signal(WSTOPSIG(status)) << 8)
              | (status & 0xff);
    }
    return ret;
}
6631 
6632 static int open_self_cmdline(void *cpu_env, int fd)
6633 {
6634     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6635     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6636     int i;
6637 
6638     for (i = 0; i < bprm->argc; i++) {
6639         size_t len = strlen(bprm->argv[i]) + 1;
6640 
6641         if (write(fd, bprm->argv[i], len) != len) {
6642             return -1;
6643         }
6644     }
6645 
6646     return 0;
6647 }
6648 
6649 static int open_self_maps(void *cpu_env, int fd)
6650 {
6651     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6652     TaskState *ts = cpu->opaque;
6653     FILE *fp;
6654     char *line = NULL;
6655     size_t len = 0;
6656     ssize_t read;
6657 
6658     fp = fopen("/proc/self/maps", "r");
6659     if (fp == NULL) {
6660         return -1;
6661     }
6662 
6663     while ((read = getline(&line, &len, fp)) != -1) {
6664         int fields, dev_maj, dev_min, inode;
6665         uint64_t min, max, offset;
6666         char flag_r, flag_w, flag_x, flag_p;
6667         char path[512] = "";
6668         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6669                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6670                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6671 
6672         if ((fields < 10) || (fields > 11)) {
6673             continue;
6674         }
6675         if (h2g_valid(min)) {
6676             int flags = page_get_flags(h2g(min));
6677             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6678             if (page_check_range(h2g(min), max - min, flags) == -1) {
6679                 continue;
6680             }
6681             if (h2g(min) == ts->info->stack_limit) {
6682                 pstrcpy(path, sizeof(path), "      [stack]");
6683             }
6684             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6685                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6686                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6687                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6688                     path[0] ? "         " : "", path);
6689         }
6690     }
6691 
6692     free(line);
6693     fclose(fp);
6694 
6695     return 0;
6696 }
6697 
/* Emulate /proc/self/stat: emit 44 space-separated fields in
 * /proc/<pid>/stat order.  Only pid (field 0), comm (field 1) and
 * startstack (field 27) carry real values; every other field reads as
 * 0.  Returns 0 on success, -1 on a failed write.
 */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    for (i = 0; i < 44; i++) {
      char buf[128];
      int len;
      uint64_t val = 0;

      if (i == 0) {
        /* pid */
        val = getpid();
        snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
      } else if (i == 1) {
        /* app name */
        snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
      } else if (i == 27) {
        /* stack bottom */
        val = start_stack;
        snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
      } else {
        /* for the rest, there is MasterCard */
        snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
      }

      len = strlen(buf);
      if (write(fd, buf, len) != len) {
          return -1;
      }
    }

    return 0;
}
6734 
/* Emulate /proc/self/auxv: copy the auxiliary vector saved on the
 * guest stack at exec time into @fd, then rewind the fd.  Always
 * returns 0 (failures are silently ignored, best-effort).
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr/len were advanced by the loop, so this
         * unlock covers only the unwritten tail -- harmless for a
         * read-only lock, but verify against lock_user's contract. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
6764 
/* Return 1 if @filename names /proc/<our pid>/@entry (either via
 * "self" or via our literal pid), 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *p = filename;
    char self_prefix[80];

    /* must live directly under /proc/ */
    if (strncmp(p, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    p += strlen("/proc/");

    if (strncmp(p, "self/", strlen("self/")) == 0) {
        p += strlen("self/");
    } else if (*p >= '1' && *p <= '9') {
        /* a numeric pid only matches if it is our own */
        snprintf(self_prefix, sizeof(self_prefix), "%d/", getpid());
        if (strncmp(p, self_prefix, strlen(self_prefix)) != 0) {
            return 0;
        }
        p += strlen(self_prefix);
    } else {
        return 0;
    }

    return strcmp(p, entry) == 0;
}
6788 
6789 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
6790     defined(TARGET_SPARC)
/* Exact-path comparator used for /proc entries emulated at a fixed
 * absolute path. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
6795 #endif
6796 
6797 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Emulate /proc/net/route when host and target endianness differ:
 * re-emit the host file with the address columns (dest/gateway/mask)
 * byteswapped to the target's byte order.  Returns 0 on success, -1 if
 * the host file can't be opened.
 */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    /* NOTE(review): if the host file is empty, getline leaves line
     * NULL and dprintf prints "(null)" -- verify this can't happen in
     * practice. */
    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
6840 #endif
6841 
6842 #if defined(TARGET_SPARC)
/* Emulate /proc/cpuinfo for SPARC targets: the guest expects a sun4u
 * machine type line, which the host file would not contain. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
6848 #endif
6849 
/* openat(2) front-end: intercept a handful of /proc paths whose
 * contents must be faked for the guest (maps, stat, auxv, cmdline, and
 * cross-endian or per-target files), serving them from an unlinked
 * temp file; everything else falls through to the real host openat.
 * Returns a host fd or a negative host errno from safe_openat/mkstemp.
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;          /* path (or entry name) to match */
        int (*fill)(void *cpu_env, int fd);           /* content writer */
        int (*cmp)(const char *s1, const char *s2);   /* path matcher */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/self/exe must resolve to the guest binary, not QEMU. */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* unlink immediately: the fd keeps the anonymous file alive */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
6912 
6913 #define TIMER_MAGIC 0x0caf0000
6914 #define TIMER_MAGIC_MASK 0xffff0000
6915 
6916 /* Convert QEMU provided timer ID back to internal 16bit index format */
6917 static target_timer_t get_timer_id(abi_long arg)
6918 {
6919     target_timer_t timerid = arg;
6920 
6921     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6922         return -TARGET_EINVAL;
6923     }
6924 
6925     timerid &= 0xffff;
6926 
6927     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6928         return -TARGET_EINVAL;
6929     }
6930 
6931     return timerid;
6932 }
6933 
/* Convert a guest CPU affinity bitmap (@target_size bytes at
 * @target_addr) into the host bitmap @host_mask (@host_size bytes,
 * which must be at least as large).  Word sizes may differ between
 * guest and host, so the copy is done bit by bit.
 * Returns 0 or -TARGET_EFAULT.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    /* Clear first: guest bits are OR-ed in below. */
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}
6967 
6968 static int host_to_target_cpu_mask(const unsigned long *host_mask,
6969                                    size_t host_size,
6970                                    abi_ulong target_addr,
6971                                    size_t target_size)
6972 {
6973     unsigned target_bits = sizeof(abi_ulong) * 8;
6974     unsigned host_bits = sizeof(*host_mask) * 8;
6975     abi_ulong *target_mask;
6976     unsigned i, j;
6977 
6978     assert(host_size >= target_size);
6979 
6980     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
6981     if (!target_mask) {
6982         return -TARGET_EFAULT;
6983     }
6984 
6985     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6986         unsigned bit = i * target_bits;
6987         abi_ulong val = 0;
6988 
6989         for (j = 0; j < target_bits; j++, bit++) {
6990             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
6991                 val |= 1UL << j;
6992             }
6993         }
6994         __put_user(val, &target_mask[i]);
6995     }
6996 
6997     unlock_user(target_mask, target_addr, target_size);
6998     return 0;
6999 }
7000 
7001 /* This is an internal helper for do_syscall so that it is easier
7002  * to have a single return point, so that actions, such as logging
7003  * of syscall results, can be performed.
7004  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7005  */
7006 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7007                             abi_long arg2, abi_long arg3, abi_long arg4,
7008                             abi_long arg5, abi_long arg6, abi_long arg7,
7009                             abi_long arg8)
7010 {
7011     CPUState *cpu = ENV_GET_CPU(cpu_env);
7012     abi_long ret;
7013 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7014     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7015     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
7016     struct stat st;
7017 #endif
7018 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7019     || defined(TARGET_NR_fstatfs)
7020     struct statfs stfs;
7021 #endif
7022     void *p;
7023 
7024     switch(num) {
7025     case TARGET_NR_exit:
        /* In old applications this may be used to implement _exit(2).
           However in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more than one thread.  */
7030 
7031         if (block_signals()) {
7032             return -TARGET_ERESTARTSYS;
7033         }
7034 
7035         cpu_list_lock();
7036 
7037         if (CPU_NEXT(first_cpu)) {
7038             TaskState *ts;
7039 
7040             /* Remove the CPU from the list.  */
7041             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7042 
7043             cpu_list_unlock();
7044 
7045             ts = cpu->opaque;
7046             if (ts->child_tidptr) {
7047                 put_user_u32(0, ts->child_tidptr);
7048                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7049                           NULL, NULL, 0);
7050             }
7051             thread_cpu = NULL;
7052             object_unref(OBJECT(cpu));
7053             g_free(ts);
7054             rcu_unregister_thread();
7055             pthread_exit(NULL);
7056         }
7057 
7058         cpu_list_unlock();
7059         preexit_cleanup(cpu_env, arg1);
7060         _exit(arg1);
7061         return 0; /* avoid warning */
7062     case TARGET_NR_read:
7063         if (arg2 == 0 && arg3 == 0) {
7064             return get_errno(safe_read(arg1, 0, 0));
7065         } else {
7066             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7067                 return -TARGET_EFAULT;
7068             ret = get_errno(safe_read(arg1, p, arg3));
7069             if (ret >= 0 &&
7070                 fd_trans_host_to_target_data(arg1)) {
7071                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7072             }
7073             unlock_user(p, arg2, ret);
7074         }
7075         return ret;
7076     case TARGET_NR_write:
7077         if (arg2 == 0 && arg3 == 0) {
7078             return get_errno(safe_write(arg1, 0, 0));
7079         }
7080         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7081             return -TARGET_EFAULT;
7082         if (fd_trans_target_to_host_data(arg1)) {
7083             void *copy = g_malloc(arg3);
7084             memcpy(copy, p, arg3);
7085             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7086             if (ret >= 0) {
7087                 ret = get_errno(safe_write(arg1, copy, ret));
7088             }
7089             g_free(copy);
7090         } else {
7091             ret = get_errno(safe_write(arg1, p, arg3));
7092         }
7093         unlock_user(p, arg2, 0);
7094         return ret;
7095 
7096 #ifdef TARGET_NR_open
7097     case TARGET_NR_open:
7098         if (!(p = lock_user_string(arg1)))
7099             return -TARGET_EFAULT;
7100         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7101                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7102                                   arg3));
7103         fd_trans_unregister(ret);
7104         unlock_user(p, arg1, 0);
7105         return ret;
7106 #endif
7107     case TARGET_NR_openat:
7108         if (!(p = lock_user_string(arg2)))
7109             return -TARGET_EFAULT;
7110         ret = get_errno(do_openat(cpu_env, arg1, p,
7111                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7112                                   arg4));
7113         fd_trans_unregister(ret);
7114         unlock_user(p, arg2, 0);
7115         return ret;
7116 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7117     case TARGET_NR_name_to_handle_at:
7118         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7119         return ret;
7120 #endif
7121 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7122     case TARGET_NR_open_by_handle_at:
7123         ret = do_open_by_handle_at(arg1, arg2, arg3);
7124         fd_trans_unregister(ret);
7125         return ret;
7126 #endif
7127     case TARGET_NR_close:
7128         fd_trans_unregister(arg1);
7129         return get_errno(close(arg1));
7130 
7131     case TARGET_NR_brk:
7132         return do_brk(arg1);
7133 #ifdef TARGET_NR_fork
7134     case TARGET_NR_fork:
7135         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7136 #endif
7137 #ifdef TARGET_NR_waitpid
7138     case TARGET_NR_waitpid:
7139         {
7140             int status;
7141             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7142             if (!is_error(ret) && arg2 && ret
7143                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7144                 return -TARGET_EFAULT;
7145         }
7146         return ret;
7147 #endif
7148 #ifdef TARGET_NR_waitid
7149     case TARGET_NR_waitid:
7150         {
7151             siginfo_t info;
7152             info.si_pid = 0;
7153             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7154             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7155                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7156                     return -TARGET_EFAULT;
7157                 host_to_target_siginfo(p, &info);
7158                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7159             }
7160         }
7161         return ret;
7162 #endif
7163 #ifdef TARGET_NR_creat /* not on alpha */
7164     case TARGET_NR_creat:
7165         if (!(p = lock_user_string(arg1)))
7166             return -TARGET_EFAULT;
7167         ret = get_errno(creat(p, arg2));
7168         fd_trans_unregister(ret);
7169         unlock_user(p, arg1, 0);
7170         return ret;
7171 #endif
7172 #ifdef TARGET_NR_link
7173     case TARGET_NR_link:
7174         {
7175             void * p2;
7176             p = lock_user_string(arg1);
7177             p2 = lock_user_string(arg2);
7178             if (!p || !p2)
7179                 ret = -TARGET_EFAULT;
7180             else
7181                 ret = get_errno(link(p, p2));
7182             unlock_user(p2, arg2, 0);
7183             unlock_user(p, arg1, 0);
7184         }
7185         return ret;
7186 #endif
7187 #if defined(TARGET_NR_linkat)
7188     case TARGET_NR_linkat:
7189         {
7190             void * p2 = NULL;
7191             if (!arg2 || !arg4)
7192                 return -TARGET_EFAULT;
7193             p  = lock_user_string(arg2);
7194             p2 = lock_user_string(arg4);
7195             if (!p || !p2)
7196                 ret = -TARGET_EFAULT;
7197             else
7198                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7199             unlock_user(p, arg2, 0);
7200             unlock_user(p2, arg4, 0);
7201         }
7202         return ret;
7203 #endif
7204 #ifdef TARGET_NR_unlink
7205     case TARGET_NR_unlink:
7206         if (!(p = lock_user_string(arg1)))
7207             return -TARGET_EFAULT;
7208         ret = get_errno(unlink(p));
7209         unlock_user(p, arg1, 0);
7210         return ret;
7211 #endif
7212 #if defined(TARGET_NR_unlinkat)
7213     case TARGET_NR_unlinkat:
7214         if (!(p = lock_user_string(arg2)))
7215             return -TARGET_EFAULT;
7216         ret = get_errno(unlinkat(arg1, p, arg3));
7217         unlock_user(p, arg2, 0);
7218         return ret;
7219 #endif
7220     case TARGET_NR_execve:
7221         {
7222             char **argp, **envp;
7223             int argc, envc;
7224             abi_ulong gp;
7225             abi_ulong guest_argp;
7226             abi_ulong guest_envp;
7227             abi_ulong addr;
7228             char **q;
7229             int total_size = 0;
7230 
7231             argc = 0;
7232             guest_argp = arg2;
7233             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7234                 if (get_user_ual(addr, gp))
7235                     return -TARGET_EFAULT;
7236                 if (!addr)
7237                     break;
7238                 argc++;
7239             }
7240             envc = 0;
7241             guest_envp = arg3;
7242             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7243                 if (get_user_ual(addr, gp))
7244                     return -TARGET_EFAULT;
7245                 if (!addr)
7246                     break;
7247                 envc++;
7248             }
7249 
7250             argp = g_new0(char *, argc + 1);
7251             envp = g_new0(char *, envc + 1);
7252 
7253             for (gp = guest_argp, q = argp; gp;
7254                   gp += sizeof(abi_ulong), q++) {
7255                 if (get_user_ual(addr, gp))
7256                     goto execve_efault;
7257                 if (!addr)
7258                     break;
7259                 if (!(*q = lock_user_string(addr)))
7260                     goto execve_efault;
7261                 total_size += strlen(*q) + 1;
7262             }
7263             *q = NULL;
7264 
7265             for (gp = guest_envp, q = envp; gp;
7266                   gp += sizeof(abi_ulong), q++) {
7267                 if (get_user_ual(addr, gp))
7268                     goto execve_efault;
7269                 if (!addr)
7270                     break;
7271                 if (!(*q = lock_user_string(addr)))
7272                     goto execve_efault;
7273                 total_size += strlen(*q) + 1;
7274             }
7275             *q = NULL;
7276 
7277             if (!(p = lock_user_string(arg1)))
7278                 goto execve_efault;
7279             /* Although execve() is not an interruptible syscall it is
7280              * a special case where we must use the safe_syscall wrapper:
7281              * if we allow a signal to happen before we make the host
7282              * syscall then we will 'lose' it, because at the point of
7283              * execve the process leaves QEMU's control. So we use the
7284              * safe syscall wrapper to ensure that we either take the
7285              * signal as a guest signal, or else it does not happen
7286              * before the execve completes and makes it the other
7287              * program's problem.
7288              */
7289             ret = get_errno(safe_execve(p, argp, envp));
7290             unlock_user(p, arg1, 0);
7291 
7292             goto execve_end;
7293 
7294         execve_efault:
7295             ret = -TARGET_EFAULT;
7296 
7297         execve_end:
7298             for (gp = guest_argp, q = argp; *q;
7299                   gp += sizeof(abi_ulong), q++) {
7300                 if (get_user_ual(addr, gp)
7301                     || !addr)
7302                     break;
7303                 unlock_user(*q, addr, 0);
7304             }
7305             for (gp = guest_envp, q = envp; *q;
7306                   gp += sizeof(abi_ulong), q++) {
7307                 if (get_user_ual(addr, gp)
7308                     || !addr)
7309                     break;
7310                 unlock_user(*q, addr, 0);
7311             }
7312 
7313             g_free(argp);
7314             g_free(envp);
7315         }
7316         return ret;
7317     case TARGET_NR_chdir:
7318         if (!(p = lock_user_string(arg1)))
7319             return -TARGET_EFAULT;
7320         ret = get_errno(chdir(p));
7321         unlock_user(p, arg1, 0);
7322         return ret;
7323 #ifdef TARGET_NR_time
7324     case TARGET_NR_time:
7325         {
7326             time_t host_time;
7327             ret = get_errno(time(&host_time));
7328             if (!is_error(ret)
7329                 && arg1
7330                 && put_user_sal(host_time, arg1))
7331                 return -TARGET_EFAULT;
7332         }
7333         return ret;
7334 #endif
7335 #ifdef TARGET_NR_mknod
7336     case TARGET_NR_mknod:
7337         if (!(p = lock_user_string(arg1)))
7338             return -TARGET_EFAULT;
7339         ret = get_errno(mknod(p, arg2, arg3));
7340         unlock_user(p, arg1, 0);
7341         return ret;
7342 #endif
7343 #if defined(TARGET_NR_mknodat)
7344     case TARGET_NR_mknodat:
7345         if (!(p = lock_user_string(arg2)))
7346             return -TARGET_EFAULT;
7347         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7348         unlock_user(p, arg2, 0);
7349         return ret;
7350 #endif
7351 #ifdef TARGET_NR_chmod
7352     case TARGET_NR_chmod:
7353         if (!(p = lock_user_string(arg1)))
7354             return -TARGET_EFAULT;
7355         ret = get_errno(chmod(p, arg2));
7356         unlock_user(p, arg1, 0);
7357         return ret;
7358 #endif
7359 #ifdef TARGET_NR_lseek
7360     case TARGET_NR_lseek:
7361         return get_errno(lseek(arg1, arg2, arg3));
7362 #endif
7363 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7364     /* Alpha specific */
7365     case TARGET_NR_getxpid:
7366         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7367         return get_errno(getpid());
7368 #endif
7369 #ifdef TARGET_NR_getpid
7370     case TARGET_NR_getpid:
7371         return get_errno(getpid());
7372 #endif
7373     case TARGET_NR_mount:
7374         {
7375             /* need to look at the data field */
7376             void *p2, *p3;
7377 
7378             if (arg1) {
7379                 p = lock_user_string(arg1);
7380                 if (!p) {
7381                     return -TARGET_EFAULT;
7382                 }
7383             } else {
7384                 p = NULL;
7385             }
7386 
7387             p2 = lock_user_string(arg2);
7388             if (!p2) {
7389                 if (arg1) {
7390                     unlock_user(p, arg1, 0);
7391                 }
7392                 return -TARGET_EFAULT;
7393             }
7394 
7395             if (arg3) {
7396                 p3 = lock_user_string(arg3);
7397                 if (!p3) {
7398                     if (arg1) {
7399                         unlock_user(p, arg1, 0);
7400                     }
7401                     unlock_user(p2, arg2, 0);
7402                     return -TARGET_EFAULT;
7403                 }
7404             } else {
7405                 p3 = NULL;
7406             }
7407 
7408             /* FIXME - arg5 should be locked, but it isn't clear how to
7409              * do that since it's not guaranteed to be a NULL-terminated
7410              * string.
7411              */
7412             if (!arg5) {
7413                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7414             } else {
7415                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7416             }
7417             ret = get_errno(ret);
7418 
7419             if (arg1) {
7420                 unlock_user(p, arg1, 0);
7421             }
7422             unlock_user(p2, arg2, 0);
7423             if (arg3) {
7424                 unlock_user(p3, arg3, 0);
7425             }
7426         }
7427         return ret;
7428 #ifdef TARGET_NR_umount
7429     case TARGET_NR_umount:
7430         if (!(p = lock_user_string(arg1)))
7431             return -TARGET_EFAULT;
7432         ret = get_errno(umount(p));
7433         unlock_user(p, arg1, 0);
7434         return ret;
7435 #endif
7436 #ifdef TARGET_NR_stime /* not on alpha */
7437     case TARGET_NR_stime:
7438         {
7439             time_t host_time;
7440             if (get_user_sal(host_time, arg1))
7441                 return -TARGET_EFAULT;
7442             return get_errno(stime(&host_time));
7443         }
7444 #endif
7445 #ifdef TARGET_NR_alarm /* not on alpha */
7446     case TARGET_NR_alarm:
7447         return alarm(arg1);
7448 #endif
7449 #ifdef TARGET_NR_pause /* not on alpha */
7450     case TARGET_NR_pause:
7451         if (!block_signals()) {
7452             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7453         }
7454         return -TARGET_EINTR;
7455 #endif
7456 #ifdef TARGET_NR_utime
7457     case TARGET_NR_utime:
7458         {
7459             struct utimbuf tbuf, *host_tbuf;
7460             struct target_utimbuf *target_tbuf;
7461             if (arg2) {
7462                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7463                     return -TARGET_EFAULT;
7464                 tbuf.actime = tswapal(target_tbuf->actime);
7465                 tbuf.modtime = tswapal(target_tbuf->modtime);
7466                 unlock_user_struct(target_tbuf, arg2, 0);
7467                 host_tbuf = &tbuf;
7468             } else {
7469                 host_tbuf = NULL;
7470             }
7471             if (!(p = lock_user_string(arg1)))
7472                 return -TARGET_EFAULT;
7473             ret = get_errno(utime(p, host_tbuf));
7474             unlock_user(p, arg1, 0);
7475         }
7476         return ret;
7477 #endif
7478 #ifdef TARGET_NR_utimes
7479     case TARGET_NR_utimes:
7480         {
7481             struct timeval *tvp, tv[2];
7482             if (arg2) {
7483                 if (copy_from_user_timeval(&tv[0], arg2)
7484                     || copy_from_user_timeval(&tv[1],
7485                                               arg2 + sizeof(struct target_timeval)))
7486                     return -TARGET_EFAULT;
7487                 tvp = tv;
7488             } else {
7489                 tvp = NULL;
7490             }
7491             if (!(p = lock_user_string(arg1)))
7492                 return -TARGET_EFAULT;
7493             ret = get_errno(utimes(p, tvp));
7494             unlock_user(p, arg1, 0);
7495         }
7496         return ret;
7497 #endif
7498 #if defined(TARGET_NR_futimesat)
7499     case TARGET_NR_futimesat:
7500         {
7501             struct timeval *tvp, tv[2];
7502             if (arg3) {
7503                 if (copy_from_user_timeval(&tv[0], arg3)
7504                     || copy_from_user_timeval(&tv[1],
7505                                               arg3 + sizeof(struct target_timeval)))
7506                     return -TARGET_EFAULT;
7507                 tvp = tv;
7508             } else {
7509                 tvp = NULL;
7510             }
7511             if (!(p = lock_user_string(arg2))) {
7512                 return -TARGET_EFAULT;
7513             }
7514             ret = get_errno(futimesat(arg1, path(p), tvp));
7515             unlock_user(p, arg2, 0);
7516         }
7517         return ret;
7518 #endif
7519 #ifdef TARGET_NR_access
7520     case TARGET_NR_access:
7521         if (!(p = lock_user_string(arg1))) {
7522             return -TARGET_EFAULT;
7523         }
7524         ret = get_errno(access(path(p), arg2));
7525         unlock_user(p, arg1, 0);
7526         return ret;
7527 #endif
7528 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7529     case TARGET_NR_faccessat:
7530         if (!(p = lock_user_string(arg2))) {
7531             return -TARGET_EFAULT;
7532         }
7533         ret = get_errno(faccessat(arg1, p, arg3, 0));
7534         unlock_user(p, arg2, 0);
7535         return ret;
7536 #endif
7537 #ifdef TARGET_NR_nice /* not on alpha */
7538     case TARGET_NR_nice:
7539         return get_errno(nice(arg1));
7540 #endif
7541     case TARGET_NR_sync:
7542         sync();
7543         return 0;
7544 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7545     case TARGET_NR_syncfs:
7546         return get_errno(syncfs(arg1));
7547 #endif
7548     case TARGET_NR_kill:
7549         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7550 #ifdef TARGET_NR_rename
7551     case TARGET_NR_rename:
7552         {
7553             void *p2;
7554             p = lock_user_string(arg1);
7555             p2 = lock_user_string(arg2);
7556             if (!p || !p2)
7557                 ret = -TARGET_EFAULT;
7558             else
7559                 ret = get_errno(rename(p, p2));
7560             unlock_user(p2, arg2, 0);
7561             unlock_user(p, arg1, 0);
7562         }
7563         return ret;
7564 #endif
7565 #if defined(TARGET_NR_renameat)
7566     case TARGET_NR_renameat:
7567         {
7568             void *p2;
7569             p  = lock_user_string(arg2);
7570             p2 = lock_user_string(arg4);
7571             if (!p || !p2)
7572                 ret = -TARGET_EFAULT;
7573             else
7574                 ret = get_errno(renameat(arg1, p, arg3, p2));
7575             unlock_user(p2, arg4, 0);
7576             unlock_user(p, arg2, 0);
7577         }
7578         return ret;
7579 #endif
7580 #if defined(TARGET_NR_renameat2)
7581     case TARGET_NR_renameat2:
7582         {
7583             void *p2;
7584             p  = lock_user_string(arg2);
7585             p2 = lock_user_string(arg4);
7586             if (!p || !p2) {
7587                 ret = -TARGET_EFAULT;
7588             } else {
7589                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7590             }
7591             unlock_user(p2, arg4, 0);
7592             unlock_user(p, arg2, 0);
7593         }
7594         return ret;
7595 #endif
7596 #ifdef TARGET_NR_mkdir
7597     case TARGET_NR_mkdir:
7598         if (!(p = lock_user_string(arg1)))
7599             return -TARGET_EFAULT;
7600         ret = get_errno(mkdir(p, arg2));
7601         unlock_user(p, arg1, 0);
7602         return ret;
7603 #endif
7604 #if defined(TARGET_NR_mkdirat)
7605     case TARGET_NR_mkdirat:
7606         if (!(p = lock_user_string(arg2)))
7607             return -TARGET_EFAULT;
7608         ret = get_errno(mkdirat(arg1, p, arg3));
7609         unlock_user(p, arg2, 0);
7610         return ret;
7611 #endif
7612 #ifdef TARGET_NR_rmdir
7613     case TARGET_NR_rmdir:
7614         if (!(p = lock_user_string(arg1)))
7615             return -TARGET_EFAULT;
7616         ret = get_errno(rmdir(p));
7617         unlock_user(p, arg1, 0);
7618         return ret;
7619 #endif
7620     case TARGET_NR_dup:
7621         ret = get_errno(dup(arg1));
7622         if (ret >= 0) {
7623             fd_trans_dup(arg1, ret);
7624         }
7625         return ret;
7626 #ifdef TARGET_NR_pipe
7627     case TARGET_NR_pipe:
7628         return do_pipe(cpu_env, arg1, 0, 0);
7629 #endif
7630 #ifdef TARGET_NR_pipe2
7631     case TARGET_NR_pipe2:
7632         return do_pipe(cpu_env, arg1,
7633                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7634 #endif
7635     case TARGET_NR_times:
7636         {
7637             struct target_tms *tmsp;
7638             struct tms tms;
7639             ret = get_errno(times(&tms));
7640             if (arg1) {
7641                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7642                 if (!tmsp)
7643                     return -TARGET_EFAULT;
7644                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7645                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7646                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7647                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7648             }
7649             if (!is_error(ret))
7650                 ret = host_to_target_clock_t(ret);
7651         }
7652         return ret;
7653     case TARGET_NR_acct:
7654         if (arg1 == 0) {
7655             ret = get_errno(acct(NULL));
7656         } else {
7657             if (!(p = lock_user_string(arg1))) {
7658                 return -TARGET_EFAULT;
7659             }
7660             ret = get_errno(acct(path(p)));
7661             unlock_user(p, arg1, 0);
7662         }
7663         return ret;
7664 #ifdef TARGET_NR_umount2
7665     case TARGET_NR_umount2:
7666         if (!(p = lock_user_string(arg1)))
7667             return -TARGET_EFAULT;
7668         ret = get_errno(umount2(p, arg2));
7669         unlock_user(p, arg1, 0);
7670         return ret;
7671 #endif
7672     case TARGET_NR_ioctl:
7673         return do_ioctl(arg1, arg2, arg3);
7674 #ifdef TARGET_NR_fcntl
7675     case TARGET_NR_fcntl:
7676         return do_fcntl(arg1, arg2, arg3);
7677 #endif
7678     case TARGET_NR_setpgid:
7679         return get_errno(setpgid(arg1, arg2));
7680     case TARGET_NR_umask:
7681         return get_errno(umask(arg1));
7682     case TARGET_NR_chroot:
7683         if (!(p = lock_user_string(arg1)))
7684             return -TARGET_EFAULT;
7685         ret = get_errno(chroot(p));
7686         unlock_user(p, arg1, 0);
7687         return ret;
7688 #ifdef TARGET_NR_dup2
7689     case TARGET_NR_dup2:
7690         ret = get_errno(dup2(arg1, arg2));
7691         if (ret >= 0) {
7692             fd_trans_dup(arg1, arg2);
7693         }
7694         return ret;
7695 #endif
7696 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7697     case TARGET_NR_dup3:
7698     {
7699         int host_flags;
7700 
7701         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7702             return -EINVAL;
7703         }
7704         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7705         ret = get_errno(dup3(arg1, arg2, host_flags));
7706         if (ret >= 0) {
7707             fd_trans_dup(arg1, arg2);
7708         }
7709         return ret;
7710     }
7711 #endif
7712 #ifdef TARGET_NR_getppid /* not on alpha */
7713     case TARGET_NR_getppid:
7714         return get_errno(getppid());
7715 #endif
7716 #ifdef TARGET_NR_getpgrp
7717     case TARGET_NR_getpgrp:
7718         return get_errno(getpgrp());
7719 #endif
7720     case TARGET_NR_setsid:
7721         return get_errno(setsid());
7722 #ifdef TARGET_NR_sigaction
7723     case TARGET_NR_sigaction:
7724         {
7725 #if defined(TARGET_ALPHA)
7726             struct target_sigaction act, oact, *pact = 0;
7727             struct target_old_sigaction *old_act;
7728             if (arg2) {
7729                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7730                     return -TARGET_EFAULT;
7731                 act._sa_handler = old_act->_sa_handler;
7732                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7733                 act.sa_flags = old_act->sa_flags;
7734                 act.sa_restorer = 0;
7735                 unlock_user_struct(old_act, arg2, 0);
7736                 pact = &act;
7737             }
7738             ret = get_errno(do_sigaction(arg1, pact, &oact));
7739             if (!is_error(ret) && arg3) {
7740                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7741                     return -TARGET_EFAULT;
7742                 old_act->_sa_handler = oact._sa_handler;
7743                 old_act->sa_mask = oact.sa_mask.sig[0];
7744                 old_act->sa_flags = oact.sa_flags;
7745                 unlock_user_struct(old_act, arg3, 1);
7746             }
7747 #elif defined(TARGET_MIPS)
7748 	    struct target_sigaction act, oact, *pact, *old_act;
7749 
7750 	    if (arg2) {
7751                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7752                     return -TARGET_EFAULT;
7753 		act._sa_handler = old_act->_sa_handler;
7754 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7755 		act.sa_flags = old_act->sa_flags;
7756 		unlock_user_struct(old_act, arg2, 0);
7757 		pact = &act;
7758 	    } else {
7759 		pact = NULL;
7760 	    }
7761 
7762 	    ret = get_errno(do_sigaction(arg1, pact, &oact));
7763 
7764 	    if (!is_error(ret) && arg3) {
7765                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7766                     return -TARGET_EFAULT;
7767 		old_act->_sa_handler = oact._sa_handler;
7768 		old_act->sa_flags = oact.sa_flags;
7769 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7770 		old_act->sa_mask.sig[1] = 0;
7771 		old_act->sa_mask.sig[2] = 0;
7772 		old_act->sa_mask.sig[3] = 0;
7773 		unlock_user_struct(old_act, arg3, 1);
7774 	    }
7775 #else
7776             struct target_old_sigaction *old_act;
7777             struct target_sigaction act, oact, *pact;
7778             if (arg2) {
7779                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7780                     return -TARGET_EFAULT;
7781                 act._sa_handler = old_act->_sa_handler;
7782                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7783                 act.sa_flags = old_act->sa_flags;
7784                 act.sa_restorer = old_act->sa_restorer;
7785 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7786                 act.ka_restorer = 0;
7787 #endif
7788                 unlock_user_struct(old_act, arg2, 0);
7789                 pact = &act;
7790             } else {
7791                 pact = NULL;
7792             }
7793             ret = get_errno(do_sigaction(arg1, pact, &oact));
7794             if (!is_error(ret) && arg3) {
7795                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7796                     return -TARGET_EFAULT;
7797                 old_act->_sa_handler = oact._sa_handler;
7798                 old_act->sa_mask = oact.sa_mask.sig[0];
7799                 old_act->sa_flags = oact.sa_flags;
7800                 old_act->sa_restorer = oact.sa_restorer;
7801                 unlock_user_struct(old_act, arg3, 1);
7802             }
7803 #endif
7804         }
7805         return ret;
7806 #endif
    case TARGET_NR_rt_sigaction:
        {
#if defined(TARGET_ALPHA)
            /* For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             * Alpha also has a separate rt_sigaction struct that it uses
             * here; SPARC uses the usual sigaction struct.
             */
            struct target_rt_sigaction *rt_act;
            struct target_sigaction act, oact, *pact = 0;

            /* The kernel rejects any sigsetsize other than the rt size. */
            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2) {
                /* Copy the new action out of guest memory field by field,
                 * taking sa_restorer from arg5 (see comment above).
                 */
                if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = rt_act->_sa_handler;
                act.sa_mask = rt_act->sa_mask;
                act.sa_flags = rt_act->sa_flags;
                act.sa_restorer = arg5;
                unlock_user_struct(rt_act, arg2, 0);
                pact = &act;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                /* Write the previous action back to the guest. */
                if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
                    return -TARGET_EFAULT;
                rt_act->_sa_handler = oact._sa_handler;
                rt_act->sa_mask = oact.sa_mask;
                rt_act->sa_flags = oact.sa_flags;
                unlock_user_struct(rt_act, arg3, 1);
            }
#else
            /* Generic path: guest passes target_sigaction structs directly;
             * only the argument position of sigsetsize differs per target.
             */
#ifdef TARGET_SPARC
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
#endif
            struct target_sigaction *act;
            struct target_sigaction *oact;

            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                    return -TARGET_EFAULT;
                }
#ifdef TARGET_ARCH_HAS_KA_RESTORER
                act->ka_restorer = restorer;
#endif
            } else {
                act = NULL;
            }
            if (arg3) {
                if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                    ret = -TARGET_EFAULT;
                    /* Still need to release the already-locked 'act'. */
                    goto rt_sigaction_fail;
                }
            } else
                oact = NULL;
            ret = get_errno(do_sigaction(arg1, act, oact));
	rt_sigaction_fail:
            if (act)
                unlock_user_struct(act, arg2, 0);
            if (oact)
                unlock_user_struct(oact, arg3, 1);
#endif
        }
        return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        /* Old-style syscall: return the current blocked-signal mask as a
         * single word in the syscall result (no user-memory pointer).
         */
        {
            sigset_t cur_set;
            abi_ulong target_set;
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        /* Old-style syscall: replace the blocked-signal mask with the word
         * in arg1 and return the previous mask as the result.
         */
        {
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
#if defined(TARGET_ALPHA)
            /* Alpha variant: the mask is passed by value in arg2, and the
             * old mask is returned in the result register (with v0 forced
             * to 0 so the guest doesn't see it as an errno).
             */
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
            }
#else
            /* Generic path: arg2/arg3 are guest pointers to old-style
             * sigsets; either may be NULL.
             */
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                /* No new set supplied: 'how' is irrelevant, just query. */
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            /* rt variant: the kernel insists on the exact rt sigset size. */
            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (arg2) {
                switch(how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                /* No new set supplied: 'how' is irrelevant, just query. */
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        /* Query the host's pending set and write it to the guest in
         * old-style sigset format.
         */
        {
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        /* Stash the requested mask in the TaskState and suspend; the mask
         * is read from guest memory except on Alpha, where it is passed by
         * value in arg1.
         */
        {
            TaskState *ts = cpu->opaque;
#if defined(TARGET_ALPHA)
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
#else
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_old_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
#endif
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            /* Flag that we are inside sigsuspend, unless the syscall must
             * be restarted (in which case it will run again from scratch).
             */
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        /* rt variant: mask is always a full target_sigset_t in guest
         * memory, with an explicit (and checked) sigsetsize in arg2.
         */
        {
            TaskState *ts = cpu->opaque;

            if (arg2 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
8093     case TARGET_NR_rt_sigtimedwait:
8094         {
8095             sigset_t set;
8096             struct timespec uts, *puts;
8097             siginfo_t uinfo;
8098 
8099             if (arg4 != sizeof(target_sigset_t)) {
8100                 return -TARGET_EINVAL;
8101             }
8102 
8103             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8104                 return -TARGET_EFAULT;
8105             target_to_host_sigset(&set, p);
8106             unlock_user(p, arg1, 0);
8107             if (arg3) {
8108                 puts = &uts;
8109                 target_to_host_timespec(puts, arg3);
8110             } else {
8111                 puts = NULL;
8112             }
8113             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8114                                                  SIGSET_T_SIZE));
8115             if (!is_error(ret)) {
8116                 if (arg2) {
8117                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8118                                   0);
8119                     if (!p) {
8120                         return -TARGET_EFAULT;
8121                     }
8122                     host_to_target_siginfo(p, &uinfo);
8123                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8124                 }
8125                 ret = host_to_target_signal(ret);
8126             }
8127         }
8128         return ret;
    case TARGET_NR_rt_sigqueueinfo:
        /* Queue a signal with caller-supplied siginfo to process arg1. */
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        /* As above, but targeted at thread arg2 in thread group arg1. */
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* Block signals around the frame restore; if that is interrupted,
         * ask the caller to restart the syscall.
         */
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        /* arg1 = guest pointer to name, arg2 = length. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                /* Memory limits: silently pretend success (see above). */
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        /* Fetch the host limit and convert both bounds to target format. */
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_getrusage:
        /* Host getrusage, then convert the struct into guest memory. */
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
    case TARGET_NR_gettimeofday:
        /* Note: the timezone argument is ignored here; only tv is copied. */
        {
            struct timeval tv;
            ret = get_errno(gettimeofday(&tv, NULL));
            if (!is_error(ret)) {
                if (copy_to_user_timeval(arg1, &tv))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_settimeofday:
        /* Both the timeval (arg1) and timezone (arg2) are optional. */
        {
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }

            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }

            return get_errno(settimeofday(ptv, ptz));
        }
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
        /* Three flavours depending on target ABI: ENOSYS, the old
         * single-struct-pointer calling convention, or plain select.
         */
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        {
            abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
            fd_set rfds, wfds, efds;
            fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
            struct timespec ts, *ts_ptr;

            /*
             * The 6th arg is actually two args smashed together,
             * so we cannot use the C library.
             */
            sigset_t set;
            struct {
                sigset_t *set;
                size_t size;
            } sig, *sig_ptr;

            abi_ulong arg_sigset, arg_sigsize, *arg7;
            target_sigset_t *target_sigset;

            n = arg1;
            rfd_addr = arg2;
            wfd_addr = arg3;
            efd_addr = arg4;
            ts_addr = arg5;

            /* Copy in each fd_set; a NULL guest pointer yields a NULL
             * host pointer via the *_ptr out-parameter.
             */
            ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
            if (ret) {
                return ret;
            }
            ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
            if (ret) {
                return ret;
            }
            ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
            if (ret) {
                return ret;
            }

            /*
             * This takes a timespec, and not a timeval, so we cannot
             * use the do_select() helper ...
             */
            if (ts_addr) {
                if (target_to_host_timespec(&ts, ts_addr)) {
                    return -TARGET_EFAULT;
                }
                ts_ptr = &ts;
            } else {
                ts_ptr = NULL;
            }

            /* Extract the two packed args for the sigset */
            if (arg6) {
                sig_ptr = &sig;
                sig.size = SIGSET_T_SIZE;

                arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
                if (!arg7) {
                    return -TARGET_EFAULT;
                }
                arg_sigset = tswapal(arg7[0]);
                arg_sigsize = tswapal(arg7[1]);
                unlock_user(arg7, arg6, 0);

                if (arg_sigset) {
                    sig.set = &set;
                    if (arg_sigsize != sizeof(*target_sigset)) {
                        /* Like the kernel, we enforce correct size sigsets */
                        return -TARGET_EINVAL;
                    }
                    target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                              sizeof(*target_sigset), 1);
                    if (!target_sigset) {
                        return -TARGET_EFAULT;
                    }
                    target_to_host_sigset(&set, target_sigset);
                    unlock_user(target_sigset, arg_sigset, 0);
                } else {
                    sig.set = NULL;
                }
            } else {
                sig_ptr = NULL;
            }

            ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                          ts_ptr, sig_ptr));

            /* On success write back the (modified) fd sets and the
             * remaining time, but only for non-NULL guest pointers.
             */
            if (!is_error(ret)) {
                if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
                    return -TARGET_EFAULT;
                if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
                    return -TARGET_EFAULT;
                if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
                    return -TARGET_EFAULT;

                if (ts_addr && host_to_target_timespec(ts_addr, &ts))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        /* Lock both path strings; either failing yields EFAULT, and both
         * are unlocked unconditionally afterwards.
         */
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        /* As symlink, but relative to directory fd arg2. */
        {
            void *p2;
            p  = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        /* readlink, with special handling of /proc/self/exe: that must
         * resolve to the emulated binary, not QEMU itself.
         */
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg3) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg3);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        /* As readlink, but relative to directory fd arg1.  The
         * /proc/self/exe handling now matches TARGET_NR_readlink: no NUL
         * terminator is written (readlink(2) semantics) and the result is
         * clamped to the guest buffer size, instead of the previous
         * snprintf which NUL-terminated and could report a length larger
         * than the number of bytes written.
         */
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg4) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg4);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        /* arg1 = path of swap device/file, arg2 = swap flags. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        /* Only RESTART2 carries a string argument (arg4). */
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
           /* arg4 must be ignored in all other cases */
           p = lock_user_string(arg4);
           if (!p) {
               return -TARGET_EFAULT;
           }
           ret = get_errno(reboot(arg1, arg2, arg3, p));
           unlock_user(p, arg4, 0);
        } else {
           ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        /* Old calling convention: arg1 points at a 6-word argument block
         * in guest memory rather than passing the args in registers.
         */
        {
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        return ret;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        /* mmap2 takes the offset in MMAP_SHIFT-sized units (pages). */
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
    case TARGET_NR_munmap:
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable.  */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                /* Expand the range to cover the whole guest stack region
                 * and drop the flag, since the host range doesn't grow.
                 */
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
        /* ??? msync/mlock/munlock are broken for softmmu.  */
        /* These pass the guest address through g2h() and call the host
         * syscall directly on the translated range.
         */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        return get_errno(msync(g2h(arg1), arg2, arg3));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        /* arg1 = guest path pointer, arg2 = new length. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        /* Note: the flags argument is passed as 0 to the host call. */
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings.  */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error.  */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers.  */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
        /* Conversion shared with TARGET_NR_fstatfs below (via goto). */
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        /* Reuses the statfs conversion above; arg2 is the output pointer
         * in both cases.
         */
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
8639 #ifdef TARGET_NR_statfs64
8640     case TARGET_NR_statfs64:
8641         if (!(p = lock_user_string(arg1))) {
8642             return -TARGET_EFAULT;
8643         }
8644         ret = get_errno(statfs(path(p), &stfs));
8645         unlock_user(p, arg1, 0);
8646     convert_statfs64:
8647         if (!is_error(ret)) {
8648             struct target_statfs64 *target_stfs;
8649 
8650             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8651                 return -TARGET_EFAULT;
8652             __put_user(stfs.f_type, &target_stfs->f_type);
8653             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8654             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8655             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8656             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8657             __put_user(stfs.f_files, &target_stfs->f_files);
8658             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8659             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8660             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8661             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8662             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8663             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8664             unlock_user_struct(target_stfs, arg3, 1);
8665         }
8666         return ret;
8667     case TARGET_NR_fstatfs64:
8668         ret = get_errno(fstatfs(arg1, &stfs));
8669         goto convert_statfs64;
8670 #endif
    /* Socket family: thin dispatchers to the do_* helpers defined earlier
     * in this file, which handle target<->host sockaddr/msghdr layout and
     * flag conversion.  */
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        /* accept() is accept4() with no flags.  */
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        /* No pointer arguments; call the host directly.  */
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        /* recv() is recvfrom() with a NULL source address.  */
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        /* send() is sendto() with a NULL destination address.  */
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        /* arg1 = buf, arg2 = buflen, arg3 = flags; on success 'ret' is the
         * number of bytes filled, which is also how much to copy back.  */
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            /*
             * syslog(2): arg1 = type, arg2 = guest buffer, arg3 = length.
             *
             * Bug fix: 'len' previously mirrored arg2 (the buffer pointer),
             * so the sign/zero guards below operated on the pointer value:
             * a guest buffer located above 0x7fffffff was rejected with
             * -EINVAL and a NULL buffer short-circuited to 0.  The kernel's
             * do_syslog() applies these checks to the *length* argument.
             */
            int len = arg3;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                /* These actions never touch the buffer.  */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                    unlock_user(p, arg2, arg3);
                }
                return ret;
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            if (arg2) {
                pvalue = &value;
                /* The guest itimerval is two consecutive target_timevals
                 * (it_interval then it_value); convert each separately.  */
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                /* NULL new-value pointer: query-only semantics.  */
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            /* Copy the old timer value out only if the guest asked for it.  */
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        /* path() may redirect the lookup under the -L sysroot prefix.  */
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
            /* Common tail for stat/lstat/fstat: 'ret' and 'st' are set,
             * and the guest stat buffer is at arg2 in all three cases.  */
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                /* Zero first so target-only padding/fields are defined.  */
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        /* Indirect syscall: re-dispatch with arg1 as the number and the
         * remaining args shifted down by one.  */
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            /* safe_wait4 restarts correctly around guest signals.  */
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                /* ret == 0 (WNOHANG, nothing exited) leaves status
                 * untouched, so only write it back when ret != 0.  */
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_sysinfo:
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            /* Only convert when the call succeeded and the guest passed a
             * non-NULL buffer.  */
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
    /* System V IPC: targets either multiplex through sys_ipc() or expose
     * the individual syscalls; the do_* helpers handle struct conversion.  */
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        return do_semop(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        /* Run atexit-style guest cleanup (e.g. gdbstub detach) first.  */
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release.  */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            /* Copy back even on failure; the struct is then unspecified.  */
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86:
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
    case TARGET_NR_adjtimex:
        {
            struct timex host_buf;

            /* struct timex is converted both ways: it is read/modify/write
             * for the kernel.  */
            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx, *phtx = &htx;

            if (target_to_host_timex(phtx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, phtx));
            /* NOTE: phtx always points at htx here, so the '&& phtx' test
             * is redundant (kept for safety).  */
            if (!is_error(ret) && phtx) {
                if (host_to_target_timex(arg2, phtx) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
    case TARGET_NR_getpgid:
        return get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        return get_errno(fchdir(arg1));
    case TARGET_NR_personality:
        return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            /* 64-bit hosts have no _llseek; emulate with plain lseek by
             * recombining the high (arg2) and low (arg3) offset halves.  */
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            /* Resulting 64-bit offset is stored through arg4.  */
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        {
            /* 32-bit target on a 64-bit host: host linux_dirent records are
             * wider than target_dirent ones, so read into a temporary host
             * buffer and repack record by record into the guest buffer.
             * Since target records are never larger, 'count' bytes of guest
             * space always suffice (asserted below).
             */
            struct target_dirent *target_dirp;
            struct linux_dirent *dirp;
            abi_long count = arg3;

            dirp = g_try_malloc(count);
            if (!dirp) {
                return -TARGET_ENOMEM;
            }

            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                struct target_dirent *tde;
                int len = ret;
                int reclen, treclen;
                int count1, tnamelen;

                count1 = 0;
                de = dirp;
                target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
                if (!target_dirp) {
                    /* Bug fix: dirp was previously leaked on this path.  */
                    g_free(dirp);
                    return -TARGET_EFAULT;
                }
                tde = target_dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    tnamelen = reclen - offsetof(struct linux_dirent, d_name);
                    assert(tnamelen >= 0);
                    treclen = tnamelen + offsetof(struct target_dirent, d_name);
                    assert(count1 + treclen <= count);
                    tde->d_reclen = tswap16(treclen);
                    tde->d_ino = tswapal(de->d_ino);
                    tde->d_off = tswapal(de->d_off);
                    memcpy(tde->d_name, de->d_name, tnamelen);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    count1 += treclen;
                }
                /* Return the repacked length, not the host length.  */
                ret = count1;
                unlock_user(target_dirp, arg2, ret);
            }
            g_free(dirp);
        }
#else
        {
            /* Same record layout on host and target: byteswap in place.  */
            struct linux_dirent *dirp;
            abi_long count = arg3;

            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswapls(&de->d_ino);
                    tswapls(&de->d_off);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
#else
        /* Implement getdents in terms of getdents64 */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;

            dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
            if (!dirp) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                /* Convert the dirent64 structs to target dirent.  We do this
                 * in-place, since we can guarantee that a target_dirent is no
                 * larger than a dirent64; however this means we have to be
                 * careful to read everything before writing in the new format.
                 */
                struct linux_dirent64 *de;
                struct target_dirent *tde;
                int len = ret;
                int tlen = 0;

                de = dirp;
                tde = (struct target_dirent *)dirp;
                while (len > 0) {
                    int namelen, treclen;
                    int reclen = de->d_reclen;
                    uint64_t ino = de->d_ino;
                    int64_t off = de->d_off;
                    uint8_t type = de->d_type;

                    namelen = strlen(de->d_name);
                    /* +2: NUL terminator plus trailing d_type byte.  */
                    treclen = offsetof(struct target_dirent, d_name)
                        + namelen + 2;
                    treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));

                    memmove(tde->d_name, de->d_name, namelen + 1);
                    tde->d_ino = tswapal(ino);
                    tde->d_off = tswapal(off);
                    tde->d_reclen = tswap16(treclen);
                    /* The target_dirent type is in what was formerly a padding
                     * byte at the end of the structure:
                     */
                    *(((char *)tde) + treclen - 1) = type;

                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    len -= reclen;
                    tlen += treclen;
                }
                ret = tlen;
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
        return ret;
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        {
            /* linux_dirent64 layout matches the target's; only the integer
             * fields need byteswapping, done in place.  */
            struct linux_dirent64 *dirp;
            abi_long count = arg3;
            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent64 *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    /* Stop on a truncated trailing record.  */
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswap64s((uint64_t *)&de->d_ino);
                    tswap64s((uint64_t *)&de->d_off);
                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
        return ret;
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        return do_select(arg1, arg2, arg3, arg4, arg5);
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
# ifdef TARGET_NR_poll
    case TARGET_NR_poll:
# endif
# ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
# endif
        /* poll and ppoll share the pollfd conversion; both are implemented
         * on top of the host ppoll via safe_ppoll, and the actual syscall
         * number ('num') selects the argument handling below.  */
        {
            struct target_pollfd *target_pfd;
            unsigned int nfds = arg2;
            struct pollfd *pfd;
            unsigned int i;

            pfd = NULL;
            target_pfd = NULL;
            if (nfds) {
                /* Reject absurd nfds before the multiply can overflow.  */
                if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
                    return -TARGET_EINVAL;
                }

                target_pfd = lock_user(VERIFY_WRITE, arg1,
                                       sizeof(struct target_pollfd) * nfds, 1);
                if (!target_pfd) {
                    return -TARGET_EFAULT;
                }

                pfd = alloca(sizeof(struct pollfd) * nfds);
                for (i = 0; i < nfds; i++) {
                    pfd[i].fd = tswap32(target_pfd[i].fd);
                    pfd[i].events = tswap16(target_pfd[i].events);
                }
            }

            switch (num) {
# ifdef TARGET_NR_ppoll
            case TARGET_NR_ppoll:
            {
                struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
                target_sigset_t *target_set;
                sigset_t _set, *set = &_set;

                if (arg3) {
                    if (target_to_host_timespec(timeout_ts, arg3)) {
                        unlock_user(target_pfd, arg1, 0);
                        return -TARGET_EFAULT;
                    }
                } else {
                    /* NULL timeout: block indefinitely.  */
                    timeout_ts = NULL;
                }

                if (arg4) {
                    /* Kernel insists on the exact sigset size.  */
                    if (arg5 != sizeof(target_sigset_t)) {
                        unlock_user(target_pfd, arg1, 0);
                        return -TARGET_EINVAL;
                    }

                    target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
                    if (!target_set) {
                        unlock_user(target_pfd, arg1, 0);
                        return -TARGET_EFAULT;
                    }
                    target_to_host_sigset(set, target_set);
                } else {
                    set = NULL;
                }

                ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                           set, SIGSET_T_SIZE));

                /* Kernel ppoll updates the timeout with the time left.  */
                if (!is_error(ret) && arg3) {
                    host_to_target_timespec(arg3, timeout_ts);
                }
                if (arg4) {
                    unlock_user(target_set, arg4, 0);
                }
                break;
            }
# endif
# ifdef TARGET_NR_poll
            case TARGET_NR_poll:
            {
                struct timespec ts, *pts;

                if (arg3 >= 0) {
                    /* Convert ms to secs, ns */
                    ts.tv_sec = arg3 / 1000;
                    ts.tv_nsec = (arg3 % 1000) * 1000000LL;
                    pts = &ts;
                } else {
                    /* -ve poll() timeout means "infinite" */
                    pts = NULL;
                }
                ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
                break;
            }
# endif
            default:
                g_assert_not_reached();
            }

            if (!is_error(ret)) {
                for(i = 0; i < nfds; i++) {
                    target_pfd[i].revents = tswap16(pfd[i].revents);
                }
            }
            unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
        }
        return ret;
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        return get_errno(safe_flock(arg1, arg2));
    case TARGET_NR_readv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                /* copy=1: the iovec buffers were written by the host.  */
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                /* lock_iovec reports its failure through errno.  */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                /* copy=0: buffers were only read; nothing to copy back.  */
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            /* preadv(2): like readv but positioned; the 64-bit offset is
             * split across arg4/arg5 in a target-dependent order that
             * target_to_host_low_high() resolves. */
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
           }
        }
        return ret;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            /* pwritev(2): positioned writev; see preadv above for the
             * offset-pair handling. */
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
           }
        }
        return ret;
#endif
    case TARGET_NR_getsid:
        /* getsid(2): pid in arg1, no pointer arguments to translate */
        return get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        return get_errno(fdatasync(arg1));
#endif
#ifdef TARGET_NR__sysctl
    case TARGET_NR__sysctl:
        /* We don't implement this, but ENOTDIR is always a safe
           return value. */
        return -TARGET_ENOTDIR;
#endif
    case TARGET_NR_sched_getaffinity:
        {
            /* sched_getaffinity(2): arg1 = pid, arg2 = target cpuset byte
             * count, arg3 = target cpuset buffer.  A host-sized bounce
             * buffer is used and converted via host_to_target_cpu_mask(). */
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            /* round the target byte count up to a whole number of host longs */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        return -TARGET_EINVAL;
                    }
                    ret = arg2;
                }

                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    case TARGET_NR_sched_setaffinity:
        {
            /* sched_setaffinity(2): inverse of getaffinity above; the
             * target cpuset is converted into a host-sized bounce buffer
             * before the real syscall. */
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            /* returns a target errno directly on conversion failure */
            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                return ret;
            }

            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
    case TARGET_NR_getcpu:
        {
            /* getcpu(2): arg1/arg2 are optional u32 pointers for the cpu
             * and node numbers; NULL pointers are passed through so the
             * kernel skips the corresponding output. */
            unsigned cpu, node;
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpu, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        {
            /* sched_setparam(2): copy the target sched_param (single
             * sched_priority field) into a host struct, byte-swapping. */
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        {
            /* sched_getparam(2): inverse direction; only write back to the
             * target struct on success. */
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
                    return -TARGET_EFAULT;
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        {
            /* sched_setscheduler(2): arg2 = policy (SCHED_* values assumed
             * to match the host), arg3 = target sched_param pointer. */
            struct sched_param *target_schp;
            struct sched_param schp;
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            return get_errno(sched_setscheduler(arg1, arg2, &schp));
        }
    case TARGET_NR_sched_getscheduler:
        return get_errno(sched_getscheduler(arg1));
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
    case TARGET_NR_sched_rr_get_interval:
        {
            /* sched_rr_get_interval(2): convert the returned host timespec
             * to the target layout only on success. */
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
9606     case TARGET_NR_nanosleep:
9607         {
9608             struct timespec req, rem;
9609             target_to_host_timespec(&req, arg1);
9610             ret = get_errno(safe_nanosleep(&req, &rem));
9611             if (is_error(ret) && arg2) {
9612                 host_to_target_timespec(arg2, &rem);
9613             }
9614         }
9615         return ret;
    case TARGET_NR_prctl:
        /* prctl(2): options with pointer arguments need explicit handling;
         * everything else falls through to the default pass-through below. */
        switch (arg1) {
        case PR_GET_PDEATHSIG:
        {
            /* arg2 is an out-pointer for the death signal number */
            int deathsig;
            ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
            if (!is_error(ret) && arg2
                && put_user_ual(deathsig, arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
#ifdef PR_GET_NAME
        case PR_GET_NAME:
        {
            /* 16 bytes = kernel TASK_COMM_LEN for the thread name buffer */
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
        case PR_SET_NAME:
        {
            /* read direction: the 16-byte name comes from the target */
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
#endif
#ifdef TARGET_MIPS
        case TARGET_PR_GET_FP_MODE:
        {
            /* Report the emulated CPU's current FR/FRE FP mode bits */
            CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
            ret = 0;
            if (env->CP0_Status & (1 << CP0St_FR)) {
                ret |= TARGET_PR_FP_MODE_FR;
            }
            if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
                ret |= TARGET_PR_FP_MODE_FRE;
            }
            return ret;
        }
        case TARGET_PR_SET_FP_MODE:
        {
            /* Switch the emulated FPU between FR0/FR1 register modes,
             * validating the request against what the CPU model supports
             * and repacking the FP register file when the mode changes. */
            CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
            bool old_fr = env->CP0_Status & (1 << CP0St_FR);
            bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
            bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
            bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;

            const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
                                            TARGET_PR_FP_MODE_FRE;

            /* If nothing to change, return right away, successfully.  */
            if (old_fr == new_fr && old_fre == new_fre) {
                return 0;
            }
            /* Check the value is valid */
            if (arg2 & ~known_bits) {
                return -TARGET_EOPNOTSUPP;
            }
            /* Setting FRE without FR is not supported.  */
            if (new_fre && !new_fr) {
                return -TARGET_EOPNOTSUPP;
            }
            if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
                /* FR1 is not supported */
                return -TARGET_EOPNOTSUPP;
            }
            if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
                && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
                /* cannot set FR=0 */
                return -TARGET_EOPNOTSUPP;
            }
            if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
                /* Cannot set FRE=1 */
                return -TARGET_EOPNOTSUPP;
            }

            /* Move the odd-half words between the packed (FR0) and
             * full-register (FR1) layouts of each even/odd FPR pair. */
            int i;
            fpr_t *fpr = env->active_fpu.fpr;
            for (i = 0; i < 32 ; i += 2) {
                if (!old_fr && new_fr) {
                    fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
                } else if (old_fr && !new_fr) {
                    fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
                }
            }

            /* Commit the new mode to CP0 state and translator hflags */
            if (new_fr) {
                env->CP0_Status |= (1 << CP0St_FR);
                env->hflags |= MIPS_HFLAG_F64;
            } else {
                env->CP0_Status &= ~(1 << CP0St_FR);
                env->hflags &= ~MIPS_HFLAG_F64;
            }
            if (new_fre) {
                env->CP0_Config5 |= (1 << CP0C5_FRE);
                if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
                    env->hflags |= MIPS_HFLAG_FRE;
                }
            } else {
                env->CP0_Config5 &= ~(1 << CP0C5_FRE);
                env->hflags &= ~MIPS_HFLAG_FRE;
            }

            return 0;
        }
#endif /* MIPS */
#ifdef TARGET_AARCH64
        case TARGET_PR_SVE_SET_VL:
            /*
             * We cannot support either PR_SVE_SET_VL_ONEXEC or
             * PR_SVE_VL_INHERIT.  Note the kernel definition
             * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
             * even though the current architectural maximum is VQ=16.
             */
            ret = -TARGET_EINVAL;
            if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env))
                && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = arm_env_get_cpu(env);
                uint32_t vq, old_vq;

                /* ZCR_EL1.LEN holds VQ-1; clamp the request to the CPU max */
                old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
                vq = MAX(arg2 / 16, 1);
                vq = MIN(vq, cpu->sve_max_vq);

                /* Shrinking the VL must discard the now-inaccessible state */
                if (vq < old_vq) {
                    aarch64_sve_narrow_vq(env, vq);
                }
                env->vfp.zcr_el[1] = vq - 1;
                /* Success returns the new vector length in bytes */
                ret = vq * 16;
            }
            return ret;
        case TARGET_PR_SVE_GET_VL:
            ret = -TARGET_EINVAL;
            {
                ARMCPU *cpu = arm_env_get_cpu(cpu_env);
                if (cpu_isar_feature(aa64_sve, cpu)) {
                    /* current vector length in bytes, from ZCR_EL1.LEN */
                    ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
                }
            }
            return ret;
        case TARGET_PR_PAC_RESET_KEYS:
            {
                /* Re-randomize the requested pointer-authentication keys;
                 * arg2 == 0 means "all keys", per the kernel ABI. */
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = arm_env_get_cpu(env);

                if (arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                if (cpu_isar_feature(aa64_pauth, cpu)) {
                    int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
                               TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
                               TARGET_PR_PAC_APGAKEY);
                    if (arg2 == 0) {
                        arg2 = all;
                    } else if (arg2 & ~all) {
                        return -TARGET_EINVAL;
                    }
                    if (arg2 & TARGET_PR_PAC_APIAKEY) {
                        arm_init_pauth_key(&env->apia_key);
                    }
                    if (arg2 & TARGET_PR_PAC_APIBKEY) {
                        arm_init_pauth_key(&env->apib_key);
                    }
                    if (arg2 & TARGET_PR_PAC_APDAKEY) {
                        arm_init_pauth_key(&env->apda_key);
                    }
                    if (arg2 & TARGET_PR_PAC_APDBKEY) {
                        arm_init_pauth_key(&env->apdb_key);
                    }
                    if (arg2 & TARGET_PR_PAC_APGAKEY) {
                        arm_init_pauth_key(&env->apga_key);
                    }
                    return 0;
                }
            }
            return -TARGET_EINVAL;
#endif /* AARCH64 */
        case PR_GET_SECCOMP:
        case PR_SET_SECCOMP:
            /* Disable seccomp to prevent the target disabling syscalls we
             * need. */
            return -TARGET_EINVAL;
        default:
            /* Most prctl options have no pointer arguments */
            return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
        }
        break;
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
#if defined(TARGET_I386) && !defined(TARGET_ABI32)
        /* x86-64 only: FS/GS base manipulation, handled by the i386 code */
        return do_arch_prctl(cpu_env, arg1, arg2);
#else
#error unreachable
#endif
#endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        /* Some ABIs pass 64-bit syscall arguments in aligned register
         * pairs, inserting a pad register; shift the offset halves down. */
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getcwd:
        /* getcwd(2): sys_getcwd1 returns the string length on success,
         * which is also how many bytes we copy back to the target. */
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            return -TARGET_EFAULT;
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
        return ret;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    {
        /* capget(2)/capset(2): translate the user_cap_header and the one
         * (v1) or two (v2+) user_cap_data structs between target and host
         * byte order.  The data pointer (arg2) may legitimately be NULL
         * (capget version probe). */
        struct target_user_cap_header *target_header;
        struct target_user_cap_data *target_data = NULL;
        struct __user_cap_header_struct header;
        struct __user_cap_data_struct data[2];
        struct __user_cap_data_struct *dataptr = NULL;
        int i, target_datalen;
        int data_items = 1;

        if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
            return -TARGET_EFAULT;
        }
        header.version = tswap32(target_header->version);
        header.pid = tswap32(target_header->pid);

        if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Version 2 and up takes pointer to two user_data structs */
            data_items = 2;
        }

        target_datalen = sizeof(*target_data) * data_items;

        if (arg2) {
            if (num == TARGET_NR_capget) {
                target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
            } else {
                target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
            }
            if (!target_data) {
                unlock_user_struct(target_header, arg1, 0);
                return -TARGET_EFAULT;
            }

            if (num == TARGET_NR_capset) {
                for (i = 0; i < data_items; i++) {
                    data[i].effective = tswap32(target_data[i].effective);
                    data[i].permitted = tswap32(target_data[i].permitted);
                    data[i].inheritable = tswap32(target_data[i].inheritable);
                }
            }

            dataptr = data;
        }

        if (num == TARGET_NR_capget) {
            ret = get_errno(capget(&header, dataptr));
        } else {
            ret = get_errno(capset(&header, dataptr));
        }

        /* The kernel always updates version for both capget and capset */
        target_header->version = tswap32(header.version);
        unlock_user_struct(target_header, arg1, 1);

        if (arg2) {
            if (num == TARGET_NR_capget) {
                for (i = 0; i < data_items; i++) {
                    target_data[i].effective = tswap32(data[i].effective);
                    target_data[i].permitted = tswap32(data[i].permitted);
                    target_data[i].inheritable = tswap32(data[i].inheritable);
                }
                unlock_user(target_data, arg2, target_datalen);
            } else {
                unlock_user(target_data, arg2, 0);
            }
        }
        return ret;
    }
    case TARGET_NR_sigaltstack:
        /* sigaltstack(2): handled entirely by the signal emulation code */
        return do_sigaltstack(arg1, arg2,
                              get_sp_from_cpustate((CPUArchState *)cpu_env));
9937 
9938 #ifdef CONFIG_SENDFILE
9939 #ifdef TARGET_NR_sendfile
9940     case TARGET_NR_sendfile:
9941     {
9942         off_t *offp = NULL;
9943         off_t off;
9944         if (arg3) {
9945             ret = get_user_sal(off, arg3);
9946             if (is_error(ret)) {
9947                 return ret;
9948             }
9949             offp = &off;
9950         }
9951         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9952         if (!is_error(ret) && arg3) {
9953             abi_long ret2 = put_user_sal(off, arg3);
9954             if (is_error(ret2)) {
9955                 ret = ret2;
9956             }
9957         }
9958         return ret;
9959     }
9960 #endif
9961 #ifdef TARGET_NR_sendfile64
9962     case TARGET_NR_sendfile64:
9963     {
9964         off_t *offp = NULL;
9965         off_t off;
9966         if (arg3) {
9967             ret = get_user_s64(off, arg3);
9968             if (is_error(ret)) {
9969                 return ret;
9970             }
9971             offp = &off;
9972         }
9973         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9974         if (!is_error(ret) && arg3) {
9975             abi_long ret2 = put_user_s64(off, arg3);
9976             if (is_error(ret2)) {
9977                 ret = ret2;
9978             }
9979         }
9980         return ret;
9981     }
9982 #endif
9983 #endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        /* vfork(2): emulated via do_fork with the CLONE_VFORK|CLONE_VM
         * semantics the kernel uses for vfork */
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        /* ugetrlimit: "unsigned" getrlimit; translate the resource id and
         * convert rlim values (incl. RLIM_INFINITY encoding) for the
         * target.  (Legacy tab indentation preserved.) */
	struct rlimit rlim;
	int resource = target_to_host_resource(arg1);
	ret = get_errno(getrlimit(resource, &rlim));
	if (!is_error(ret)) {
	    struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                return -TARGET_EFAULT;
	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
	}
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        /* truncate64(2): the 64-bit length may be split across registers,
         * which target_truncate64() reassembles per-target. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        /* stat64(2): path() applies the -L prefix remapping before the
         * host stat; results go out via host_to_target_stat64(). */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        /* fstatat64/newfstatat: arg1 = dirfd, arg2 = path, arg4 = flags
         * (AT_* values assumed to match the host) */
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        /* 16-bit uid syscall: low2high* widen legacy 16-bit ids, mapping
         * the old 0xffff "no change" sentinel to (uid_t)-1 */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        /* legacy 16-bit variants: high2low* clamp wide host ids */
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10092     case TARGET_NR_getgroups:
10093         {
10094             int gidsetsize = arg1;
10095             target_id *target_grouplist;
10096             gid_t *grouplist;
10097             int i;
10098 
10099             grouplist = alloca(gidsetsize * sizeof(gid_t));
10100             ret = get_errno(getgroups(gidsetsize, grouplist));
10101             if (gidsetsize == 0)
10102                 return ret;
10103             if (!is_error(ret)) {
10104                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10105                 if (!target_grouplist)
10106                     return -TARGET_EFAULT;
10107                 for(i = 0;i < ret; i++)
10108                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10109                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10110             }
10111         }
10112         return ret;
10113     case TARGET_NR_setgroups:
10114         {
10115             int gidsetsize = arg1;
10116             target_id *target_grouplist;
10117             gid_t *grouplist = NULL;
10118             int i;
10119             if (gidsetsize) {
10120                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10121                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10122                 if (!target_grouplist) {
10123                     return -TARGET_EFAULT;
10124                 }
10125                 for (i = 0; i < gidsetsize; i++) {
10126                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10127                 }
10128                 unlock_user(target_grouplist, arg2, 0);
10129             }
10130             return get_errno(setgroups(gidsetsize, grouplist));
10131         }
    case TARGET_NR_fchown:
        /* 16-bit uid variant; low2high* widen the legacy ids */
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        /* fchownat(2): arg1 = dirfd, arg5 = AT_* flags passed through */
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        /* setresuid(2): raw syscall wrapper to avoid glibc's all-threads
         * setxid behavior; ids widened from the 16-bit ABI. */
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            /* getresuid(2): three out-pointers, narrowed to the 16-bit ABI */
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        /* setresgid(2): guard fixed to test TARGET_NR_setresgid (was
         * mistakenly TARGET_NR_getresgid, so a target defining only one of
         * the pair would mis-compile or silently lose this syscall).
         * sys_setresgid is the raw syscall, bypassing glibc's setxid
         * broadcast; ids widened from the 16-bit ABI. */
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            /* getresgid(2): three out-pointers, narrowed to the 16-bit ABI */
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        /* 16-bit uid variant of chown(2) */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        /* sys_set* are raw syscalls, avoiding glibc's setxid broadcast */
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));
10199 
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        /* 32-bit uid variant: ids pass through without 16-bit conversion */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif
10212 
10213 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10214    /* Alpha specific */
10215     case TARGET_NR_getxuid:
10216          {
10217             uid_t euid;
10218             euid=geteuid();
10219             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
10220          }
10221         return get_errno(getuid());
10222 #endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
   /* Alpha specific: getxgid returns the real gid in v0 and the
    * effective gid in a4 (the kernel's dual-return convention).  */
    case TARGET_NR_getxgid:
         {
            gid_t egid;  /* was uid_t: this variable holds a group id */
            egid = getegid();
            /* Secondary return value goes in register a4.  */
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
         }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        /* Unhandled sub-ops fall through and report EOPNOTSUPP.  */
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;

                /* The software control word's status bits are kept live
                 * in the FPCR (see the matching setsysinfo arm), so
                 * refresh them from fpcr bits 35+ before reporting.  */
                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64 (swcr, arg2))
                        return -TARGET_EFAULT;
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        return ret;
#endif
10264 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10265     /* Alpha specific */
10266     case TARGET_NR_osf_setsysinfo:
10267         ret = -TARGET_EOPNOTSUPP;
10268         switch (arg1) {
10269           case TARGET_SSI_IEEE_FP_CONTROL:
10270             {
10271                 uint64_t swcr, fpcr;
10272 
10273                 if (get_user_u64 (swcr, arg2)) {
10274                     return -TARGET_EFAULT;
10275                 }
10276 
10277                 /*
10278                  * The kernel calls swcr_update_status to update the
10279                  * status bits from the fpcr at every point that it
10280                  * could be queried.  Therefore, we store the status
10281                  * bits only in FPCR.
10282                  */
10283                 ((CPUAlphaState *)cpu_env)->swcr
10284                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10285 
10286                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10287                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10288                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10289                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10290                 ret = 0;
10291             }
10292             break;
10293 
10294           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10295             {
10296                 uint64_t exc, fpcr, fex;
10297 
10298                 if (get_user_u64(exc, arg2)) {
10299                     return -TARGET_EFAULT;
10300                 }
10301                 exc &= SWCR_STATUS_MASK;
10302                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10303 
10304                 /* Old exceptions are not signaled.  */
10305                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10306                 fex = exc & ~fex;
10307                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10308                 fex &= ((CPUArchState *)cpu_env)->swcr;
10309 
10310                 /* Update the hardware fpcr.  */
10311                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10312                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10313 
10314                 if (fex) {
10315                     int si_code = TARGET_FPE_FLTUNK;
10316                     target_siginfo_t info;
10317 
10318                     if (fex & SWCR_TRAP_ENABLE_DNO) {
10319                         si_code = TARGET_FPE_FLTUND;
10320                     }
10321                     if (fex & SWCR_TRAP_ENABLE_INE) {
10322                         si_code = TARGET_FPE_FLTRES;
10323                     }
10324                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10325                         si_code = TARGET_FPE_FLTUND;
10326                     }
10327                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10328                         si_code = TARGET_FPE_FLTOVF;
10329                     }
10330                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10331                         si_code = TARGET_FPE_FLTDIV;
10332                     }
10333                     if (fex & SWCR_TRAP_ENABLE_INV) {
10334                         si_code = TARGET_FPE_FLTINV;
10335                     }
10336 
10337                     info.si_signo = SIGFPE;
10338                     info.si_errno = 0;
10339                     info.si_code = si_code;
10340                     info._sifields._sigfault._addr
10341                         = ((CPUArchState *)cpu_env)->pc;
10342                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10343                                  QEMU_SI_FAULT, &info);
10344                 }
10345                 ret = 0;
10346             }
10347             break;
10348 
10349           /* case SSI_NVPAIRS:
10350              -- Used with SSIN_UACPROC to enable unaligned accesses.
10351              case SSI_IEEE_STATE_AT_SIGNAL:
10352              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10353              -- Not implemented in linux kernel
10354           */
10355         }
10356         return ret;
10357 #endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            /* OSF/1-style sigprocmask: takes the mask by value in arg2
             * and, on success, returns the *previous* mask as the
             * syscall result rather than writing through a pointer.  */
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            /* Translate the target's how constant to the host's.  */
            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                /* Success: hand the old mask back as the return value.  */
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
10389 
10390 #ifdef TARGET_NR_getgid32
10391     case TARGET_NR_getgid32:
10392         return get_errno(getgid());
10393 #endif
10394 #ifdef TARGET_NR_geteuid32
10395     case TARGET_NR_geteuid32:
10396         return get_errno(geteuid());
10397 #endif
10398 #ifdef TARGET_NR_getegid32
10399     case TARGET_NR_getegid32:
10400         return get_errno(getegid());
10401 #endif
10402 #ifdef TARGET_NR_setreuid32
10403     case TARGET_NR_setreuid32:
10404         return get_errno(setreuid(arg1, arg2));
10405 #endif
10406 #ifdef TARGET_NR_setregid32
10407     case TARGET_NR_setregid32:
10408         return get_errno(setregid(arg1, arg2));
10409 #endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            /* Copy the supplementary group list out to the guest as an
             * array of 32-bit gids.  arg1 = element count, arg2 = buffer.  */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            /* Bound the guest-supplied count before alloca(): a negative
             * or huge value would otherwise smash the host stack.  The
             * kernel rejects oversized lists with EINVAL too.  */
            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0) {
                /* Size query: the kernel returns the group count.  */
                return ret;
            }
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            /* Install the guest's supplementary group list (32-bit gids).
             * arg1 = element count, arg2 = guest array.  */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            /* Bound the guest-supplied count before alloca(); the kernel
             * itself returns EINVAL for counts above NGROUPS_MAX.  */
            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                return -TARGET_EFAULT;
            }
            for (i = 0; i < gidsetsize; i++) {
                grouplist[i] = tswap32(target_grouplist[i]);
            }
            unlock_user(target_grouplist, arg2, 0);
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
10453 #ifdef TARGET_NR_fchown32
10454     case TARGET_NR_fchown32:
10455         return get_errno(fchown(arg1, arg2, arg3));
10456 #endif
10457 #ifdef TARGET_NR_setresuid32
10458     case TARGET_NR_setresuid32:
10459         return get_errno(sys_setresuid(arg1, arg2, arg3));
10460 #endif
10461 #ifdef TARGET_NR_getresuid32
10462     case TARGET_NR_getresuid32:
10463         {
10464             uid_t ruid, euid, suid;
10465             ret = get_errno(getresuid(&ruid, &euid, &suid));
10466             if (!is_error(ret)) {
10467                 if (put_user_u32(ruid, arg1)
10468                     || put_user_u32(euid, arg2)
10469                     || put_user_u32(suid, arg3))
10470                     return -TARGET_EFAULT;
10471             }
10472         }
10473         return ret;
10474 #endif
10475 #ifdef TARGET_NR_setresgid32
10476     case TARGET_NR_setresgid32:
10477         return get_errno(sys_setresgid(arg1, arg2, arg3));
10478 #endif
10479 #ifdef TARGET_NR_getresgid32
10480     case TARGET_NR_getresgid32:
10481         {
10482             gid_t rgid, egid, sgid;
10483             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10484             if (!is_error(ret)) {
10485                 if (put_user_u32(rgid, arg1)
10486                     || put_user_u32(egid, arg2)
10487                     || put_user_u32(sgid, arg3))
10488                     return -TARGET_EFAULT;
10489             }
10490         }
10491         return ret;
10492 #endif
10493 #ifdef TARGET_NR_chown32
10494     case TARGET_NR_chown32:
10495         if (!(p = lock_user_string(arg1)))
10496             return -TARGET_EFAULT;
10497         ret = get_errno(chown(p, arg2, arg3));
10498         unlock_user(p, arg1, 0);
10499         return ret;
10500 #endif
10501 #ifdef TARGET_NR_setuid32
10502     case TARGET_NR_setuid32:
10503         return get_errno(sys_setuid(arg1));
10504 #endif
10505 #ifdef TARGET_NR_setgid32
10506     case TARGET_NR_setgid32:
10507         return get_errno(sys_setgid(arg1));
10508 #endif
10509 #ifdef TARGET_NR_setfsuid32
10510     case TARGET_NR_setfsuid32:
10511         return get_errno(setfsuid(arg1));
10512 #endif
10513 #ifdef TARGET_NR_setfsgid32
10514     case TARGET_NR_setfsgid32:
10515         return get_errno(setfsgid(arg1));
10516 #endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            /* mincore(addr, len, vec): arg1/arg2 describe the mapping,
             * arg3 is an output vector of one byte per page.  */
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            /* NOTE(review): arg3 is an *output* byte vector, yet it is
             * locked with lock_user_string (NUL-terminated read), and the
             * unlock below copies back `ret` bytes -- which is 0 on
             * success, so the result vector may never reach the guest in
             * copy-back configurations.  Looks wrong; verify against the
             * kernel ABI before changing (host/target page-size mismatch
             * also matters for the vector length).  */
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
10535 #ifdef TARGET_NR_arm_fadvise64_64
10536     case TARGET_NR_arm_fadvise64_64:
10537         /* arm_fadvise64_64 looks like fadvise64_64 but
10538          * with different argument order: fd, advice, offset, len
10539          * rather than the usual fd, offset, len, advice.
10540          * Note that offset and len are both 64-bit so appear as
10541          * pairs of 32-bit registers.
10542          */
10543         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10544                             target_offset64(arg5, arg6), arg2);
10545         return -host_to_target_errno(ret);
10546 #endif
10547 
10548 #if TARGET_ABI_BITS == 32
10549 
10550 #ifdef TARGET_NR_fadvise64_64
10551     case TARGET_NR_fadvise64_64:
10552 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10553         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10554         ret = arg2;
10555         arg2 = arg3;
10556         arg3 = arg4;
10557         arg4 = arg5;
10558         arg5 = arg6;
10559         arg6 = ret;
10560 #else
10561         /* 6 args: fd, offset (high, low), len (high, low), advice */
10562         if (regpairs_aligned(cpu_env, num)) {
10563             /* offset is in (3,4), len in (5,6) and advice in 7 */
10564             arg2 = arg3;
10565             arg3 = arg4;
10566             arg4 = arg5;
10567             arg5 = arg6;
10568             arg6 = arg7;
10569         }
10570 #endif
10571         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10572                             target_offset64(arg4, arg5), arg6);
10573         return -host_to_target_errno(ret);
10574 #endif
10575 
10576 #ifdef TARGET_NR_fadvise64
10577     case TARGET_NR_fadvise64:
10578         /* 5 args: fd, offset (high, low), len, advice */
10579         if (regpairs_aligned(cpu_env, num)) {
10580             /* offset is in (3,4), len in 5 and advice in 6 */
10581             arg2 = arg3;
10582             arg3 = arg4;
10583             arg4 = arg5;
10584             arg5 = arg6;
10585         }
10586         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10587         return -host_to_target_errno(ret);
10588 #endif
10589 
10590 #else /* not a 32-bit ABI */
10591 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10592 #ifdef TARGET_NR_fadvise64_64
10593     case TARGET_NR_fadvise64_64:
10594 #endif
10595 #ifdef TARGET_NR_fadvise64
10596     case TARGET_NR_fadvise64:
10597 #endif
10598 #ifdef TARGET_S390X
10599         switch (arg4) {
10600         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10601         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10602         case 6: arg4 = POSIX_FADV_DONTNEED; break;
10603         case 7: arg4 = POSIX_FADV_NOREUSE; break;
10604         default: break;
10605         }
10606 #endif
10607         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10608 #endif
10609 #endif /* end of 64-bit ABI fadvise handling */
10610 
10611 #ifdef TARGET_NR_madvise
10612     case TARGET_NR_madvise:
10613         /* A straight passthrough may not be safe because qemu sometimes
10614            turns private file-backed mappings into anonymous mappings.
10615            This will break MADV_DONTNEED.
10616            This is a hint, so ignoring and returning success is ok.  */
10617         return 0;
10618 #endif
10619 #if TARGET_ABI_BITS == 32
10620     case TARGET_NR_fcntl64:
10621     {
10622 	int cmd;
10623 	struct flock64 fl;
10624         from_flock64_fn *copyfrom = copy_from_user_flock64;
10625         to_flock64_fn *copyto = copy_to_user_flock64;
10626 
10627 #ifdef TARGET_ARM
10628         if (!((CPUARMState *)cpu_env)->eabi) {
10629             copyfrom = copy_from_user_oabi_flock64;
10630             copyto = copy_to_user_oabi_flock64;
10631         }
10632 #endif
10633 
10634 	cmd = target_to_host_fcntl_cmd(arg2);
10635         if (cmd == -TARGET_EINVAL) {
10636             return cmd;
10637         }
10638 
10639         switch(arg2) {
10640         case TARGET_F_GETLK64:
10641             ret = copyfrom(&fl, arg3);
10642             if (ret) {
10643                 break;
10644             }
10645             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10646             if (ret == 0) {
10647                 ret = copyto(arg3, &fl);
10648             }
10649 	    break;
10650 
10651         case TARGET_F_SETLK64:
10652         case TARGET_F_SETLKW64:
10653             ret = copyfrom(&fl, arg3);
10654             if (ret) {
10655                 break;
10656             }
10657             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10658 	    break;
10659         default:
10660             ret = do_fcntl(arg1, arg2, arg3);
10661             break;
10662         }
10663         return ret;
10664     }
10665 #endif
10666 #ifdef TARGET_NR_cacheflush
10667     case TARGET_NR_cacheflush:
10668         /* self-modifying code is handled automatically, so nothing needed */
10669         return 0;
10670 #endif
10671 #ifdef TARGET_NR_getpagesize
10672     case TARGET_NR_getpagesize:
10673         return TARGET_PAGE_SIZE;
10674 #endif
10675     case TARGET_NR_gettid:
10676         return get_errno(sys_gettid());
10677 #ifdef TARGET_NR_readahead
10678     case TARGET_NR_readahead:
10679 #if TARGET_ABI_BITS == 32
10680         if (regpairs_aligned(cpu_env, num)) {
10681             arg2 = arg3;
10682             arg3 = arg4;
10683             arg4 = arg5;
10684         }
10685         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
10686 #else
10687         ret = get_errno(readahead(arg1, arg2, arg3));
10688 #endif
10689         return ret;
10690 #endif
10691 #ifdef CONFIG_ATTR
10692 #ifdef TARGET_NR_setxattr
10693     case TARGET_NR_listxattr:
10694     case TARGET_NR_llistxattr:
10695     {
10696         void *p, *b = 0;
10697         if (arg2) {
10698             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10699             if (!b) {
10700                 return -TARGET_EFAULT;
10701             }
10702         }
10703         p = lock_user_string(arg1);
10704         if (p) {
10705             if (num == TARGET_NR_listxattr) {
10706                 ret = get_errno(listxattr(p, b, arg3));
10707             } else {
10708                 ret = get_errno(llistxattr(p, b, arg3));
10709             }
10710         } else {
10711             ret = -TARGET_EFAULT;
10712         }
10713         unlock_user(p, arg1, 0);
10714         unlock_user(b, arg2, arg3);
10715         return ret;
10716     }
10717     case TARGET_NR_flistxattr:
10718     {
10719         void *b = 0;
10720         if (arg2) {
10721             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10722             if (!b) {
10723                 return -TARGET_EFAULT;
10724             }
10725         }
10726         ret = get_errno(flistxattr(arg1, b, arg3));
10727         unlock_user(b, arg2, arg3);
10728         return ret;
10729     }
10730     case TARGET_NR_setxattr:
10731     case TARGET_NR_lsetxattr:
10732         {
10733             void *p, *n, *v = 0;
10734             if (arg3) {
10735                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10736                 if (!v) {
10737                     return -TARGET_EFAULT;
10738                 }
10739             }
10740             p = lock_user_string(arg1);
10741             n = lock_user_string(arg2);
10742             if (p && n) {
10743                 if (num == TARGET_NR_setxattr) {
10744                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
10745                 } else {
10746                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10747                 }
10748             } else {
10749                 ret = -TARGET_EFAULT;
10750             }
10751             unlock_user(p, arg1, 0);
10752             unlock_user(n, arg2, 0);
10753             unlock_user(v, arg3, 0);
10754         }
10755         return ret;
10756     case TARGET_NR_fsetxattr:
10757         {
10758             void *n, *v = 0;
10759             if (arg3) {
10760                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10761                 if (!v) {
10762                     return -TARGET_EFAULT;
10763                 }
10764             }
10765             n = lock_user_string(arg2);
10766             if (n) {
10767                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10768             } else {
10769                 ret = -TARGET_EFAULT;
10770             }
10771             unlock_user(n, arg2, 0);
10772             unlock_user(v, arg3, 0);
10773         }
10774         return ret;
10775     case TARGET_NR_getxattr:
10776     case TARGET_NR_lgetxattr:
10777         {
10778             void *p, *n, *v = 0;
10779             if (arg3) {
10780                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10781                 if (!v) {
10782                     return -TARGET_EFAULT;
10783                 }
10784             }
10785             p = lock_user_string(arg1);
10786             n = lock_user_string(arg2);
10787             if (p && n) {
10788                 if (num == TARGET_NR_getxattr) {
10789                     ret = get_errno(getxattr(p, n, v, arg4));
10790                 } else {
10791                     ret = get_errno(lgetxattr(p, n, v, arg4));
10792                 }
10793             } else {
10794                 ret = -TARGET_EFAULT;
10795             }
10796             unlock_user(p, arg1, 0);
10797             unlock_user(n, arg2, 0);
10798             unlock_user(v, arg3, arg4);
10799         }
10800         return ret;
10801     case TARGET_NR_fgetxattr:
10802         {
10803             void *n, *v = 0;
10804             if (arg3) {
10805                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10806                 if (!v) {
10807                     return -TARGET_EFAULT;
10808                 }
10809             }
10810             n = lock_user_string(arg2);
10811             if (n) {
10812                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10813             } else {
10814                 ret = -TARGET_EFAULT;
10815             }
10816             unlock_user(n, arg2, 0);
10817             unlock_user(v, arg3, arg4);
10818         }
10819         return ret;
10820     case TARGET_NR_removexattr:
10821     case TARGET_NR_lremovexattr:
10822         {
10823             void *p, *n;
10824             p = lock_user_string(arg1);
10825             n = lock_user_string(arg2);
10826             if (p && n) {
10827                 if (num == TARGET_NR_removexattr) {
10828                     ret = get_errno(removexattr(p, n));
10829                 } else {
10830                     ret = get_errno(lremovexattr(p, n));
10831                 }
10832             } else {
10833                 ret = -TARGET_EFAULT;
10834             }
10835             unlock_user(p, arg1, 0);
10836             unlock_user(n, arg2, 0);
10837         }
10838         return ret;
10839     case TARGET_NR_fremovexattr:
10840         {
10841             void *n;
10842             n = lock_user_string(arg2);
10843             if (n) {
10844                 ret = get_errno(fremovexattr(arg1, n));
10845             } else {
10846                 ret = -TARGET_EFAULT;
10847             }
10848             unlock_user(n, arg2, 0);
10849         }
10850         return ret;
10851 #endif
10852 #endif /* CONFIG_ATTR */
10853 #ifdef TARGET_NR_set_thread_area
10854     case TARGET_NR_set_thread_area:
10855 #if defined(TARGET_MIPS)
10856       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10857       return 0;
10858 #elif defined(TARGET_CRIS)
10859       if (arg1 & 0xff)
10860           ret = -TARGET_EINVAL;
10861       else {
10862           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10863           ret = 0;
10864       }
10865       return ret;
10866 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10867       return do_set_thread_area(cpu_env, arg1);
10868 #elif defined(TARGET_M68K)
10869       {
10870           TaskState *ts = cpu->opaque;
10871           ts->tp_value = arg1;
10872           return 0;
10873       }
10874 #else
10875       return -TARGET_ENOSYS;
10876 #endif
10877 #endif
10878 #ifdef TARGET_NR_get_thread_area
10879     case TARGET_NR_get_thread_area:
10880 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10881         return do_get_thread_area(cpu_env, arg1);
10882 #elif defined(TARGET_M68K)
10883         {
10884             TaskState *ts = cpu->opaque;
10885             return ts->tp_value;
10886         }
10887 #else
10888         return -TARGET_ENOSYS;
10889 #endif
10890 #endif
10891 #ifdef TARGET_NR_getdomainname
10892     case TARGET_NR_getdomainname:
10893         return -TARGET_ENOSYS;
10894 #endif
10895 
10896 #ifdef TARGET_NR_clock_settime
10897     case TARGET_NR_clock_settime:
10898     {
10899         struct timespec ts;
10900 
10901         ret = target_to_host_timespec(&ts, arg2);
10902         if (!is_error(ret)) {
10903             ret = get_errno(clock_settime(arg1, &ts));
10904         }
10905         return ret;
10906     }
10907 #endif
10908 #ifdef TARGET_NR_clock_gettime
10909     case TARGET_NR_clock_gettime:
10910     {
10911         struct timespec ts;
10912         ret = get_errno(clock_gettime(arg1, &ts));
10913         if (!is_error(ret)) {
10914             ret = host_to_target_timespec(arg2, &ts);
10915         }
10916         return ret;
10917     }
10918 #endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        /* Query the resolution of clock arg1 and write it to the guest
         * timespec at arg2.  */
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            /* Previously the conversion's result was dropped, silently
             * losing EFAULT when arg2 was unwritable.  */
            if (host_to_target_timespec(arg2, &ts)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        /* Fail with EFAULT instead of sleeping on an uninitialized
         * timespec when the request can't be read from guest memory
         * (the conversion result was previously ignored).  */
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /* Copy the remaining time back only if the caller asked for it;
         * report EFAULT if that buffer is unwritable.  */
        if (arg4 && host_to_target_timespec(arg4, &ts)) {
            return -TARGET_EFAULT;
        }

#if defined(TARGET_PPC)
        /* clock_nanosleep is odd in that it returns positive errno values.
         * On PPC, CR0 bit 3 should be set in such a situation. */
        if (ret && ret != -TARGET_ERESTARTSYS) {
            ((CPUPPCState *)cpu_env)->crf[0] |= 1;
        }
#endif
        return ret;
    }
#endif
10950 
10951 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10952     case TARGET_NR_set_tid_address:
10953         return get_errno(set_tid_address((int *)g2h(arg1)));
10954 #endif
10955 
10956     case TARGET_NR_tkill:
10957         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10958 
10959     case TARGET_NR_tgkill:
10960         return get_errno(safe_tgkill((int)arg1, (int)arg2,
10961                          target_to_host_signal(arg3)));
10962 
10963 #ifdef TARGET_NR_set_robust_list
10964     case TARGET_NR_set_robust_list:
10965     case TARGET_NR_get_robust_list:
10966         /* The ABI for supporting robust futexes has userspace pass
10967          * the kernel a pointer to a linked list which is updated by
10968          * userspace after the syscall; the list is walked by the kernel
10969          * when the thread exits. Since the linked list in QEMU guest
10970          * memory isn't a valid linked list for the host and we have
10971          * no way to reliably intercept the thread-death event, we can't
10972          * support these. Silently return ENOSYS so that guest userspace
10973          * falls back to a non-robust futex implementation (which should
10974          * be OK except in the corner case of the guest crashing while
10975          * holding a mutex that is shared with another process via
10976          * shared memory).
10977          */
10978         return -TARGET_ENOSYS;
10979 #endif
10980 
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            /* arg3 is either NULL (use current time) or a pair of
             * target timespecs (atime, mtime).  */
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                /* Report EFAULT rather than passing uninitialized times
                 * to the host when either guest timespec is unreadable
                 * (conversion results were previously ignored).  */
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1,
                        arg3 + sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2) {
                /* NULL pathname: operate on the fd itself.  */
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            } else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
11004     case TARGET_NR_futex:
11005         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11006 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11007     case TARGET_NR_inotify_init:
11008         ret = get_errno(sys_inotify_init());
11009         if (ret >= 0) {
11010             fd_trans_register(ret, &target_inotify_trans);
11011         }
11012         return ret;
11013 #endif
11014 #ifdef CONFIG_INOTIFY1
11015 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11016     case TARGET_NR_inotify_init1:
11017         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11018                                           fcntl_flags_tbl)));
11019         if (ret >= 0) {
11020             fd_trans_register(ret, &target_inotify_trans);
11021         }
11022         return ret;
11023 #endif
11024 #endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        /* A bad guest pathname pointer previously passed NULL straight
         * into path()/the host syscall; report EFAULT instead.  */
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
11032 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11033     case TARGET_NR_inotify_rm_watch:
11034         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11035 #endif
11036 
11037 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11038     case TARGET_NR_mq_open:
11039         {
11040             struct mq_attr posix_mq_attr;
11041             struct mq_attr *pposix_mq_attr;
11042             int host_flags;
11043 
11044             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11045             pposix_mq_attr = NULL;
11046             if (arg4) {
11047                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11048                     return -TARGET_EFAULT;
11049                 }
11050                 pposix_mq_attr = &posix_mq_attr;
11051             }
11052             p = lock_user_string(arg1 - 1);
11053             if (!p) {
11054                 return -TARGET_EFAULT;
11055             }
11056             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11057             unlock_user (p, arg1, 0);
11058         }
11059         return ret;
11060 
11061     case TARGET_NR_mq_unlink:
11062         p = lock_user_string(arg1 - 1);
11063         if (!p) {
11064             return -TARGET_EFAULT;
11065         }
11066         ret = get_errno(mq_unlink(p));
11067         unlock_user (p, arg1, 0);
11068         return ret;
11069 
11070     case TARGET_NR_mq_timedsend:
11071         {
11072             struct timespec ts;
11073 
11074             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11075             if (arg5 != 0) {
11076                 target_to_host_timespec(&ts, arg5);
11077                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11078                 host_to_target_timespec(arg5, &ts);
11079             } else {
11080                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11081             }
11082             unlock_user (p, arg2, arg3);
11083         }
11084         return ret;
11085 
11086     case TARGET_NR_mq_timedreceive:
11087         {
11088             struct timespec ts;
11089             unsigned int prio;
11090 
11091             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11092             if (arg5 != 0) {
11093                 target_to_host_timespec(&ts, arg5);
11094                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11095                                                      &prio, &ts));
11096                 host_to_target_timespec(arg5, &ts);
11097             } else {
11098                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11099                                                      &prio, NULL));
11100             }
11101             unlock_user (p, arg2, arg3);
11102             if (arg4 != 0)
11103                 put_user_u32(prio, arg4);
11104         }
11105         return ret;
11106 
11107     /* Not implemented for now... */
11108 /*     case TARGET_NR_mq_notify: */
11109 /*         break; */
11110 
11111     case TARGET_NR_mq_getsetattr:
11112         {
11113             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11114             ret = 0;
11115             if (arg2 != 0) {
11116                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11117                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11118                                            &posix_mq_attr_out));
11119             } else if (arg3 != 0) {
11120                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11121             }
11122             if (ret == 0 && arg3 != 0) {
11123                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11124             }
11125         }
11126         return ret;
11127 #endif
11128 
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            /* fd-to-fd duplication: no guest memory is involved, so all
             * arguments pass straight through to the host. */
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            /* Copy in the optional in/out offsets; the host call updates
             * them, so both are written back to the guest afterwards. */
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
	case TARGET_NR_vmsplice:
        {
            /* Map the guest iovec array into a host iovec for the call. */
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                /* lock_iovec failed; errno holds the host error. */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            /* Register a translator so reads/writes of the 64-bit counter
             * get byte-swapped for cross-endian guests. */
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        /* Only O_NONBLOCK and O_CLOEXEC are translated here; other bits
         * are passed through unchanged. */
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD  */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        /* 32-bit ABIs pass the 64-bit offset and length as register pairs. */
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        /* MIPS o32 aligns 64-bit syscall arguments to even register pairs,
         * so the offset/nbytes pairs start at arg3 rather than arg2. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        /* Old-style signalfd: same helper with no flags. */
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(arg1));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            /* Convert the guest epoll_event; EPOLL_CTL_DEL passes none. */
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
11286 
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        /* Bound maxevents so the host-side event buffer stays small. */
        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        /* Guest output array, locked for the duration of the call. */
        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        /* Host-side scratch array for the kernel to fill in. */
        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                /* The sigset size must match the target layout exactly. */
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            /* Plain epoll_wait is epoll_pwait with no signal mask. */
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            /* Byte-swap the returned events back into guest format; 'ret'
             * is the number of events the kernel reported. */
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);
        if (arg3) {
            /* Copy in the new limits, if the caller supplied any. */
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            /* Copy the previous limits back out to the guest. */
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* Lock the guest buffer for writing; copy=0 since the existing
         * contents are irrelevant. */
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* args: new value, expected value, ..., guest address (arg6).
         * should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* The guest address faulted: deliver SIGSEGV and bail out.
             * The old code fell through here and compared the
             * uninitialized mem_value, which was undefined behaviour. */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            /* Return value is irrelevant; the signal is about to fire. */
            return 0xdeadbeef;
        }
        /* Store the new value only if the current value matched. */
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif

#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            /* Every slot in the host timer table is already in use. */
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers  + timer_index;

            if (arg2) {
                /* Optional sigevent describing how expiry is signalled. */
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                /* Hand the guest an encoded id (magic | table index) that
                 * get_timer_id() can later validate and decode. */
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
11477 
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() returns a negative target errno on bad ids. */
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* Optionally report the previous setting back to the guest. */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
11505 
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() returns a negative target errno on bad ids. */
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /* Only copy the result out on success: on failure hspec is
             * uninitialized and was previously written to guest memory
             * anyway. */
            if (!is_error(ret) && host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
11528 
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() returns a negative target errno on bad ids. */
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        /* The stray fd_trans_unregister(ret) that used to sit here was a
         * bug: 'ret' is an overrun count, not a file descriptor, so it
         * could drop the fd translator of an unrelated open descriptor. */
        return ret;
    }
#endif
11545 
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            /* Free the slot in the host timer table for reuse. */
            g_posix_timers[timerid] = 0;
        }
        return ret;
    }
#endif

#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* Translate target O_* flags (TFD_* share their values). */
        return get_errno(timerfd_create(arg1,
                          target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                /* Copy in the new timer setting, if supplied. */
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            /* Optionally report the previous setting back to the guest. */
            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
11605 
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        /* Integer-only arguments: pass straight through to the host. */
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific.  */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif

    /* Unknown or unimplemented syscall: log under -d unimp and fail. */
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}
11640 
11641 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11642                     abi_long arg2, abi_long arg3, abi_long arg4,
11643                     abi_long arg5, abi_long arg6, abi_long arg7,
11644                     abi_long arg8)
11645 {
11646     CPUState *cpu = ENV_GET_CPU(cpu_env);
11647     abi_long ret;
11648 
11649 #ifdef DEBUG_ERESTARTSYS
11650     /* Debug-only code for exercising the syscall-restart code paths
11651      * in the per-architecture cpu main loops: restart every syscall
11652      * the guest makes once before letting it through.
11653      */
11654     {
11655         static bool flag;
11656         flag = !flag;
11657         if (flag) {
11658             return -TARGET_ERESTARTSYS;
11659         }
11660     }
11661 #endif
11662 
11663     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11664                              arg5, arg6, arg7, arg8);
11665 
11666     if (unlikely(do_strace)) {
11667         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11668         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11669                           arg5, arg6, arg7, arg8);
11670         print_syscall_ret(num, ret);
11671     } else {
11672         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11673                           arg5, arg6, arg7, arg8);
11674     }
11675 
11676     trace_guest_user_syscall_ret(cpu, num, ret);
11677     return ret;
11678 }
11679