xref: /openbmc/qemu/linux-user/syscall.c (revision 4ab6713e)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef CONFIG_EVENTFD
63 #include <sys/eventfd.h>
64 #endif
65 #ifdef CONFIG_EPOLL
66 #include <sys/epoll.h>
67 #endif
68 #ifdef CONFIG_ATTR
69 #include "qemu/xattr.h"
70 #endif
71 #ifdef CONFIG_SENDFILE
72 #include <sys/sendfile.h>
73 #endif
74 
75 #define termios host_termios
76 #define winsize host_winsize
77 #define termio host_termio
78 #define sgttyb host_sgttyb /* same as target */
79 #define tchars host_tchars /* same as target */
80 #define ltchars host_ltchars /* same as target */
81 
82 #include <linux/termios.h>
83 #include <linux/unistd.h>
84 #include <linux/cdrom.h>
85 #include <linux/hdreg.h>
86 #include <linux/soundcard.h>
87 #include <linux/kd.h>
88 #include <linux/mtio.h>
89 #include <linux/fs.h>
90 #if defined(CONFIG_FIEMAP)
91 #include <linux/fiemap.h>
92 #endif
93 #include <linux/fb.h>
94 #if defined(CONFIG_USBFS)
95 #include <linux/usbdevice_fs.h>
96 #include <linux/usb/ch9.h>
97 #endif
98 #include <linux/vt.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <netpacket/packet.h>
105 #include <linux/netlink.h>
106 #include "linux_loop.h"
107 #include "uname.h"
108 
109 #include "qemu.h"
110 #include "fd-trans.h"
111 
#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

/* Any flag bit outside this mask makes a fork-style clone fail. */
#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

/* Any flag bit outside this mask makes a thread-style clone fail. */
#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */

/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
/* VFAT ioctls defined locally rather than via linux/msdos_fs.h
 * (the include above is commented out — presumably because of header
 * conflicts; TODO confirm).
 */
#define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
#define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
171 
/*
 * Undefine any libc-provided _syscallN macros and define our own:
 * each _syscallN(type, name, ...) use below expands to a static
 * function "name" that invokes the raw host syscall(2) directly,
 * bypassing libc's own wrapper for that call.
 */
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name)		\
static type name (void)			\
{					\
	return syscall(__NR_##name);	\
}

#define _syscall1(type,name,type1,arg1)		\
static type name (type1 arg1)			\
{						\
	return syscall(__NR_##name, arg1);	\
}

#define _syscall2(type,name,type1,arg1,type2,arg2)	\
static type name (type1 arg1,type2 arg2)		\
{							\
	return syscall(__NR_##name, arg1, arg2);	\
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
static type name (type1 arg1,type2 arg2,type3 arg3)		\
{								\
	return syscall(__NR_##name, arg1, arg2, arg3);		\
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5)							\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5,type6,arg6)					\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
                  type6 arg6)							\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
}
225 
226 
/* Map the sys_* wrapper names used below onto the host syscall numbers,
 * so the _syscallN() generators can paste __NR_sys_<name>.
 */
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

/* These 64-bit hosts only provide lseek; back _llseek emulation with it. */
#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)

/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif
316 
/* Translation table for the open()/fcntl() file status flags.
 * Each row appears to be { target_mask, target_bits, host_mask,
 * host_bits } — confirm against the bitmask_transtbl declaration in
 * the QEMU headers.  Flags that may be absent on some hosts are
 * guarded by #ifdef so the table still builds there.
 */
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
352 
/*
 * getcwd() helper backing the guest getcwd syscall.
 *
 * Returns the number of bytes stored in buf (string length plus the
 * trailing NUL) on success, or -1 with errno already set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    char *cwd = getcwd(buf, size);

    if (cwd == NULL) {
        /* errno has been set by getcwd() (e.g. ERANGE) */
        return -1;
    }
    return strlen(cwd) + 1;
}
361 
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
/* Host kernel provides utimensat: invoke it directly. */
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Fallback for hosts without utimensat: always fail with ENOSYS so
 * the guest sees the same error a kernel lacking the syscall would give.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
376 
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
/* Host kernel provides renameat2: invoke it directly. */
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
/* Fallback: without host renameat2 we can only honour flags == 0,
 * which is equivalent to plain renameat(); any flag bits get ENOSYS.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
394 
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin pass-through wrappers so the syscall dispatch code can refer to
 * the host libc inotify functions under uniform sys_* names.  Each one
 * is only compiled when both the target and the host have the syscall.
 */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
431 
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
/* Keep the _syscall4 expansion compiling on hosts without prlimit64;
 * syscall(-1, ...) will then simply fail at runtime.
 */
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
446 
447 
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, };

/*
 * Find a free slot in g_posix_timers[], reserve it and return its
 * index, or return -1 when all slots are in use.
 */
static inline int next_free_host_timer(void)
{
    size_t slot;

    /* FIXME: Does finding the next free slot require a lock? */
    for (slot = 0;
         slot < sizeof(g_posix_timers) / sizeof(g_posix_timers[0]);
         slot++) {
        if (g_posix_timers[slot] == 0) {
            /* Mark the slot busy with a dummy non-zero value; the real
             * host timer handle is stored here later. */
            g_posix_timers[slot] = (timer_t) 1;
            return slot;
        }
    }
    return -1;
}
#endif
465 
/* ARM EABI and MIPS expect 64bit types aligned even in pairs of registers.
 * regpairs_aligned() returns non-zero when syscall "num" on this target
 * passes 64-bit arguments in an aligned (even/odd) register pair.
 */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    /* Only the ARM EABI aligns 64-bit arguments; OABI does not. */
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
497 
/* Size of the errno translation tables below.  Host/target errno
 * values are used as direct indexes, so this must exceed the largest
 * errno value either side can produce.
 */
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init().
 * A zero entry means "no translation needed; pass the value through"
 * (see target_to_host_errno()).
 */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};
504 
505 /*
506  * This list is the union of errno values overridden in asm-<arch>/errno.h
507  * minus the errnos that are not actually generic to all archs.
508  */
509 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
510     [EAGAIN]		= TARGET_EAGAIN,
511     [EIDRM]		= TARGET_EIDRM,
512     [ECHRNG]		= TARGET_ECHRNG,
513     [EL2NSYNC]		= TARGET_EL2NSYNC,
514     [EL3HLT]		= TARGET_EL3HLT,
515     [EL3RST]		= TARGET_EL3RST,
516     [ELNRNG]		= TARGET_ELNRNG,
517     [EUNATCH]		= TARGET_EUNATCH,
518     [ENOCSI]		= TARGET_ENOCSI,
519     [EL2HLT]		= TARGET_EL2HLT,
520     [EDEADLK]		= TARGET_EDEADLK,
521     [ENOLCK]		= TARGET_ENOLCK,
522     [EBADE]		= TARGET_EBADE,
523     [EBADR]		= TARGET_EBADR,
524     [EXFULL]		= TARGET_EXFULL,
525     [ENOANO]		= TARGET_ENOANO,
526     [EBADRQC]		= TARGET_EBADRQC,
527     [EBADSLT]		= TARGET_EBADSLT,
528     [EBFONT]		= TARGET_EBFONT,
529     [ENOSTR]		= TARGET_ENOSTR,
530     [ENODATA]		= TARGET_ENODATA,
531     [ETIME]		= TARGET_ETIME,
532     [ENOSR]		= TARGET_ENOSR,
533     [ENONET]		= TARGET_ENONET,
534     [ENOPKG]		= TARGET_ENOPKG,
535     [EREMOTE]		= TARGET_EREMOTE,
536     [ENOLINK]		= TARGET_ENOLINK,
537     [EADV]		= TARGET_EADV,
538     [ESRMNT]		= TARGET_ESRMNT,
539     [ECOMM]		= TARGET_ECOMM,
540     [EPROTO]		= TARGET_EPROTO,
541     [EDOTDOT]		= TARGET_EDOTDOT,
542     [EMULTIHOP]		= TARGET_EMULTIHOP,
543     [EBADMSG]		= TARGET_EBADMSG,
544     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
545     [EOVERFLOW]		= TARGET_EOVERFLOW,
546     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
547     [EBADFD]		= TARGET_EBADFD,
548     [EREMCHG]		= TARGET_EREMCHG,
549     [ELIBACC]		= TARGET_ELIBACC,
550     [ELIBBAD]		= TARGET_ELIBBAD,
551     [ELIBSCN]		= TARGET_ELIBSCN,
552     [ELIBMAX]		= TARGET_ELIBMAX,
553     [ELIBEXEC]		= TARGET_ELIBEXEC,
554     [EILSEQ]		= TARGET_EILSEQ,
555     [ENOSYS]		= TARGET_ENOSYS,
556     [ELOOP]		= TARGET_ELOOP,
557     [ERESTART]		= TARGET_ERESTART,
558     [ESTRPIPE]		= TARGET_ESTRPIPE,
559     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
560     [EUSERS]		= TARGET_EUSERS,
561     [ENOTSOCK]		= TARGET_ENOTSOCK,
562     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
563     [EMSGSIZE]		= TARGET_EMSGSIZE,
564     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
565     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
566     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
567     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
568     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
569     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
570     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
571     [EADDRINUSE]	= TARGET_EADDRINUSE,
572     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
573     [ENETDOWN]		= TARGET_ENETDOWN,
574     [ENETUNREACH]	= TARGET_ENETUNREACH,
575     [ENETRESET]		= TARGET_ENETRESET,
576     [ECONNABORTED]	= TARGET_ECONNABORTED,
577     [ECONNRESET]	= TARGET_ECONNRESET,
578     [ENOBUFS]		= TARGET_ENOBUFS,
579     [EISCONN]		= TARGET_EISCONN,
580     [ENOTCONN]		= TARGET_ENOTCONN,
581     [EUCLEAN]		= TARGET_EUCLEAN,
582     [ENOTNAM]		= TARGET_ENOTNAM,
583     [ENAVAIL]		= TARGET_ENAVAIL,
584     [EISNAM]		= TARGET_EISNAM,
585     [EREMOTEIO]		= TARGET_EREMOTEIO,
586     [EDQUOT]            = TARGET_EDQUOT,
587     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
588     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
589     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
590     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
591     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
592     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
593     [EALREADY]		= TARGET_EALREADY,
594     [EINPROGRESS]	= TARGET_EINPROGRESS,
595     [ESTALE]		= TARGET_ESTALE,
596     [ECANCELED]		= TARGET_ECANCELED,
597     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
598     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
599 #ifdef ENOKEY
600     [ENOKEY]		= TARGET_ENOKEY,
601 #endif
602 #ifdef EKEYEXPIRED
603     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
604 #endif
605 #ifdef EKEYREVOKED
606     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
607 #endif
608 #ifdef EKEYREJECTED
609     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
610 #endif
611 #ifdef EOWNERDEAD
612     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
613 #endif
614 #ifdef ENOTRECOVERABLE
615     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
616 #endif
617 #ifdef ENOMSG
618     [ENOMSG]            = TARGET_ENOMSG,
619 #endif
620 #ifdef ERKFILL
621     [ERFKILL]           = TARGET_ERFKILL,
622 #endif
623 #ifdef EHWPOISON
624     [EHWPOISON]         = TARGET_EHWPOISON,
625 #endif
626 };
627 
628 static inline int host_to_target_errno(int err)
629 {
630     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
631         host_to_target_errno_table[err]) {
632         return host_to_target_errno_table[err];
633     }
634     return err;
635 }
636 
637 static inline int target_to_host_errno(int err)
638 {
639     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
640         target_to_host_errno_table[err]) {
641         return target_to_host_errno_table[err];
642     }
643     return err;
644 }
645 
646 static inline abi_long get_errno(abi_long ret)
647 {
648     if (ret == -1)
649         return -host_to_target_errno(errno);
650     else
651         return ret;
652 }
653 
654 const char *target_strerror(int err)
655 {
656     if (err == TARGET_ERESTARTSYS) {
657         return "To be restarted";
658     }
659     if (err == TARGET_QEMU_ESIGRETURN) {
660         return "Successful exit from sigreturn";
661     }
662 
663     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
664         return NULL;
665     }
666     return strerror(target_to_host_errno(err));
667 }
668 
/*
 * safe_syscallN() wrapper generators.  Like _syscallN() above, but the
 * generated functions go through safe_syscall() rather than syscall().
 * NOTE(review): safe_syscall() is provided by qemu.h; it appears to
 * exist so a guest signal arriving around a blocking host syscall is
 * handled safely — see its definition for the precise guarantees.
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
715 
/* Instantiate safe_* wrappers for the potentially-blocking host
 * syscalls used by the emulation below.
 */
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_msgsnd
/* Hosts with separate SysV IPC syscalls: wrap them directly. */
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#else
/* This host kernel architecture uses a single ipc syscall; fake up
 * wrappers for the sub-operations to hide this implementation detail.
 * Annoyingly we can't include linux/ipc.h to get the constant definitions
 * for the call parameter because some structs in there conflict with the
 * sys/ipc.h ones. So we just define them here, and rely on them being
 * the same for all host architectures.
 */
#define Q_SEMTIMEDOP 4
#define Q_MSGSND 11
#define Q_MSGRCV 12
#define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))

safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
{
    return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
}
/* NOTE(review): MSGRCV uses IPCCALL version 1, which appears to pass
 * msgp and msgtype as separate arguments rather than via a struct
 * ipc_kludge — confirm against the host's ipc(2) implementation.
 */
static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
{
    return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
}
static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
                           const struct timespec *timeout)
{
    return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
                    (long)timeout);
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.  (The variadic macro forwards however many arguments
 * the caller supplies straight to safe_syscall().)
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
822 
823 static inline int host_to_target_sock_type(int host_type)
824 {
825     int target_type;
826 
827     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
828     case SOCK_DGRAM:
829         target_type = TARGET_SOCK_DGRAM;
830         break;
831     case SOCK_STREAM:
832         target_type = TARGET_SOCK_STREAM;
833         break;
834     default:
835         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
836         break;
837     }
838 
839 #if defined(SOCK_CLOEXEC)
840     if (host_type & SOCK_CLOEXEC) {
841         target_type |= TARGET_SOCK_CLOEXEC;
842     }
843 #endif
844 
845 #if defined(SOCK_NONBLOCK)
846     if (host_type & SOCK_NONBLOCK) {
847         target_type |= TARGET_SOCK_NONBLOCK;
848     }
849 #endif
850 
851     return target_type;
852 }
853 
/* Guest heap state: current program break, the initial break set at exec
 * time, and the top of the host pages currently reserved for the heap.
 */
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

/* Record the initial program break for the guest, rounded up to a host
 * page boundary; called once during image load.
 */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

/* Uncomment the first definition to trace brk handling. */
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
866 
867 /* do_brk() must return target values and target errnos. */
/* do_brk() must return target values and target errnos.
 *
 * Implements the guest brk(2): moving the break within the already
 * reserved pages just zeroes the newly exposed range; growing past
 * brk_page maps fresh anonymous memory at the expected address and
 * treats "mapped elsewhere" as out-of-memory.
 */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is the conventional "query current break" call. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Refuse to shrink below the initial break; report current break. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
	target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
	return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
            target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
944 
/* Convert a guest fd_set (an array of abi_ulong words, guest byte order)
 * covering fds [0, n) into a host fd_set.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    /* Number of guest words needed to hold n bits. */
    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;  /* running fd number across all words */
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
976 
977 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
978                                                  abi_ulong target_fds_addr,
979                                                  int n)
980 {
981     if (target_fds_addr) {
982         if (copy_from_user_fdset(fds, target_fds_addr, n))
983             return -TARGET_EFAULT;
984         *fds_ptr = fds;
985     } else {
986         *fds_ptr = NULL;
987     }
988     return 0;
989 }
990 
/* Convert a host fd_set covering fds [0, n) back into the guest's
 * abi_ulong-word representation.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    /* Number of guest words needed to hold n bits. */
    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;  /* running fd number across all words */
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
1020 
/* Host clock tick rate (HZ): Alpha Linux uses 1024, everything else 100. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a clock_t tick count from the host's HZ to the target's;
 * widened to 64 bits to avoid overflow in the multiply.
 */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
1035 
/* Copy a host struct rusage into guest memory at target_addr,
 * byteswapping each field.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1065 
/* Convert a guest rlimit value (guest byte order) to a host rlim_t.
 * The guest's RLIM_INFINITY maps to the host's, and any value that
 * does not fit in rlim_t is also treated as infinity.
 */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    /* Round-trip check: if the value was truncated, fall back to infinity. */
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
1081 
/* Convert a host rlim_t to the guest representation (guest byte order).
 * Host infinity, and any value too large for the guest's abi_long,
 * becomes the guest's RLIM_INFINITY.
 */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
1095 
/* Map a guest RLIMIT_* resource code onto the host's constant.
 * Unknown codes are passed through unchanged so the host syscall
 * can reject them itself.
 */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
1133 
1134 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1135                                               abi_ulong target_tv_addr)
1136 {
1137     struct target_timeval *target_tv;
1138 
1139     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1140         return -TARGET_EFAULT;
1141 
1142     __get_user(tv->tv_sec, &target_tv->tv_sec);
1143     __get_user(tv->tv_usec, &target_tv->tv_usec);
1144 
1145     unlock_user_struct(target_tv, target_tv_addr, 0);
1146 
1147     return 0;
1148 }
1149 
1150 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1151                                             const struct timeval *tv)
1152 {
1153     struct target_timeval *target_tv;
1154 
1155     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1156         return -TARGET_EFAULT;
1157 
1158     __put_user(tv->tv_sec, &target_tv->tv_sec);
1159     __put_user(tv->tv_usec, &target_tv->tv_usec);
1160 
1161     unlock_user_struct(target_tv, target_tv_addr, 1);
1162 
1163     return 0;
1164 }
1165 
1166 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1167                                                abi_ulong target_tz_addr)
1168 {
1169     struct target_timezone *target_tz;
1170 
1171     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1172         return -TARGET_EFAULT;
1173     }
1174 
1175     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1176     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1177 
1178     unlock_user_struct(target_tz, target_tz_addr, 0);
1179 
1180     return 0;
1181 }
1182 
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Read a struct mq_attr from guest memory into *attr, byteswapping the
 * fields.  Returns 0 on success, -TARGET_EFAULT on a bad address.
 */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}
1204 
/* Write *attr into guest memory as a struct target_mq_attr, byteswapping
 * the fields.  Returns 0 on success, -TARGET_EFAULT on a bad address.
 */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
1224 
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 *
 * Implements the guest select(2) on top of host pselect6: the three
 * guest fd_sets and the optional timeval are converted to host form,
 * the syscall is made, and on success the (possibly modified) sets and
 * remaining timeout are copied back to the guest.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* pselect6 takes a timespec, so convert the guest's timeval. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        /* Copy back the result sets the kernel modified in place. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Linux select() updates the timeout with the time remaining;
         * propagate that back to the guest.
         */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1282 
#if defined(TARGET_WANT_OLD_SYS_SELECT)
/* Handle the old single-argument form of select(): the guest passes a
 * pointer to a struct containing all five arguments, which we unpack
 * (byteswapping each) and forward to do_select().
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif
1306 
1307 static abi_long do_pipe2(int host_pipe[], int flags)
1308 {
1309 #ifdef CONFIG_PIPE2
1310     return pipe2(host_pipe, flags);
1311 #else
1312     return -ENOSYS;
1313 #endif
1314 }
1315 
/* Common implementation for the guest pipe() and pipe2() syscalls.
 * flags != 0 forces the pipe2() path; is_pipe2 selects whether the
 * target-specific "return both fds in registers" convention applies.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* Second fd returned in a register; first fd is the return value. */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default convention: write both fds into the guest's int[2] array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1349 
/* Convert a guest ip_mreq/ip_mreqn to host form.  The multicast and
 * interface addresses are already in network byte order and are copied
 * verbatim; imr_ifindex is only present (and byteswapped) when the
 * guest passed the larger ip_mreqn variant.
 */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
1367 
/* Convert a guest sockaddr at target_addr into the host sockaddr *addr.
 * Handles per-fd address translators, the AF_UNIX sun_path length
 * quirk, and byteswapping of AF_NETLINK / AF_PACKET fields.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    /* Some fd types (e.g. netlink) install their own translator. */
    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Extend len by one if the last byte is non-NUL but the
             * following byte is the missing terminator.
             */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
	struct target_sockaddr_ll *lladdr;

	lladdr = (struct target_sockaddr_ll *)addr;
	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1424 
1425 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1426                                                struct sockaddr *addr,
1427                                                socklen_t len)
1428 {
1429     struct target_sockaddr *target_saddr;
1430 
1431     if (len == 0) {
1432         return 0;
1433     }
1434     assert(addr);
1435 
1436     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1437     if (!target_saddr)
1438         return -TARGET_EFAULT;
1439     memcpy(target_saddr, addr, len);
1440     if (len >= offsetof(struct target_sockaddr, sa_family) +
1441         sizeof(target_saddr->sa_family)) {
1442         target_saddr->sa_family = tswap16(addr->sa_family);
1443     }
1444     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1445         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1446         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1447         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1448     } else if (addr->sa_family == AF_PACKET) {
1449         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1450         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1451         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1452     } else if (addr->sa_family == AF_INET6 &&
1453                len >= sizeof(struct target_sockaddr_in6)) {
1454         struct target_sockaddr_in6 *target_in6 =
1455                (struct target_sockaddr_in6 *)target_saddr;
1456         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1457     }
1458     unlock_user(target_saddr, target_addr, len);
1459 
1460     return 0;
1461 }
1462 
/* Convert the control-message (cmsg) chain attached to a guest msghdr
 * into host form inside msgh's control buffer.  Understands SCM_RIGHTS
 * (fd arrays) and SCM_CREDENTIALS; anything else is copied raw with a
 * warning.  On return msgh->msg_controllen holds the space consumed.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length of this guest control message. */
        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* File descriptor passing: byteswap each fd individually. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
               &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* Unknown type: pass payload through without byteswapping. */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                                        cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1544 
1545 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1546                                            struct msghdr *msgh)
1547 {
1548     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1549     abi_long msg_controllen;
1550     abi_ulong target_cmsg_addr;
1551     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1552     socklen_t space = 0;
1553 
1554     msg_controllen = tswapal(target_msgh->msg_controllen);
1555     if (msg_controllen < sizeof (struct target_cmsghdr))
1556         goto the_end;
1557     target_cmsg_addr = tswapal(target_msgh->msg_control);
1558     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1559     target_cmsg_start = target_cmsg;
1560     if (!target_cmsg)
1561         return -TARGET_EFAULT;
1562 
1563     while (cmsg && target_cmsg) {
1564         void *data = CMSG_DATA(cmsg);
1565         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1566 
1567         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1568         int tgt_len, tgt_space;
1569 
1570         /* We never copy a half-header but may copy half-data;
1571          * this is Linux's behaviour in put_cmsg(). Note that
1572          * truncation here is a guest problem (which we report
1573          * to the guest via the CTRUNC bit), unlike truncation
1574          * in target_to_host_cmsg, which is a QEMU bug.
1575          */
1576         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1577             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1578             break;
1579         }
1580 
1581         if (cmsg->cmsg_level == SOL_SOCKET) {
1582             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1583         } else {
1584             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1585         }
1586         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1587 
1588         /* Payload types which need a different size of payload on
1589          * the target must adjust tgt_len here.
1590          */
1591         tgt_len = len;
1592         switch (cmsg->cmsg_level) {
1593         case SOL_SOCKET:
1594             switch (cmsg->cmsg_type) {
1595             case SO_TIMESTAMP:
1596                 tgt_len = sizeof(struct target_timeval);
1597                 break;
1598             default:
1599                 break;
1600             }
1601             break;
1602         default:
1603             break;
1604         }
1605 
1606         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1607             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1608             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1609         }
1610 
1611         /* We must now copy-and-convert len bytes of payload
1612          * into tgt_len bytes of destination space. Bear in mind
1613          * that in both source and destination we may be dealing
1614          * with a truncated value!
1615          */
1616         switch (cmsg->cmsg_level) {
1617         case SOL_SOCKET:
1618             switch (cmsg->cmsg_type) {
1619             case SCM_RIGHTS:
1620             {
1621                 int *fd = (int *)data;
1622                 int *target_fd = (int *)target_data;
1623                 int i, numfds = tgt_len / sizeof(int);
1624 
1625                 for (i = 0; i < numfds; i++) {
1626                     __put_user(fd[i], target_fd + i);
1627                 }
1628                 break;
1629             }
1630             case SO_TIMESTAMP:
1631             {
1632                 struct timeval *tv = (struct timeval *)data;
1633                 struct target_timeval *target_tv =
1634                     (struct target_timeval *)target_data;
1635 
1636                 if (len != sizeof(struct timeval) ||
1637                     tgt_len != sizeof(struct target_timeval)) {
1638                     goto unimplemented;
1639                 }
1640 
1641                 /* copy struct timeval to target */
1642                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1643                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1644                 break;
1645             }
1646             case SCM_CREDENTIALS:
1647             {
1648                 struct ucred *cred = (struct ucred *)data;
1649                 struct target_ucred *target_cred =
1650                     (struct target_ucred *)target_data;
1651 
1652                 __put_user(cred->pid, &target_cred->pid);
1653                 __put_user(cred->uid, &target_cred->uid);
1654                 __put_user(cred->gid, &target_cred->gid);
1655                 break;
1656             }
1657             default:
1658                 goto unimplemented;
1659             }
1660             break;
1661 
1662         case SOL_IP:
1663             switch (cmsg->cmsg_type) {
1664             case IP_TTL:
1665             {
1666                 uint32_t *v = (uint32_t *)data;
1667                 uint32_t *t_int = (uint32_t *)target_data;
1668 
1669                 if (len != sizeof(uint32_t) ||
1670                     tgt_len != sizeof(uint32_t)) {
1671                     goto unimplemented;
1672                 }
1673                 __put_user(*v, t_int);
1674                 break;
1675             }
1676             case IP_RECVERR:
1677             {
1678                 struct errhdr_t {
1679                    struct sock_extended_err ee;
1680                    struct sockaddr_in offender;
1681                 };
1682                 struct errhdr_t *errh = (struct errhdr_t *)data;
1683                 struct errhdr_t *target_errh =
1684                     (struct errhdr_t *)target_data;
1685 
1686                 if (len != sizeof(struct errhdr_t) ||
1687                     tgt_len != sizeof(struct errhdr_t)) {
1688                     goto unimplemented;
1689                 }
1690                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1691                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1692                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1693                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1694                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1695                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1696                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1697                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1698                     (void *) &errh->offender, sizeof(errh->offender));
1699                 break;
1700             }
1701             default:
1702                 goto unimplemented;
1703             }
1704             break;
1705 
1706         case SOL_IPV6:
1707             switch (cmsg->cmsg_type) {
1708             case IPV6_HOPLIMIT:
1709             {
1710                 uint32_t *v = (uint32_t *)data;
1711                 uint32_t *t_int = (uint32_t *)target_data;
1712 
1713                 if (len != sizeof(uint32_t) ||
1714                     tgt_len != sizeof(uint32_t)) {
1715                     goto unimplemented;
1716                 }
1717                 __put_user(*v, t_int);
1718                 break;
1719             }
1720             case IPV6_RECVERR:
1721             {
1722                 struct errhdr6_t {
1723                    struct sock_extended_err ee;
1724                    struct sockaddr_in6 offender;
1725                 };
1726                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1727                 struct errhdr6_t *target_errh =
1728                     (struct errhdr6_t *)target_data;
1729 
1730                 if (len != sizeof(struct errhdr6_t) ||
1731                     tgt_len != sizeof(struct errhdr6_t)) {
1732                     goto unimplemented;
1733                 }
1734                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1735                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1736                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1737                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1738                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1739                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1740                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1741                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1742                     (void *) &errh->offender, sizeof(errh->offender));
1743                 break;
1744             }
1745             default:
1746                 goto unimplemented;
1747             }
1748             break;
1749 
1750         default:
1751         unimplemented:
1752             gemu_log("Unsupported ancillary data: %d/%d\n",
1753                                         cmsg->cmsg_level, cmsg->cmsg_type);
1754             memcpy(target_data, data, MIN(len, tgt_len));
1755             if (tgt_len > len) {
1756                 memset(target_data + len, 0, tgt_len - len);
1757             }
1758         }
1759 
1760         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1761         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1762         if (msg_controllen < tgt_space) {
1763             tgt_space = msg_controllen;
1764         }
1765         msg_controllen -= tgt_space;
1766         space += tgt_space;
1767         cmsg = CMSG_NXTHDR(msgh, cmsg);
1768         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1769                                          target_cmsg_start);
1770     }
1771     unlock_user(target_cmsg, target_cmsg_addr, space);
1772  the_end:
1773     target_msgh->msg_controllen = tswapal(space);
1774     return 0;
1775 }
1776 
1777 /* do_setsockopt() Must return target values and target errnos. */
1778 static abi_long do_setsockopt(int sockfd, int level, int optname,
1779                               abi_ulong optval_addr, socklen_t optlen)
1780 {
1781     abi_long ret;
1782     int val;
1783     struct ip_mreqn *ip_mreq;
1784     struct ip_mreq_source *ip_mreq_source;
1785 
1786     switch(level) {
1787     case SOL_TCP:
1788         /* TCP options all take an 'int' value.  */
1789         if (optlen < sizeof(uint32_t))
1790             return -TARGET_EINVAL;
1791 
1792         if (get_user_u32(val, optval_addr))
1793             return -TARGET_EFAULT;
1794         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1795         break;
1796     case SOL_IP:
1797         switch(optname) {
1798         case IP_TOS:
1799         case IP_TTL:
1800         case IP_HDRINCL:
1801         case IP_ROUTER_ALERT:
1802         case IP_RECVOPTS:
1803         case IP_RETOPTS:
1804         case IP_PKTINFO:
1805         case IP_MTU_DISCOVER:
1806         case IP_RECVERR:
1807         case IP_RECVTTL:
1808         case IP_RECVTOS:
1809 #ifdef IP_FREEBIND
1810         case IP_FREEBIND:
1811 #endif
1812         case IP_MULTICAST_TTL:
1813         case IP_MULTICAST_LOOP:
1814             val = 0;
1815             if (optlen >= sizeof(uint32_t)) {
1816                 if (get_user_u32(val, optval_addr))
1817                     return -TARGET_EFAULT;
1818             } else if (optlen >= 1) {
1819                 if (get_user_u8(val, optval_addr))
1820                     return -TARGET_EFAULT;
1821             }
1822             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1823             break;
1824         case IP_ADD_MEMBERSHIP:
1825         case IP_DROP_MEMBERSHIP:
1826             if (optlen < sizeof (struct target_ip_mreq) ||
1827                 optlen > sizeof (struct target_ip_mreqn))
1828                 return -TARGET_EINVAL;
1829 
1830             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1831             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1832             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1833             break;
1834 
1835         case IP_BLOCK_SOURCE:
1836         case IP_UNBLOCK_SOURCE:
1837         case IP_ADD_SOURCE_MEMBERSHIP:
1838         case IP_DROP_SOURCE_MEMBERSHIP:
1839             if (optlen != sizeof (struct target_ip_mreq_source))
1840                 return -TARGET_EINVAL;
1841 
1842             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1843             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1844             unlock_user (ip_mreq_source, optval_addr, 0);
1845             break;
1846 
1847         default:
1848             goto unimplemented;
1849         }
1850         break;
1851     case SOL_IPV6:
1852         switch (optname) {
1853         case IPV6_MTU_DISCOVER:
1854         case IPV6_MTU:
1855         case IPV6_V6ONLY:
1856         case IPV6_RECVPKTINFO:
1857         case IPV6_UNICAST_HOPS:
1858         case IPV6_MULTICAST_HOPS:
1859         case IPV6_MULTICAST_LOOP:
1860         case IPV6_RECVERR:
1861         case IPV6_RECVHOPLIMIT:
1862         case IPV6_2292HOPLIMIT:
1863         case IPV6_CHECKSUM:
1864         case IPV6_ADDRFORM:
1865         case IPV6_2292PKTINFO:
1866         case IPV6_RECVTCLASS:
1867         case IPV6_RECVRTHDR:
1868         case IPV6_2292RTHDR:
1869         case IPV6_RECVHOPOPTS:
1870         case IPV6_2292HOPOPTS:
1871         case IPV6_RECVDSTOPTS:
1872         case IPV6_2292DSTOPTS:
1873         case IPV6_TCLASS:
1874 #ifdef IPV6_RECVPATHMTU
1875         case IPV6_RECVPATHMTU:
1876 #endif
1877 #ifdef IPV6_TRANSPARENT
1878         case IPV6_TRANSPARENT:
1879 #endif
1880 #ifdef IPV6_FREEBIND
1881         case IPV6_FREEBIND:
1882 #endif
1883 #ifdef IPV6_RECVORIGDSTADDR
1884         case IPV6_RECVORIGDSTADDR:
1885 #endif
1886             val = 0;
1887             if (optlen < sizeof(uint32_t)) {
1888                 return -TARGET_EINVAL;
1889             }
1890             if (get_user_u32(val, optval_addr)) {
1891                 return -TARGET_EFAULT;
1892             }
1893             ret = get_errno(setsockopt(sockfd, level, optname,
1894                                        &val, sizeof(val)));
1895             break;
1896         case IPV6_PKTINFO:
1897         {
1898             struct in6_pktinfo pki;
1899 
1900             if (optlen < sizeof(pki)) {
1901                 return -TARGET_EINVAL;
1902             }
1903 
1904             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1905                 return -TARGET_EFAULT;
1906             }
1907 
1908             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1909 
1910             ret = get_errno(setsockopt(sockfd, level, optname,
1911                                        &pki, sizeof(pki)));
1912             break;
1913         }
1914         default:
1915             goto unimplemented;
1916         }
1917         break;
1918     case SOL_ICMPV6:
1919         switch (optname) {
1920         case ICMPV6_FILTER:
1921         {
1922             struct icmp6_filter icmp6f;
1923 
1924             if (optlen > sizeof(icmp6f)) {
1925                 optlen = sizeof(icmp6f);
1926             }
1927 
1928             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
1929                 return -TARGET_EFAULT;
1930             }
1931 
1932             for (val = 0; val < 8; val++) {
1933                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
1934             }
1935 
1936             ret = get_errno(setsockopt(sockfd, level, optname,
1937                                        &icmp6f, optlen));
1938             break;
1939         }
1940         default:
1941             goto unimplemented;
1942         }
1943         break;
1944     case SOL_RAW:
1945         switch (optname) {
1946         case ICMP_FILTER:
1947         case IPV6_CHECKSUM:
1948             /* those take an u32 value */
1949             if (optlen < sizeof(uint32_t)) {
1950                 return -TARGET_EINVAL;
1951             }
1952 
1953             if (get_user_u32(val, optval_addr)) {
1954                 return -TARGET_EFAULT;
1955             }
1956             ret = get_errno(setsockopt(sockfd, level, optname,
1957                                        &val, sizeof(val)));
1958             break;
1959 
1960         default:
1961             goto unimplemented;
1962         }
1963         break;
1964     case TARGET_SOL_SOCKET:
1965         switch (optname) {
1966         case TARGET_SO_RCVTIMEO:
1967         {
1968                 struct timeval tv;
1969 
1970                 optname = SO_RCVTIMEO;
1971 
1972 set_timeout:
1973                 if (optlen != sizeof(struct target_timeval)) {
1974                     return -TARGET_EINVAL;
1975                 }
1976 
1977                 if (copy_from_user_timeval(&tv, optval_addr)) {
1978                     return -TARGET_EFAULT;
1979                 }
1980 
1981                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1982                                 &tv, sizeof(tv)));
1983                 return ret;
1984         }
1985         case TARGET_SO_SNDTIMEO:
1986                 optname = SO_SNDTIMEO;
1987                 goto set_timeout;
1988         case TARGET_SO_ATTACH_FILTER:
1989         {
1990                 struct target_sock_fprog *tfprog;
1991                 struct target_sock_filter *tfilter;
1992                 struct sock_fprog fprog;
1993                 struct sock_filter *filter;
1994                 int i;
1995 
1996                 if (optlen != sizeof(*tfprog)) {
1997                     return -TARGET_EINVAL;
1998                 }
1999                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2000                     return -TARGET_EFAULT;
2001                 }
2002                 if (!lock_user_struct(VERIFY_READ, tfilter,
2003                                       tswapal(tfprog->filter), 0)) {
2004                     unlock_user_struct(tfprog, optval_addr, 1);
2005                     return -TARGET_EFAULT;
2006                 }
2007 
2008                 fprog.len = tswap16(tfprog->len);
2009                 filter = g_try_new(struct sock_filter, fprog.len);
2010                 if (filter == NULL) {
2011                     unlock_user_struct(tfilter, tfprog->filter, 1);
2012                     unlock_user_struct(tfprog, optval_addr, 1);
2013                     return -TARGET_ENOMEM;
2014                 }
2015                 for (i = 0; i < fprog.len; i++) {
2016                     filter[i].code = tswap16(tfilter[i].code);
2017                     filter[i].jt = tfilter[i].jt;
2018                     filter[i].jf = tfilter[i].jf;
2019                     filter[i].k = tswap32(tfilter[i].k);
2020                 }
2021                 fprog.filter = filter;
2022 
2023                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2024                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2025                 g_free(filter);
2026 
2027                 unlock_user_struct(tfilter, tfprog->filter, 1);
2028                 unlock_user_struct(tfprog, optval_addr, 1);
2029                 return ret;
2030         }
2031 	case TARGET_SO_BINDTODEVICE:
2032 	{
2033 		char *dev_ifname, *addr_ifname;
2034 
2035 		if (optlen > IFNAMSIZ - 1) {
2036 		    optlen = IFNAMSIZ - 1;
2037 		}
2038 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2039 		if (!dev_ifname) {
2040 		    return -TARGET_EFAULT;
2041 		}
2042 		optname = SO_BINDTODEVICE;
2043 		addr_ifname = alloca(IFNAMSIZ);
2044 		memcpy(addr_ifname, dev_ifname, optlen);
2045 		addr_ifname[optlen] = 0;
2046 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2047                                            addr_ifname, optlen));
2048 		unlock_user (dev_ifname, optval_addr, 0);
2049 		return ret;
2050 	}
2051         case TARGET_SO_LINGER:
2052         {
2053                 struct linger lg;
2054                 struct target_linger *tlg;
2055 
2056                 if (optlen != sizeof(struct target_linger)) {
2057                     return -TARGET_EINVAL;
2058                 }
2059                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2060                     return -TARGET_EFAULT;
2061                 }
2062                 __get_user(lg.l_onoff, &tlg->l_onoff);
2063                 __get_user(lg.l_linger, &tlg->l_linger);
2064                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2065                                 &lg, sizeof(lg)));
2066                 unlock_user_struct(tlg, optval_addr, 0);
2067                 return ret;
2068         }
2069             /* Options with 'int' argument.  */
2070         case TARGET_SO_DEBUG:
2071 		optname = SO_DEBUG;
2072 		break;
2073         case TARGET_SO_REUSEADDR:
2074 		optname = SO_REUSEADDR;
2075 		break;
2076 #ifdef SO_REUSEPORT
2077         case TARGET_SO_REUSEPORT:
2078                 optname = SO_REUSEPORT;
2079                 break;
2080 #endif
2081         case TARGET_SO_TYPE:
2082 		optname = SO_TYPE;
2083 		break;
2084         case TARGET_SO_ERROR:
2085 		optname = SO_ERROR;
2086 		break;
2087         case TARGET_SO_DONTROUTE:
2088 		optname = SO_DONTROUTE;
2089 		break;
2090         case TARGET_SO_BROADCAST:
2091 		optname = SO_BROADCAST;
2092 		break;
2093         case TARGET_SO_SNDBUF:
2094 		optname = SO_SNDBUF;
2095 		break;
2096         case TARGET_SO_SNDBUFFORCE:
2097                 optname = SO_SNDBUFFORCE;
2098                 break;
2099         case TARGET_SO_RCVBUF:
2100 		optname = SO_RCVBUF;
2101 		break;
2102         case TARGET_SO_RCVBUFFORCE:
2103                 optname = SO_RCVBUFFORCE;
2104                 break;
2105         case TARGET_SO_KEEPALIVE:
2106 		optname = SO_KEEPALIVE;
2107 		break;
2108         case TARGET_SO_OOBINLINE:
2109 		optname = SO_OOBINLINE;
2110 		break;
2111         case TARGET_SO_NO_CHECK:
2112 		optname = SO_NO_CHECK;
2113 		break;
2114         case TARGET_SO_PRIORITY:
2115 		optname = SO_PRIORITY;
2116 		break;
2117 #ifdef SO_BSDCOMPAT
2118         case TARGET_SO_BSDCOMPAT:
2119 		optname = SO_BSDCOMPAT;
2120 		break;
2121 #endif
2122         case TARGET_SO_PASSCRED:
2123 		optname = SO_PASSCRED;
2124 		break;
2125         case TARGET_SO_PASSSEC:
2126                 optname = SO_PASSSEC;
2127                 break;
2128         case TARGET_SO_TIMESTAMP:
2129 		optname = SO_TIMESTAMP;
2130 		break;
2131         case TARGET_SO_RCVLOWAT:
2132 		optname = SO_RCVLOWAT;
2133 		break;
2134         default:
2135             goto unimplemented;
2136         }
2137 	if (optlen < sizeof(uint32_t))
2138             return -TARGET_EINVAL;
2139 
2140 	if (get_user_u32(val, optval_addr))
2141             return -TARGET_EFAULT;
2142 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2143         break;
2144     default:
2145     unimplemented:
2146         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2147         ret = -TARGET_ENOPROTOOPT;
2148     }
2149     return ret;
2150 }
2151 
/* do_getsockopt() Must return target values and target errnos.
 *
 * Translate a guest getsockopt(2) request: fetch the option from the
 * host socket, convert the result to target byte order/layout, and
 * write both the value and the (possibly shortened) length back to
 * guest memory.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            /* Guest-supplied buffer length; clamped to the actual
             * host result size below.  */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            /* Convert struct ucred field by field to target order.  */
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            socklen_t lglen;
            struct target_linger *tlg;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            lglen = sizeof(lg);
            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                                       &lg, &lglen));
            if (ret < 0) {
                return ret;
            }
            if (len > lglen) {
                len = lglen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(lg.l_onoff, &tlg->l_onoff);
            __put_user(lg.l_linger, &tlg->l_linger);
            unlock_user_struct(tlg, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            goto int_case;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            /* Unknown socket options are passed through unchanged and
             * attempted as plain 'int' options.  */
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): initialized from sizeof(lv) (socklen_t) but
         * used as the buffer length for the int 'val'; the two sizes
         * coincide on supported hosts -- confirm.  */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        /* Write back either a full 32-bit value or, if the guest asked
         * for less than 4 bytes, a single byte.  */
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* Small-value case: the guest supplied a short buffer and
             * the result fits in one byte, so return it as a char.  */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* Same short-buffer handling as the SOL_IP cases above.  */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
2437 
2438 /* Convert target low/high pair representing file offset into the host
2439  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2440  * as the kernel doesn't handle them either.
2441  */
2442 static void target_to_host_low_high(abi_ulong tlow,
2443                                     abi_ulong thigh,
2444                                     unsigned long *hlow,
2445                                     unsigned long *hhigh)
2446 {
2447     uint64_t off = tlow |
2448         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2449         TARGET_LONG_BITS / 2;
2450 
2451     *hlow = off;
2452     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2453 }
2454 
/* Build a host struct iovec array from the guest iovec array at
 * target_addr, locking each guest buffer into host memory.  Returns
 * the host vector on success (release it with unlock_iovec()), or
 * NULL with errno set on failure.  A count of 0 also returns NULL but
 * with errno == 0, so callers must check errno to distinguish the two.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    /* All remaining entries are forced to zero length
                     * below, so nothing past this point is written.  */
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the cumulative length never exceeds max_len.  */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: unlock every buffer locked so far.  Entries with a zero
     * guest length were never locked (see above) and are skipped.  */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
2542 
2543 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2544                          abi_ulong count, int copy)
2545 {
2546     struct target_iovec *target_vec;
2547     int i;
2548 
2549     target_vec = lock_user(VERIFY_READ, target_addr,
2550                            count * sizeof(struct target_iovec), 1);
2551     if (target_vec) {
2552         for (i = 0; i < count; i++) {
2553             abi_ulong base = tswapal(target_vec[i].iov_base);
2554             abi_long len = tswapal(target_vec[i].iov_len);
2555             if (len < 0) {
2556                 break;
2557             }
2558             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2559         }
2560         unlock_user(target_vec, target_addr, 0);
2561     }
2562 
2563     g_free(vec);
2564 }
2565 
2566 static inline int target_to_host_sock_type(int *type)
2567 {
2568     int host_type = 0;
2569     int target_type = *type;
2570 
2571     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2572     case TARGET_SOCK_DGRAM:
2573         host_type = SOCK_DGRAM;
2574         break;
2575     case TARGET_SOCK_STREAM:
2576         host_type = SOCK_STREAM;
2577         break;
2578     default:
2579         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2580         break;
2581     }
2582     if (target_type & TARGET_SOCK_CLOEXEC) {
2583 #if defined(SOCK_CLOEXEC)
2584         host_type |= SOCK_CLOEXEC;
2585 #else
2586         return -TARGET_EINVAL;
2587 #endif
2588     }
2589     if (target_type & TARGET_SOCK_NONBLOCK) {
2590 #if defined(SOCK_NONBLOCK)
2591         host_type |= SOCK_NONBLOCK;
2592 #elif !defined(O_NONBLOCK)
2593         return -TARGET_EINVAL;
2594 #endif
2595     }
2596     *type = host_type;
2597     return 0;
2598 }
2599 
2600 /* Try to emulate socket type flags after socket creation.  */
2601 static int sock_flags_fixup(int fd, int target_type)
2602 {
2603 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2604     if (target_type & TARGET_SOCK_NONBLOCK) {
2605         int flags = fcntl(fd, F_GETFL);
2606         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2607             close(fd);
2608             return -TARGET_EINVAL;
2609         }
2610     }
2611 #endif
2612     return fd;
2613 }
2614 
2615 /* do_socket() Must return target values and target errnos. */
2616 static abi_long do_socket(int domain, int type, int protocol)
2617 {
2618     int target_type = type;
2619     int ret;
2620 
2621     ret = target_to_host_sock_type(&type);
2622     if (ret) {
2623         return ret;
2624     }
2625 
2626     if (domain == PF_NETLINK && !(
2627 #ifdef CONFIG_RTNETLINK
2628          protocol == NETLINK_ROUTE ||
2629 #endif
2630          protocol == NETLINK_KOBJECT_UEVENT ||
2631          protocol == NETLINK_AUDIT)) {
2632         return -EPFNOSUPPORT;
2633     }
2634 
2635     if (domain == AF_PACKET ||
2636         (domain == AF_INET && type == SOCK_PACKET)) {
2637         protocol = tswap16(protocol);
2638     }
2639 
2640     ret = get_errno(socket(domain, type, protocol));
2641     if (ret >= 0) {
2642         ret = sock_flags_fixup(ret, target_type);
2643         if (type == SOCK_PACKET) {
2644             /* Manage an obsolete case :
2645              * if socket type is SOCK_PACKET, bind by name
2646              */
2647             fd_trans_register(ret, &target_packet_trans);
2648         } else if (domain == PF_NETLINK) {
2649             switch (protocol) {
2650 #ifdef CONFIG_RTNETLINK
2651             case NETLINK_ROUTE:
2652                 fd_trans_register(ret, &target_netlink_route_trans);
2653                 break;
2654 #endif
2655             case NETLINK_KOBJECT_UEVENT:
2656                 /* nothing to do: messages are strings */
2657                 break;
2658             case NETLINK_AUDIT:
2659                 fd_trans_register(ret, &target_netlink_audit_trans);
2660                 break;
2661             default:
2662                 g_assert_not_reached();
2663             }
2664         }
2665     }
2666     return ret;
2667 }
2668 
2669 /* do_bind() Must return target values and target errnos. */
2670 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2671                         socklen_t addrlen)
2672 {
2673     void *addr;
2674     abi_long ret;
2675 
2676     if ((int)addrlen < 0) {
2677         return -TARGET_EINVAL;
2678     }
2679 
2680     addr = alloca(addrlen+1);
2681 
2682     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2683     if (ret)
2684         return ret;
2685 
2686     return get_errno(bind(sockfd, addr, addrlen));
2687 }
2688 
2689 /* do_connect() Must return target values and target errnos. */
2690 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2691                            socklen_t addrlen)
2692 {
2693     void *addr;
2694     abi_long ret;
2695 
2696     if ((int)addrlen < 0) {
2697         return -TARGET_EINVAL;
2698     }
2699 
2700     addr = alloca(addrlen+1);
2701 
2702     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2703     if (ret)
2704         return ret;
2705 
2706     return get_errno(safe_connect(sockfd, addr, addrlen));
2707 }
2708 
/* do_sendrecvmsg_locked() Must return target values and target errnos. */
/*
 * Shared backend for sendmsg/recvmsg emulation.  @msgp is the
 * already-locked guest msghdr; @send selects sendmsg (1) or
 * recvmsg (0).  Converts the name, control and iovec parts of the
 * message between guest and host representations around the host
 * syscall.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        /* +1 byte of scratch — presumably so target_to_host_sockaddr()
         * can NUL-terminate AF_UNIX paths; confirm against that helper */
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* 2x the guest size: leaves headroom for host cmsg headers that
     * may be larger than the target's during conversion */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        /* lock_iovec() reports its failure via the host errno */
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* Translate the payload in a private copy so the guest's
             * buffer is left untouched; note only the first iovec
             * element is translated here. */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                               MIN(msg.msg_iov->iov_len, len));
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                /* success: report the number of bytes received */
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
2815 
2816 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2817                                int flags, int send)
2818 {
2819     abi_long ret;
2820     struct target_msghdr *msgp;
2821 
2822     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2823                           msgp,
2824                           target_msg,
2825                           send ? 1 : 0)) {
2826         return -TARGET_EFAULT;
2827     }
2828     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2829     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2830     return ret;
2831 }
2832 
2833 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2834  * so it might not have this *mmsg-specific flag either.
2835  */
2836 #ifndef MSG_WAITFORONE
2837 #define MSG_WAITFORONE 0x10000
2838 #endif
2839 
2840 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2841                                 unsigned int vlen, unsigned int flags,
2842                                 int send)
2843 {
2844     struct target_mmsghdr *mmsgp;
2845     abi_long ret = 0;
2846     int i;
2847 
2848     if (vlen > UIO_MAXIOV) {
2849         vlen = UIO_MAXIOV;
2850     }
2851 
2852     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2853     if (!mmsgp) {
2854         return -TARGET_EFAULT;
2855     }
2856 
2857     for (i = 0; i < vlen; i++) {
2858         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2859         if (is_error(ret)) {
2860             break;
2861         }
2862         mmsgp[i].msg_len = tswap32(ret);
2863         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2864         if (flags & MSG_WAITFORONE) {
2865             flags |= MSG_DONTWAIT;
2866         }
2867     }
2868 
2869     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2870 
2871     /* Return number of datagrams sent if we sent any at all;
2872      * otherwise return the error.
2873      */
2874     if (i) {
2875         return i;
2876     }
2877     return ret;
2878 }
2879 
2880 /* do_accept4() Must return target values and target errnos. */
2881 static abi_long do_accept4(int fd, abi_ulong target_addr,
2882                            abi_ulong target_addrlen_addr, int flags)
2883 {
2884     socklen_t addrlen, ret_addrlen;
2885     void *addr;
2886     abi_long ret;
2887     int host_flags;
2888 
2889     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2890 
2891     if (target_addr == 0) {
2892         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2893     }
2894 
2895     /* linux returns EINVAL if addrlen pointer is invalid */
2896     if (get_user_u32(addrlen, target_addrlen_addr))
2897         return -TARGET_EINVAL;
2898 
2899     if ((int)addrlen < 0) {
2900         return -TARGET_EINVAL;
2901     }
2902 
2903     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2904         return -TARGET_EINVAL;
2905 
2906     addr = alloca(addrlen);
2907 
2908     ret_addrlen = addrlen;
2909     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
2910     if (!is_error(ret)) {
2911         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2912         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2913             ret = -TARGET_EFAULT;
2914         }
2915     }
2916     return ret;
2917 }
2918 
2919 /* do_getpeername() Must return target values and target errnos. */
2920 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2921                                abi_ulong target_addrlen_addr)
2922 {
2923     socklen_t addrlen, ret_addrlen;
2924     void *addr;
2925     abi_long ret;
2926 
2927     if (get_user_u32(addrlen, target_addrlen_addr))
2928         return -TARGET_EFAULT;
2929 
2930     if ((int)addrlen < 0) {
2931         return -TARGET_EINVAL;
2932     }
2933 
2934     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2935         return -TARGET_EFAULT;
2936 
2937     addr = alloca(addrlen);
2938 
2939     ret_addrlen = addrlen;
2940     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
2941     if (!is_error(ret)) {
2942         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2943         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2944             ret = -TARGET_EFAULT;
2945         }
2946     }
2947     return ret;
2948 }
2949 
2950 /* do_getsockname() Must return target values and target errnos. */
2951 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2952                                abi_ulong target_addrlen_addr)
2953 {
2954     socklen_t addrlen, ret_addrlen;
2955     void *addr;
2956     abi_long ret;
2957 
2958     if (get_user_u32(addrlen, target_addrlen_addr))
2959         return -TARGET_EFAULT;
2960 
2961     if ((int)addrlen < 0) {
2962         return -TARGET_EINVAL;
2963     }
2964 
2965     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2966         return -TARGET_EFAULT;
2967 
2968     addr = alloca(addrlen);
2969 
2970     ret_addrlen = addrlen;
2971     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
2972     if (!is_error(ret)) {
2973         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2974         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2975             ret = -TARGET_EFAULT;
2976         }
2977     }
2978     return ret;
2979 }
2980 
2981 /* do_socketpair() Must return target values and target errnos. */
2982 static abi_long do_socketpair(int domain, int type, int protocol,
2983                               abi_ulong target_tab_addr)
2984 {
2985     int tab[2];
2986     abi_long ret;
2987 
2988     target_to_host_sock_type(&type);
2989 
2990     ret = get_errno(socketpair(domain, type, protocol, tab));
2991     if (!is_error(ret)) {
2992         if (put_user_s32(tab[0], target_tab_addr)
2993             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2994             ret = -TARGET_EFAULT;
2995     }
2996     return ret;
2997 }
2998 
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;  /* original locked buffer when a translated copy is in use */
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* Translate the payload in a private copy so the guest's
         * buffer is left untouched. */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* +1 byte of scratch — presumably so target_to_host_sockaddr()
         * can NUL-terminate AF_UNIX paths; confirm against that helper */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    /* Free the translated copy (if any) and restore the pointer that
     * must be handed back to unlock_user(). */
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
3042 
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            /* Translate the received payload in place for special fds */
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            /* Copy back at most the guest's buffer size, but report
             * the full length the kernel produced. */
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* success: copy the received bytes back to guest memory */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* error: release the buffer without copying anything back */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
3098 
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
/*
 * Demultiplex the legacy socketcall(2) interface: @num selects the
 * operation and @vptr points at an array of abi_long arguments in
 * guest memory.  Arguments are fetched per the nargs[] table, then
 * dispatched to the individual do_*() helpers above.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3191 
#define N_SHM_REGIONS	32

/* Bookkeeping slots for guest SysV shared memory attachments —
 * presumably consulted by the shmat()/shmdt() emulation elsewhere in
 * this file to recover a mapping's size from its start address. */
static struct shm_region {
    abi_ulong start;    /* guest address where the region is attached */
    abi_ulong size;     /* size of the region in bytes */
    bool in_use;        /* slot currently describes a live mapping */
} shm_regions[N_SHM_REGIONS];
3199 
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
/* Guest-layout semid64_ds, used when the target does not provide its
 * own definition.  The __unusedN fields mirror the kernel's padding. */
struct target_semid64_ds
{
  struct target_ipc_perm sem_perm;  /* ownership and permissions */
  abi_ulong sem_otime;              /* last semop time */
#if TARGET_ABI_BITS == 32
  abi_ulong __unused1;              /* padding present only on 32-bit ABIs */
#endif
  abi_ulong sem_ctime;              /* last change time */
#if TARGET_ABI_BITS == 32
  abi_ulong __unused2;
#endif
  abi_ulong sem_nsems;              /* number of semaphores in the set */
  abi_ulong __unused3;
  abi_ulong __unused4;
};
#endif
3218 
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    /* Convert the guest ipc_perm embedded in the semid64_ds at
     * target_addr into the host's struct ipc_perm.
     * Returns 0 or -TARGET_EFAULT if guest memory is inaccessible. */
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* mode and __seq are 32-bit on some targets, 16-bit elsewhere */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3246 
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip,
{
    /* Inverse of target_to_host_ipc_perm(): write the host ipc_perm
     * back into the guest semid64_ds at target_addr.
     * Returns 0 or -TARGET_EFAULT if guest memory is inaccessible. */
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* mode and __seq are 32-bit on some targets, 16-bit elsewhere */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3274 
3275 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3276                                                abi_ulong target_addr)
3277 {
3278     struct target_semid64_ds *target_sd;
3279 
3280     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3281         return -TARGET_EFAULT;
3282     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3283         return -TARGET_EFAULT;
3284     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3285     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3286     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3287     unlock_user_struct(target_sd, target_addr, 0);
3288     return 0;
3289 }
3290 
3291 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3292                                                struct semid_ds *host_sd)
3293 {
3294     struct target_semid64_ds *target_sd;
3295 
3296     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3297         return -TARGET_EFAULT;
3298     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3299         return -TARGET_EFAULT;
3300     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3301     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3302     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3303     unlock_user_struct(target_sd, target_addr, 1);
3304     return 0;
3305 }
3306 
/* Guest-layout mirror of the host's struct seminfo, reported by
 * semctl(IPC_INFO/SEM_INFO). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3319 
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    /* Copy a host seminfo into the guest structure at target_addr,
     * byteswapping each field.  Returns 0 or -TARGET_EFAULT. */
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
3339 
/* Host-side semctl() argument union (see semctl(2)) */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
	struct seminfo *__buf;
};

/* Guest view of semun: the pointer members are guest addresses */
union target_semun {
	int val;
	abi_ulong buf;
	abi_ulong array;
	abi_ulong __buf;
};
3353 
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    /* Allocate a host array of semaphore values and fill it from the
     * guest array at target_addr (GETALL/SETALL support).  On success
     * ownership of *host_array passes to the caller; it is normally
     * released by host_to_target_semarray(). */
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    /* Ask the kernel how many semaphores are in the set */
    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        /* NOTE(review): this is a host errno, unlike the TARGET_
         * errnos returned elsewhere — verify callers tolerate it */
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
3389 
3390 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3391                                                unsigned short **host_array)
3392 {
3393     int nsems;
3394     unsigned short *array;
3395     union semun semun;
3396     struct semid_ds semid_ds;
3397     int i, ret;
3398 
3399     semun.buf = &semid_ds;
3400 
3401     ret = semctl(semid, 0, IPC_STAT, semun);
3402     if (ret == -1)
3403         return get_errno(ret);
3404 
3405     nsems = semid_ds.sem_nsems;
3406 
3407     array = lock_user(VERIFY_WRITE, target_addr,
3408                       nsems*sizeof(unsigned short), 0);
3409     if (!array)
3410         return -TARGET_EFAULT;
3411 
3412     for(i=0; i<nsems; i++) {
3413         __put_user((*host_array)[i], &array[i]);
3414     }
3415     g_free(*host_array);
3416     unlock_user(array, target_addr, 1);
3417 
3418     return 0;
3419 }
3420 
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    /* Emulate semctl(2): convert the semun argument for each command
     * class, invoke the host semctl, then convert results back to the
     * guest.  Returns the host result or a target errno. */
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    /* mask off high bits — presumably IPC_64-style flags; confirm */
    cmd &= 0xff;

    switch( cmd ) {
	case GETVAL:
	case SETVAL:
            /* In 64 bit cross-endian situations, we will erroneously pick up
             * the wrong half of the union for the "val" element.  To rectify
             * this, the entire 8-byte structure is byteswapped, followed by
	     * a swap of the 4 byte val field. In other cases, the data is
	     * already in proper host byte order. */
	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
		target_su.buf = tswapal(target_su.buf);
		arg.val = tswap32(target_su.val);
	    } else {
		arg.val = target_su.val;
	    }
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            break;
	case GETALL:
	case SETALL:
            /* target_to_host_semarray() allocates the host array;
             * host_to_target_semarray() copies back and frees it */
            err = target_to_host_semarray(semid, &array, target_su.array);
            if (err)
                return err;
            arg.array = array;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_semarray(semid, target_su.array, &array);
            if (err)
                return err;
            break;
	case IPC_STAT:
	case IPC_SET:
	case SEM_STAT:
            err = target_to_host_semid_ds(&dsarg, target_su.buf);
            if (err)
                return err;
            arg.buf = &dsarg;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_semid_ds(target_su.buf, &dsarg);
            if (err)
                return err;
            break;
	case IPC_INFO:
	case SEM_INFO:
            arg.__buf = &seminfo;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_seminfo(target_su.__buf, &seminfo);
            if (err)
                return err;
            break;
	case IPC_RMID:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
            /* these commands take no semun argument */
            ret = get_errno(semctl(semid, semnum, cmd, NULL));
            break;
    }

    return ret;
}
3490 
/* Guest-ABI layout of struct sembuf (semop operation descriptor).
 * Field order matches the host struct sembuf; only byte order differs. */
struct target_sembuf {
    unsigned short sem_num;     /* semaphore index in the set */
    short sem_op;               /* operation: add/subtract/wait-for-zero */
    short sem_flg;              /* operation flags (IPC_NOWAIT, SEM_UNDO) */
};
3496 
3497 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3498                                              abi_ulong target_addr,
3499                                              unsigned nsops)
3500 {
3501     struct target_sembuf *target_sembuf;
3502     int i;
3503 
3504     target_sembuf = lock_user(VERIFY_READ, target_addr,
3505                               nsops*sizeof(struct target_sembuf), 1);
3506     if (!target_sembuf)
3507         return -TARGET_EFAULT;
3508 
3509     for(i=0; i<nsops; i++) {
3510         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3511         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3512         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3513     }
3514 
3515     unlock_user(target_sembuf, target_addr, 0);
3516 
3517     return 0;
3518 }
3519 
/* Emulate semop(2): copy the guest operation array in and run the host
 * call via the restart-safe semtimedop wrapper (NULL timeout == semop).
 * NOTE(review): 'sops' is a VLA sized by the guest-controlled 'nsops';
 * an enormous value could exhaust the host stack before the kernel gets
 * a chance to reject it -- consider bounding against SEMOPM. TODO confirm. */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
}
3529 
/* Guest-ABI layout of struct msqid_ds (message queue metadata).
 * On 32-bit ABIs each time field is followed by an explicit padding
 * word, mirroring the kernel's 32-bit msqid64_ds layout. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;  /* access permissions */
    abi_ulong msg_stime;              /* last msgsnd time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;              /* pad: high half of 64-bit time */
#endif
    abi_ulong msg_rtime;              /* last msgrcv time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;              /* pad: high half of 64-bit time */
#endif
    abi_ulong msg_ctime;              /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;              /* pad: high half of 64-bit time */
#endif
    abi_ulong __msg_cbytes;           /* current bytes in queue */
    abi_ulong msg_qnum;               /* current messages in queue */
    abi_ulong msg_qbytes;             /* max bytes allowed in queue */
    abi_ulong msg_lspid;              /* pid of last msgsnd */
    abi_ulong msg_lrpid;              /* pid of last msgrcv */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
3553 
3554 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3555                                                abi_ulong target_addr)
3556 {
3557     struct target_msqid_ds *target_md;
3558 
3559     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3560         return -TARGET_EFAULT;
3561     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3562         return -TARGET_EFAULT;
3563     host_md->msg_stime = tswapal(target_md->msg_stime);
3564     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3565     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3566     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3567     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3568     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3569     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3570     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3571     unlock_user_struct(target_md, target_addr, 0);
3572     return 0;
3573 }
3574 
3575 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3576                                                struct msqid_ds *host_md)
3577 {
3578     struct target_msqid_ds *target_md;
3579 
3580     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3581         return -TARGET_EFAULT;
3582     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3583         return -TARGET_EFAULT;
3584     target_md->msg_stime = tswapal(host_md->msg_stime);
3585     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3586     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3587     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3588     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3589     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3590     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3591     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3592     unlock_user_struct(target_md, target_addr, 1);
3593     return 0;
3594 }
3595 
/* Guest-ABI layout of struct msginfo, as returned by msgctl(IPC_INFO /
 * MSG_INFO). All fields are plain ints apart from the final short. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
3606 
3607 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3608                                               struct msginfo *host_msginfo)
3609 {
3610     struct target_msginfo *target_msginfo;
3611     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3612         return -TARGET_EFAULT;
3613     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3614     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3615     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3616     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3617     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3618     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3619     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3620     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3621     unlock_user_struct(target_msginfo, target_addr, 1);
3622     return 0;
3623 }
3624 
/* Emulate msgctl(2): convert arguments per command, run the host call,
 * and convert results back to the guest. Unknown commands fall through
 * the switch and return -TARGET_EINVAL. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Strip version bits (e.g. IPC_64) from the command. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        /* Round-trip msqid_ds: copy in (only meaningful for IPC_SET),
         * run the command, copy the result back out. */
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        /* No buffer argument. */
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* Kernel writes a struct msginfo through the msqid_ds pointer. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
3656 
/* Guest-ABI layout of struct msgbuf: the mtype header followed by a
 * variable-length payload (declared with a [1] placeholder). */
struct target_msgbuf {
    abi_long mtype;     /* message type, must be > 0 for msgsnd */
    char	mtext[1];   /* start of the message payload */
};
3661 
/* Emulate msgsnd(2): build a host msgbuf from the guest message at
 * 'msgp' (byteswapping mtype, copying mtext verbatim) and send it.
 * Returns 0 or a negative target errno. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    /* The kernel would reject a negative size too; check up front so we
     * never pass a bogus length to g_try_malloc/memcpy. */
    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* Host buffer: a long mtype header plus msgsz payload bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    /* NOTE(review): this reads msgsz bytes past the locked struct (which
     * only covers target_msgbuf); presumably relies on the linear guest
     * mapping -- confirm against lock_user_struct semantics. */
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
3688 
/* Emulate msgrcv(2): receive into a host msgbuf, then copy the payload
 * and the byteswapped mtype back to the guest message at 'msgp'.
 * Returns the number of payload bytes received or a negative errno. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* Host buffer: long mtype header plus up to msgsz payload bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* mtext starts right after the abi-sized mtype field. Locking it
         * for write validates the range; the copy itself goes through
         * target_mb->mtext, which aliases the same guest memory under the
         * linear mapping (NOTE(review): confirm this aliasing assumption). */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    /* Always report the received (host) mtype back to the guest. */
    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* target_mb is non-NULL on every path that reaches this label; the
     * check is defensive. */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
3731 
3732 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3733                                                abi_ulong target_addr)
3734 {
3735     struct target_shmid_ds *target_sd;
3736 
3737     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3738         return -TARGET_EFAULT;
3739     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3740         return -TARGET_EFAULT;
3741     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3742     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3743     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3744     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3745     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3746     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3747     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3748     unlock_user_struct(target_sd, target_addr, 0);
3749     return 0;
3750 }
3751 
3752 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3753                                                struct shmid_ds *host_sd)
3754 {
3755     struct target_shmid_ds *target_sd;
3756 
3757     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3758         return -TARGET_EFAULT;
3759     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3760         return -TARGET_EFAULT;
3761     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3762     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3763     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3764     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3765     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3766     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3767     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3768     unlock_user_struct(target_sd, target_addr, 1);
3769     return 0;
3770 }
3771 
/* Guest-ABI layout of struct shminfo (shmctl IPC_INFO result). */
struct  target_shminfo {
    abi_ulong shmmax;   /* max segment size */
    abi_ulong shmmin;   /* min segment size */
    abi_ulong shmmni;   /* max number of segments */
    abi_ulong shmseg;   /* max segments per process */
    abi_ulong shmall;   /* max total shared memory (pages) */
};
3779 
3780 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3781                                               struct shminfo *host_shminfo)
3782 {
3783     struct target_shminfo *target_shminfo;
3784     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3785         return -TARGET_EFAULT;
3786     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3787     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3788     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3789     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3790     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3791     unlock_user_struct(target_shminfo, target_addr, 1);
3792     return 0;
3793 }
3794 
/* Guest-ABI layout of struct shm_info (shmctl SHM_INFO result). */
struct target_shm_info {
    int used_ids;               /* number of allocated segment ids */
    abi_ulong shm_tot;          /* total allocated shm (pages) */
    abi_ulong shm_rss;          /* resident shm (pages) */
    abi_ulong shm_swp;          /* swapped shm (pages) */
    abi_ulong swap_attempts;    /* unused since Linux 2.4 */
    abi_ulong swap_successes;   /* unused since Linux 2.4 */
};
3803 
3804 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3805                                                struct shm_info *host_shm_info)
3806 {
3807     struct target_shm_info *target_shm_info;
3808     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3809         return -TARGET_EFAULT;
3810     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3811     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3812     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3813     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3814     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3815     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3816     unlock_user_struct(target_shm_info, target_addr, 1);
3817     return 0;
3818 }
3819 
/* Emulate shmctl(2): convert the buffer per command, run the host call,
 * and convert results back to the guest. Unknown commands fall through
 * the switch and return -TARGET_EINVAL. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    /* Strip version bits (e.g. IPC_64) from the command. */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        /* Round-trip shmid_ds: copy in (only meaningful for IPC_SET),
         * run the command, copy the result back out. */
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* Kernel writes a struct shminfo through the shmid_ds pointer. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* Kernel writes a struct shm_info through the shmid_ds pointer. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* No buffer argument. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
3858 
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
/* Default shared-memory attach alignment: the target page size. */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
3878 
/* Emulate shmat(2): attach the segment at the guest-requested (or a
 * freshly-found) address, update the guest page flags, and record the
 * mapping so do_shmdt() can undo it. Returns the guest attach address
 * or a negative target errno. */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Enforce the target's SHMLBA alignment: round down with SHM_RND,
     * otherwise reject a misaligned address. */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    /* The whole segment must fit inside the guest address space. */
    if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* No address given: pick a free guest range, then attach over it
         * with SHM_REMAP. */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Mark the attached range valid/readable (and writable unless the
     * guest asked for a read-only attach). */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Remember the mapping in the first free tracking slot.
     * NOTE(review): if all N_SHM_REGIONS slots are in use the attach still
     * succeeds but is untracked, so a later shmdt won't clear page flags. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}
3947 
/* Emulate shmdt(2): detach the segment at 'shmaddr', clearing the guest
 * page flags for the tracked region if we recorded it in do_shmat().
 * The host shmdt() is attempted regardless of whether the region was
 * found in the tracking table. */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    mmap_lock();

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            /* Invalidate the guest pages the attach made accessible. */
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h(shmaddr)));

    mmap_unlock();

    return rv;
}
3968 
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    /* The multiplexed ipc(2) call encodes an ABI version in the high
     * sixteen bits of 'call' and the operation in the low sixteen. */
    int version = call >> 16;
    abi_long ret = 0;

    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl:
    {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        if (version == 0) {
            /* Old ABI: msgp and msgtyp arrive packed in a kludge struct
             * that 'ptr' points at. */
            struct target_ipc_kludge {
                abi_long msgp;
                abi_long msgtyp;
            } *tmp;

            if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                ret = -TARGET_EFAULT;
                break;
            }
            ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                            tswapal(tmp->msgtyp), third);
            unlock_user_struct(tmp, ptr, 0);
        } else {
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        if (version == 1) {
            ret = -TARGET_EINVAL;
        } else {
            abi_ulong raddr;

            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr)) {
                return get_errno(raddr);
            }
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third)) {
                return -TARGET_EFAULT;
            }
        }
        break;

    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

    /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;

    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4075 
/* kernel structure types definitions */

/* X-macro expansion over syscall_types.h: the first pass generates one
 * STRUCT_<name> enum constant per kernel structure (STRUCT_MAX last). */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second pass: emit a TYPE_NULL-terminated argtype descriptor array
 * (struct_<name>_def) per structure; "special" structs are converted by
 * hand-written code and get no generated descriptor. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
4092 
typedef struct IOCTLEntry IOCTLEntry;

/* Signature for per-ioctl custom handlers that cannot be expressed as a
 * simple argument-type conversion. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One row of the ioctl translation table. */
struct IOCTLEntry {
    int target_cmd;             /* ioctl number in the guest ABI */
    unsigned int host_cmd;      /* corresponding host ioctl number */
    const char *name;           /* for logging/strace */
    int access;                 /* IOC_R / IOC_W / IOC_RW direction */
    do_ioctl_fn *do_ioctl;      /* custom handler, or NULL for generic */
    const argtype arg_type[5];  /* thunk description of the argument */
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Size of the scratch conversion buffer used by the generic path. */
#define MAX_STRUCT_SIZE 4096
4112 
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/* Custom handler for FS_IOC_FIEMAP: the argument is a variable-length
 * structure, so the generic thunk path cannot convert it. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Convert the fixed fiemap header from guest to host form. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Bound the extent count so outbufsz below cannot overflow. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4201 
4202 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4203                                 int fd, int cmd, abi_long arg)
4204 {
4205     const argtype *arg_type = ie->arg_type;
4206     int target_size;
4207     void *argptr;
4208     int ret;
4209     struct ifconf *host_ifconf;
4210     uint32_t outbufsz;
4211     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4212     int target_ifreq_size;
4213     int nb_ifreq;
4214     int free_buf = 0;
4215     int i;
4216     int target_ifc_len;
4217     abi_long target_ifc_buf;
4218     int host_ifc_len;
4219     char *host_ifc_buf;
4220 
4221     assert(arg_type[0] == TYPE_PTR);
4222     assert(ie->access == IOC_RW);
4223 
4224     arg_type++;
4225     target_size = thunk_type_size(arg_type, 0);
4226 
4227     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4228     if (!argptr)
4229         return -TARGET_EFAULT;
4230     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4231     unlock_user(argptr, arg, 0);
4232 
4233     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4234     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4235     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4236 
4237     if (target_ifc_buf != 0) {
4238         target_ifc_len = host_ifconf->ifc_len;
4239         nb_ifreq = target_ifc_len / target_ifreq_size;
4240         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4241 
4242         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4243         if (outbufsz > MAX_STRUCT_SIZE) {
4244             /*
4245              * We can't fit all the extents into the fixed size buffer.
4246              * Allocate one that is large enough and use it instead.
4247              */
4248             host_ifconf = malloc(outbufsz);
4249             if (!host_ifconf) {
4250                 return -TARGET_ENOMEM;
4251             }
4252             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4253             free_buf = 1;
4254         }
4255         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4256 
4257         host_ifconf->ifc_len = host_ifc_len;
4258     } else {
4259       host_ifc_buf = NULL;
4260     }
4261     host_ifconf->ifc_buf = host_ifc_buf;
4262 
4263     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4264     if (!is_error(ret)) {
4265 	/* convert host ifc_len to target ifc_len */
4266 
4267         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4268         target_ifc_len = nb_ifreq * target_ifreq_size;
4269         host_ifconf->ifc_len = target_ifc_len;
4270 
4271 	/* restore target ifc_buf */
4272 
4273         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4274 
4275 	/* copy struct ifconf to target user */
4276 
4277         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4278         if (!argptr)
4279             return -TARGET_EFAULT;
4280         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4281         unlock_user(argptr, arg, target_size);
4282 
4283         if (target_ifc_buf != 0) {
4284             /* copy ifreq[] to target user */
4285             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4286             for (i = 0; i < nb_ifreq ; i++) {
4287                 thunk_convert(argptr + i * target_ifreq_size,
4288                               host_ifc_buf + i * sizeof(struct ifreq),
4289                               ifreq_arg_type, THUNK_TARGET);
4290             }
4291             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4292         }
4293     }
4294 
4295     if (free_buf) {
4296         free(host_ifconf);
4297     }
4298 
4299     return ret;
4300 }
4301 
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
/* Bookkeeping for one in-flight usbdevfs URB: the guest's URB and data
 * buffer addresses, plus the host URB actually submitted to the kernel.
 * target_urb_adr must stay the first field -- it is used as the 64-bit
 * hash key by the g_int64_hash table below. */
struct live_urb {
    uint64_t target_urb_adr;        /* guest address of the guest URB */
    uint64_t target_buf_adr;        /* guest address of the data buffer */
    char *target_buf_ptr;           /* host pointer from lock_user() */
    struct usbdevfs_urb host_urb;   /* URB handed to the host kernel */
};
4312 
4313 static GHashTable *usbdevfs_urb_hashtable(void)
4314 {
4315     static GHashTable *urb_hashtable;
4316 
4317     if (!urb_hashtable) {
4318         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4319     }
4320     return urb_hashtable;
4321 }
4322 
/* Track a newly submitted URB. The live_urb serves as both key and
 * value; hashing reads its leading 64-bit target_urb_adr field. */
static void urb_hashtable_insert(struct live_urb *urb)
{
    g_hash_table_insert(usbdevfs_urb_hashtable(), urb, urb);
}
4328 
4329 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4330 {
4331     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4332     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4333 }
4334 
/* Stop tracking a completed or discarded URB (does not free it). */
static void urb_hashtable_remove(struct live_urb *urb)
{
    g_hash_table_remove(usbdevfs_urb_hashtable(), urb);
}
4340 
/* Custom handler for USBDEVFS_REAPURB(NDELAY): reap a completed URB from
 * the kernel, recover our live_urb bookkeeping from the returned host-URB
 * pointer, copy results back to the guest URB, and write the guest URB
 * address through 'arg'. */
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    /* The kernel writes a host URB pointer into buf_temp. */
    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    /* Recover the enclosing live_urb from the embedded host_urb address.
     * NOTE(review): if the kernel ever returned a URB we did not submit,
     * this offset arithmetic would produce a wild pointer -- the
     * target_urb_adr check below only partially guards against that. */
    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    /* Release the guest data buffer, copying transfer results back. */
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
        lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
4400 
4401 static abi_long
4402 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4403                              uint8_t *buf_temp __attribute__((unused)),
4404                              int fd, int cmd, abi_long arg)
4405 {
4406     struct live_urb *lurb;
4407 
4408     /* map target address back to host URB with metadata. */
4409     lurb = urb_hashtable_lookup(arg);
4410     if (!lurb) {
4411         return -TARGET_EFAULT;
4412     }
4413     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4414 }
4415 
/*
 * USBDEVFS_SUBMITURB: build a host copy of the guest's usbdevfs_urb,
 * lock the guest data buffer into host memory for the duration of the
 * transfer, and record the mapping so REAPURB/DISCARDURB can find it.
 */
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory.  hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_malloc0(sizeof(struct live_urb));
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Remember the guest urb address and its buffer address; the host
     * copy's buffer field still holds the raw guest address here. */
    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
        lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        /* Nothing was submitted: release the buffer without copy-back. */
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        /* Success: lurb (and the locked buffer) stay alive until reaped. */
        urb_hashtable_insert(lurb);
    }

    return ret;
}
4476 #endif /* CONFIG_USBFS */
4477 
/*
 * Device-mapper ioctls: struct dm_ioctl carries a variable-size payload
 * (data_start..data_size) whose layout depends on the command, so each
 * command's payload is converted by hand in both directions.
 */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    /* Convert the fixed-size dm_ioctl header from the guest. */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    /* Payload lives at data_start bytes past the header, both guest-side
     * (relative to arg) and host-side (relative to host_dm). */
    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    /* Convert the command-specific input payload (guest -> host). */
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        /* Leading 64-bit sector number needs byte-swapping; the rest is
         * the message string. */
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        /* Payload is target_count dm_target_spec records, each followed
         * by a parameter string; `next` chains records together. */
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        /* The kernel may have rewritten data_start/data_size; recompute
         * payload location before converting results back (host -> guest). */
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            /* Chain of dm_name_list records; rewrite `next` offsets for
             * the target's (unpadded) record size. */
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            /* Chain of dm_target_spec records, each with a trailing
             * status/params string. */
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                /* `next` is relative to the start of the payload here. */
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            /* 32-bit count (at offset 0), then 64-bit dev_t values
             * starting at offset 8. */
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            /* Chain of dm_target_versions records, same `next` rewriting
             * scheme as DM_LIST_DEVICES. */
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* Finally convert the (possibly updated) header back out. */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
4707 
4708 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4709                                int cmd, abi_long arg)
4710 {
4711     void *argptr;
4712     int target_size;
4713     const argtype *arg_type = ie->arg_type;
4714     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4715     abi_long ret;
4716 
4717     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4718     struct blkpg_partition host_part;
4719 
4720     /* Read and convert blkpg */
4721     arg_type++;
4722     target_size = thunk_type_size(arg_type, 0);
4723     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4724     if (!argptr) {
4725         ret = -TARGET_EFAULT;
4726         goto out;
4727     }
4728     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4729     unlock_user(argptr, arg, 0);
4730 
4731     switch (host_blkpg->op) {
4732     case BLKPG_ADD_PARTITION:
4733     case BLKPG_DEL_PARTITION:
4734         /* payload is struct blkpg_partition */
4735         break;
4736     default:
4737         /* Unknown opcode */
4738         ret = -TARGET_EINVAL;
4739         goto out;
4740     }
4741 
4742     /* Read and convert blkpg->data */
4743     arg = (abi_long)(uintptr_t)host_blkpg->data;
4744     target_size = thunk_type_size(part_arg_type, 0);
4745     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4746     if (!argptr) {
4747         ret = -TARGET_EFAULT;
4748         goto out;
4749     }
4750     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4751     unlock_user(argptr, arg, 0);
4752 
4753     /* Swizzle the data pointer to our local copy and call! */
4754     host_blkpg->data = &host_part;
4755     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4756 
4757 out:
4758     return ret;
4759 }
4760 
/*
 * Routing-table ioctls taking a struct rtentry: the rt_dev field is a
 * pointer to a device-name string in guest memory, which the generic
 * struct thunk cannot follow, so the struct is converted field by
 * field and rt_dev is swizzled to a locked host string.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    /* This handler only supports write-only pointer-to-rtentry entries. */
    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            /* rt_dev: replace the guest string address with a locked
             * host pointer (or NULL if the guest passed none). */
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    /* struct rtentry always has an rt_dev field, so the loop above must
     * have seen it and set both pointers. */
    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
4826 
4827 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4828                                      int fd, int cmd, abi_long arg)
4829 {
4830     int sig = target_to_host_signal(arg);
4831     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4832 }
4833 
4834 #ifdef TIOCGPTPEER
4835 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
4836                                      int fd, int cmd, abi_long arg)
4837 {
4838     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
4839     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
4840 }
4841 #endif
4842 
/*
 * Table of supported ioctls, generated by expanding ioctls.h with the
 * macros below.  IOCTL_SPECIAL entries carry a custom marshalling
 * handler; IOCTL_IGNORE entries have no host command (do_ioctl rejects
 * them with ENOSYS without logging).
 */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },  /* end-of-table sentinel: target_cmd == 0 */
};
4853 
4854 /* ??? Implement proper locking for ioctls.  */
4855 /* do_ioctl() Must return target values and target errnos. */
4856 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4857 {
4858     const IOCTLEntry *ie;
4859     const argtype *arg_type;
4860     abi_long ret;
4861     uint8_t buf_temp[MAX_STRUCT_SIZE];
4862     int target_size;
4863     void *argptr;
4864 
4865     ie = ioctl_entries;
4866     for(;;) {
4867         if (ie->target_cmd == 0) {
4868             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4869             return -TARGET_ENOSYS;
4870         }
4871         if (ie->target_cmd == cmd)
4872             break;
4873         ie++;
4874     }
4875     arg_type = ie->arg_type;
4876     if (ie->do_ioctl) {
4877         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4878     } else if (!ie->host_cmd) {
4879         /* Some architectures define BSD ioctls in their headers
4880            that are not implemented in Linux.  */
4881         return -TARGET_ENOSYS;
4882     }
4883 
4884     switch(arg_type[0]) {
4885     case TYPE_NULL:
4886         /* no argument */
4887         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
4888         break;
4889     case TYPE_PTRVOID:
4890     case TYPE_INT:
4891         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
4892         break;
4893     case TYPE_PTR:
4894         arg_type++;
4895         target_size = thunk_type_size(arg_type, 0);
4896         switch(ie->access) {
4897         case IOC_R:
4898             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4899             if (!is_error(ret)) {
4900                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4901                 if (!argptr)
4902                     return -TARGET_EFAULT;
4903                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4904                 unlock_user(argptr, arg, target_size);
4905             }
4906             break;
4907         case IOC_W:
4908             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4909             if (!argptr)
4910                 return -TARGET_EFAULT;
4911             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4912             unlock_user(argptr, arg, 0);
4913             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4914             break;
4915         default:
4916         case IOC_RW:
4917             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4918             if (!argptr)
4919                 return -TARGET_EFAULT;
4920             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4921             unlock_user(argptr, arg, 0);
4922             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4923             if (!is_error(ret)) {
4924                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4925                 if (!argptr)
4926                     return -TARGET_EFAULT;
4927                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4928                 unlock_user(argptr, arg, target_size);
4929             }
4930             break;
4931         }
4932         break;
4933     default:
4934         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4935                  (long)cmd, arg_type[0]);
4936         ret = -TARGET_ENOSYS;
4937         break;
4938     }
4939     return ret;
4940 }
4941 
/* termios c_iflag: 1:1 mapping of input-mode flag bits between target
 * and host encodings (columns: target mask, target bits, host mask,
 * host bits). */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};
4959 
4960 static const bitmask_transtbl oflag_tbl[] = {
4961 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4962 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4963 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4964 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4965 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4966 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4967 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4968 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4969 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4970 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4971 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4972 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4973 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4974 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4975 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4976 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4977 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4978 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4979 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4980 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4981 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4982 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4983 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4984 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4985 	{ 0, 0, 0, 0 }
4986 };
4987 
4988 static const bitmask_transtbl cflag_tbl[] = {
4989 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4990 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4991 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4992 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4993 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4994 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4995 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4996 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4997 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4998 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4999 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5000 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5001 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5002 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5003 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5004 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5005 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5006 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5007 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5008 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5009 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5010 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5011 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5012 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5013 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5014 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5015 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5016 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5017 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5018 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5019 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5020 	{ 0, 0, 0, 0 }
5021 };
5022 
5023 static const bitmask_transtbl lflag_tbl[] = {
5024 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5025 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5026 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5027 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5028 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5029 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5030 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5031 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5032 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5033 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5034 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5035 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5036 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5037 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5038 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5039 	{ 0, 0, 0, 0 }
5040 };
5041 
/*
 * Convert a guest struct target_termios to the host's struct
 * host_termios: each flag word is translated through the bitmask
 * tables above, and the control-character array is remapped index by
 * index (TARGET_V* and host V* indices need not agree).
 */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    /* c_line is a single byte; no byte swap needed. */
    host->c_line = target->c_line;

    /* Clear first so host-only c_cc slots are zero, not garbage. */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
5076 
/*
 * Inverse of target_to_host_termios: convert a host struct
 * host_termios into the guest's struct target_termios, translating
 * flag words through the bitmask tables and remapping c_cc indices.
 */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    /* c_line is a single byte; no byte swap needed. */
    target->c_line = host->c_line;

    /* Clear first so target-only c_cc slots are zero, not garbage. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
5111 
/* Thunk descriptor for termios: uses the custom converters above
 * (rather than generic field thunks) because the flag-bit encodings
 * and c_cc indices differ between target and host. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
5117 
/* mmap(2) flag bits: 1:1 target<->host mapping, except MAP_STACK
 * (see comment below), which is accepted but dropped. */
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
5140 
5141 #if defined(TARGET_I386)
5142 
/* NOTE: there is really one LDT for all the threads */
/* Raw target LDT entries; NULL until an LDT is first installed
 * (read_ldt returns 0 in that case). */
static uint8_t *ldt_table;
5145 
5146 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5147 {
5148     int size;
5149     void *p;
5150 
5151     if (!ldt_table)
5152         return 0;
5153     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5154     if (size > bytecount)
5155         size = bytecount;
5156     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5157     if (!p)
5158         return -TARGET_EFAULT;
5159     /* ??? Should this by byteswapped?  */
5160     memcpy(p, ldt_table, size);
5161     unlock_user(p, ptr, size);
5162     return size;
5163 }
5164 
5165 /* XXX: add locking support */
/*
 * Implement the write side of modify_ldt(2).
 * Reads a struct target_modify_ldt_ldt_s from guest memory at @ptr,
 * validates it, encodes it into the 8-byte x86 descriptor format and
 * stores it into the emulated LDT (allocated on first use).
 * @oldmode selects the legacy (func==1) semantics.
 * Returns 0 on success or a negative target errno.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    /* Byte-swap the descriptor fields from guest to host order. */
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flags bitfield (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    /* contents == 3 would be a conforming code segment; only allowed in
       new mode and only when marked not-present. */
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Encode base/limit/flags into the two 32-bit halves of an x86
       segment descriptor (0x7000 = DPL 3, present bit set below). */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5256 
5257 /* specific and weird i386 syscalls */
5258 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5259                               unsigned long bytecount)
5260 {
5261     abi_long ret;
5262 
5263     switch (func) {
5264     case 0:
5265         ret = read_ldt(ptr, bytecount);
5266         break;
5267     case 1:
5268         ret = write_ldt(env, ptr, bytecount, 1);
5269         break;
5270     case 0x11:
5271         ret = write_ldt(env, ptr, bytecount, 0);
5272         break;
5273     default:
5274         ret = -TARGET_ENOSYS;
5275         break;
5276     }
5277     return ret;
5278 }
5279 
5280 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * Implement set_thread_area(2) for 32-bit i386 guests.
 * Reads a struct target_modify_ldt_ldt_s from guest memory at @ptr and
 * installs the encoded descriptor into one of the TLS slots of the
 * emulated GDT.  If entry_number is -1, the first free TLS slot is
 * chosen and written back to the guest struct.
 * Returns 0 on success or a negative target errno.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    /* -1 means "pick a free TLS slot"; report the choice back to the guest. */
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
           return -TARGET_EINVAL;
    /* Unpack the flags bitfield (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    /* NOTE(review): this function is only compiled for TARGET_ABI32
       (see the enclosing #if), so lm is always 0 here. */
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Encode base/limit/flags into the two 32-bit halves of an x86
       segment descriptor (0x7000 = DPL 3, present bit set below). */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5365 
/*
 * Implement get_thread_area(2) for 32-bit i386 guests: decode the GDT
 * descriptor selected by the guest's entry_number back into a
 * struct target_modify_ldt_ldt_s and write it to guest memory at @ptr.
 * This is the inverse of the encoding done in do_set_thread_area().
 * Returns 0 on success or a negative target errno.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Extract the individual descriptor bits (inverse of the encoding
       in do_set_thread_area()). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    /* Reassemble the 32-bit base: bits 0-15 from entry_1's high half,
       bits 16-23 from entry_2's low byte, bits 24-31 in place. */
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
5412 #endif /* TARGET_I386 && TARGET_ABI32 */
5413 
5414 #ifndef TARGET_ABI32
5415 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5416 {
5417     abi_long ret = 0;
5418     abi_ulong val;
5419     int idx;
5420 
5421     switch(code) {
5422     case TARGET_ARCH_SET_GS:
5423     case TARGET_ARCH_SET_FS:
5424         if (code == TARGET_ARCH_SET_GS)
5425             idx = R_GS;
5426         else
5427             idx = R_FS;
5428         cpu_x86_load_seg(env, idx, 0);
5429         env->segs[idx].base = addr;
5430         break;
5431     case TARGET_ARCH_GET_GS:
5432     case TARGET_ARCH_GET_FS:
5433         if (code == TARGET_ARCH_GET_GS)
5434             idx = R_GS;
5435         else
5436             idx = R_FS;
5437         val = env->segs[idx].base;
5438         if (put_user(val, addr, abi_ulong))
5439             ret = -TARGET_EFAULT;
5440         break;
5441     default:
5442         ret = -TARGET_EINVAL;
5443         break;
5444     }
5445     return ret;
5446 }
5447 #endif
5448 
5449 #endif /* defined(TARGET_I386) */
5450 
/* Host stack size for threads spawned via clone() with CLONE_VM. */
#define NEW_STACK_SIZE 0x40000


/* Serializes thread creation; the child also briefly takes this lock in
   clone_func() so the parent can finish TLS setup first (see do_fork()). */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Startup parameters handed from do_fork() to clone_func(). */
typedef struct {
    CPUArchState *env;          /* CPU state for the new thread */
    pthread_mutex_t mutex;      /* protects the startup handshake */
    pthread_cond_t cond;        /* signaled by the child when ready */
    pthread_t thread;           /* host thread handle */
    uint32_t tid;               /* child's host TID, filled in by the child */
    abi_ulong child_tidptr;     /* guest addr for CLONE_CHILD_SETTID, or 0 */
    abi_ulong parent_tidptr;    /* guest addr for CLONE_PARENT_SETTID, or 0 */
    sigset_t sigmask;           /* signal mask to restore in the child */
} new_thread_info;
5465 
/*
 * Host-thread entry point for guest threads created with CLONE_VM.
 * Registers the thread with RCU/TCG, publishes its TID to the guest
 * addresses requested via CLONE_*_SETTID, restores the signal mask,
 * signals the parent that startup is complete, then waits for the
 * parent to release clone_lock before entering the CPU loop.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
5498 
5499 /* do_fork() Must return host values and target errnos (unlike most
5500    do_*() functions). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        /* CLONE_VM: create a new guest thread backed by a host pthread. */
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        /* We only support the flag combination the kernel uses for
           pthread-style threads. */
        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        /* NOTE(review): the pthread_attr_* return codes below are
           overwritten without being checked; only the pthread_create()
           result is acted upon. */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
5638 
5639 /* warning : doesn't handle linux specific flags... */
/*
 * Map a TARGET_F_* fcntl command to the host F_* value, or return
 * -TARGET_EINVAL for unknown commands.  Record-lock commands are mapped
 * to the 64-bit host variants so large offsets are handled uniformly.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
5734 
/* Shared case list for translating flock l_type values; each user defines
 * TRANSTBL_CONVERT to pick the direction (target->host or host->target),
 * keeping both conversion functions in sync from one table. */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    TRANSTBL_CONVERT(F_EXLCK); \
    TRANSTBL_CONVERT(F_SHLCK); \
    }
5743 
/* Map a TARGET_F_*LCK flock type to the host value, or -TARGET_EINVAL
 * for an unrecognized type. */
static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}
5751 
/* Map a host F_*LCK flock type back to the target value; unknown host
 * values are passed through unchanged (see comment below). */
static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
5762 
5763 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5764                                             abi_ulong target_flock_addr)
5765 {
5766     struct target_flock *target_fl;
5767     int l_type;
5768 
5769     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5770         return -TARGET_EFAULT;
5771     }
5772 
5773     __get_user(l_type, &target_fl->l_type);
5774     l_type = target_to_host_flock(l_type);
5775     if (l_type < 0) {
5776         return l_type;
5777     }
5778     fl->l_type = l_type;
5779     __get_user(fl->l_whence, &target_fl->l_whence);
5780     __get_user(fl->l_start, &target_fl->l_start);
5781     __get_user(fl->l_len, &target_fl->l_len);
5782     __get_user(fl->l_pid, &target_fl->l_pid);
5783     unlock_user_struct(target_fl, target_flock_addr, 0);
5784     return 0;
5785 }
5786 
/*
 * Copy the host struct flock64 @fl out to a struct target_flock in
 * guest memory at @target_flock_addr, translating l_type.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
5806 
5807 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5808 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5809 
5810 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5811 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5812                                                    abi_ulong target_flock_addr)
5813 {
5814     struct target_oabi_flock64 *target_fl;
5815     int l_type;
5816 
5817     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5818         return -TARGET_EFAULT;
5819     }
5820 
5821     __get_user(l_type, &target_fl->l_type);
5822     l_type = target_to_host_flock(l_type);
5823     if (l_type < 0) {
5824         return l_type;
5825     }
5826     fl->l_type = l_type;
5827     __get_user(fl->l_whence, &target_fl->l_whence);
5828     __get_user(fl->l_start, &target_fl->l_start);
5829     __get_user(fl->l_len, &target_fl->l_len);
5830     __get_user(fl->l_pid, &target_fl->l_pid);
5831     unlock_user_struct(target_fl, target_flock_addr, 0);
5832     return 0;
5833 }
5834 
/*
 * OABI-ARM variant of copy_to_user_flock64(): copy @fl out to a
 * struct target_oabi_flock64 in guest memory.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
5854 #endif
5855 
5856 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
5857                                               abi_ulong target_flock_addr)
5858 {
5859     struct target_flock64 *target_fl;
5860     int l_type;
5861 
5862     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5863         return -TARGET_EFAULT;
5864     }
5865 
5866     __get_user(l_type, &target_fl->l_type);
5867     l_type = target_to_host_flock(l_type);
5868     if (l_type < 0) {
5869         return l_type;
5870     }
5871     fl->l_type = l_type;
5872     __get_user(fl->l_whence, &target_fl->l_whence);
5873     __get_user(fl->l_start, &target_fl->l_start);
5874     __get_user(fl->l_len, &target_fl->l_len);
5875     __get_user(fl->l_pid, &target_fl->l_pid);
5876     unlock_user_struct(target_fl, target_flock_addr, 0);
5877     return 0;
5878 }
5879 
/*
 * Copy the host struct flock64 @fl out to a struct target_flock64 in
 * guest memory at @target_flock_addr, translating l_type.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
5899 
/*
 * Emulate fcntl(2): translate the target command and argument, perform
 * the host fcntl via safe_fcntl(), and translate results back.  Lock
 * commands marshal struct flock through guest memory; flag commands map
 * O_* bits via fcntl_flags_tbl.  Returns the host result or a negative
 * target errno.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
	    return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            /* F_GETLK fills in the conflicting lock; copy it back out. */
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            /* Translate host O_* status flags back to target values. */
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    /* Commands whose argument is a plain integer: pass through. */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Unknown-but-mapped command: try it with the raw cmd value. */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
6007 
6008 #ifdef USE_UID16
6009 
/* Clamp a 32-bit uid into 16-bit range; out-of-range ids map to the
 * overflow uid 65534 ("nobody"). */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}
6017 
/* Clamp a 32-bit gid into 16-bit range; out-of-range ids map to the
 * overflow gid 65534 ("nogroup"). */
static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}
6025 
/* Widen a 16-bit uid: the 16-bit sentinel 0xffff (-1) becomes the
 * 32-bit -1 "no change" value, everything else passes through. */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}
6033 
/* Widen a 16-bit gid: the 16-bit sentinel 0xffff (-1) becomes the
 * 32-bit -1 "no change" value, everything else passes through. */
static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}
/* Byte-swap a 16-bit uid/gid between host and target byte order. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

/* Store a 16-bit uid/gid to guest memory. */
#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6047 
#else /* !USE_UID16 */
/* With full 32-bit uids/gids no clamping or widening is required;
 * these helpers are identity maps kept for interface symmetry. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
/* Byte-swap a 32-bit uid/gid between host and target byte order. */
static inline int tswapid(int id)
{
    return tswap32(id);
}

/* Store a 32-bit uid/gid to guest memory. */
#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
6073 
6074 /* We must do direct syscalls for setting UID/GID, because we want to
6075  * implement the Linux system call semantics of "change only for this thread",
6076  * not the libc/POSIX semantics of "change for all threads in process".
6077  * (See http://ewontfix.com/17/ for more details.)
6078  * We use the 32-bit version of the syscalls if present; if it is not
6079  * then either the host architecture supports 32-bit UIDs natively with
6080  * the standard syscall, or the 16-bit UID is the best we can do.
6081  */
/* Pick the 32-bit uid/gid syscall numbers when the host provides them,
 * otherwise fall back to the base syscalls. */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw syscall wrappers that bypass libc so the id change applies only
 * to the calling thread (see the comment block above). */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6107 
/*
 * One-time initialisation for syscall emulation: register the struct
 * conversion descriptors with the thunk layer, build the reverse errno
 * translation table, and fix up ioctl numbers whose size field depends
 * on the target ABI.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        /* An all-ones size field marks an entry whose size must be
         * computed here from the target's layout of the pointed-to type. */
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
6159 
6160 #if TARGET_ABI_BITS == 32
/* Combine the two 32-bit halves of a 64-bit offset passed in a register
 * pair; which register carries the high half depends on target endianness.
 */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
    uint32_t hi, lo;

#ifdef TARGET_WORDS_BIGENDIAN
    hi = word0;
    lo = word1;
#else
    hi = word1;
    lo = word0;
#endif
    return ((uint64_t)hi << 32) | lo;
}
6169 #else /* TARGET_ABI_BITS == 32 */
/* On 64-bit ABIs the offset already arrives whole in the first argument;
 * word1 is accepted only so both variants share a call signature. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
6174 #endif /* TARGET_ABI_BITS != 32 */
6175 
6176 #ifdef TARGET_NR_truncate64
6177 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6178                                          abi_long arg2,
6179                                          abi_long arg3,
6180                                          abi_long arg4)
6181 {
6182     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6183         arg2 = arg3;
6184         arg3 = arg4;
6185     }
6186     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6187 }
6188 #endif
6189 
6190 #ifdef TARGET_NR_ftruncate64
6191 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6192                                           abi_long arg2,
6193                                           abi_long arg3,
6194                                           abi_long arg4)
6195 {
6196     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6197         arg2 = arg3;
6198         arg3 = arg4;
6199     }
6200     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6201 }
6202 #endif
6203 
6204 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6205                                                abi_ulong target_addr)
6206 {
6207     struct target_timespec *target_ts;
6208 
6209     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6210         return -TARGET_EFAULT;
6211     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6212     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6213     unlock_user_struct(target_ts, target_addr, 0);
6214     return 0;
6215 }
6216 
6217 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6218                                                struct timespec *host_ts)
6219 {
6220     struct target_timespec *target_ts;
6221 
6222     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6223         return -TARGET_EFAULT;
6224     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6225     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6226     unlock_user_struct(target_ts, target_addr, 1);
6227     return 0;
6228 }
6229 
6230 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6231                                                  abi_ulong target_addr)
6232 {
6233     struct target_itimerspec *target_itspec;
6234 
6235     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6236         return -TARGET_EFAULT;
6237     }
6238 
6239     host_itspec->it_interval.tv_sec =
6240                             tswapal(target_itspec->it_interval.tv_sec);
6241     host_itspec->it_interval.tv_nsec =
6242                             tswapal(target_itspec->it_interval.tv_nsec);
6243     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6244     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6245 
6246     unlock_user_struct(target_itspec, target_addr, 1);
6247     return 0;
6248 }
6249 
6250 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6251                                                struct itimerspec *host_its)
6252 {
6253     struct target_itimerspec *target_itspec;
6254 
6255     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6256         return -TARGET_EFAULT;
6257     }
6258 
6259     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6260     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6261 
6262     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6263     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6264 
6265     unlock_user_struct(target_itspec, target_addr, 0);
6266     return 0;
6267 }
6268 
/* Copy a struct timex (adjtimex(2) parameter block) from guest memory
 * into host format, field by field.
 * Returns 0 on success, -TARGET_EFAULT if the guest address is bad.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
6303 
/* Copy a host struct timex (adjtimex(2) result block) out to guest
 * memory, field by field.
 * Returns 0 on success, -TARGET_EFAULT if the guest address is bad.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
6338 
6339 
6340 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6341                                                abi_ulong target_addr)
6342 {
6343     struct target_sigevent *target_sevp;
6344 
6345     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6346         return -TARGET_EFAULT;
6347     }
6348 
6349     /* This union is awkward on 64 bit systems because it has a 32 bit
6350      * integer and a pointer in it; we follow the conversion approach
6351      * used for handling sigval types in signal.c so the guest should get
6352      * the correct value back even if we did a 64 bit byteswap and it's
6353      * using the 32 bit integer.
6354      */
6355     host_sevp->sigev_value.sival_ptr =
6356         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6357     host_sevp->sigev_signo =
6358         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6359     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6360     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6361 
6362     unlock_user_struct(target_sevp, target_addr, 1);
6363     return 0;
6364 }
6365 
6366 #if defined(TARGET_NR_mlockall)
6367 static inline int target_to_host_mlockall_arg(int arg)
6368 {
6369     int result = 0;
6370 
6371     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6372         result |= MCL_CURRENT;
6373     }
6374     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6375         result |= MCL_FUTURE;
6376     }
6377     return result;
6378 }
6379 #endif
6380 
6381 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6382      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6383      defined(TARGET_NR_newfstatat))
/* Convert a host struct stat into the target's 64-bit stat layout and
 * write it to guest memory.  On 32-bit ARM the EABI variant of the
 * struct must be used when the vCPU runs in EABI mode.
 * Returns 0 on success, -TARGET_EFAULT if the guest address is bad.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
        /* Some targets reuse the plain stat layout for stat64. */
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
6446 #endif
6447 
6448 /* ??? Using host futex calls even when target atomic operations
6449    are not really atomic probably breaks things.  However implementing
6450    futexes locally would make futexes shared between multiple processes
6451    tricky.  However they're probably useless because guest atomic
6452    operations won't work either.  */
6453 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6454                     target_ulong uaddr2, int val3)
6455 {
6456     struct timespec ts, *pts;
6457     int base_op;
6458 
6459     /* ??? We assume FUTEX_* constants are the same on both host
6460        and target.  */
6461 #ifdef FUTEX_CMD_MASK
6462     base_op = op & FUTEX_CMD_MASK;
6463 #else
6464     base_op = op;
6465 #endif
6466     switch (base_op) {
6467     case FUTEX_WAIT:
6468     case FUTEX_WAIT_BITSET:
6469         if (timeout) {
6470             pts = &ts;
6471             target_to_host_timespec(pts, timeout);
6472         } else {
6473             pts = NULL;
6474         }
6475         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6476                          pts, NULL, val3));
6477     case FUTEX_WAKE:
6478         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6479     case FUTEX_FD:
6480         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6481     case FUTEX_REQUEUE:
6482     case FUTEX_CMP_REQUEUE:
6483     case FUTEX_WAKE_OP:
6484         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6485            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6486            But the prototype takes a `struct timespec *'; insert casts
6487            to satisfy the compiler.  We do not need to tswap TIMEOUT
6488            since it's not compared to guest memory.  */
6489         pts = (struct timespec *)(uintptr_t) timeout;
6490         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6491                                     g2h(uaddr2),
6492                                     (base_op == FUTEX_CMP_REQUEUE
6493                                      ? tswap32(val3)
6494                                      : val3)));
6495     default:
6496         return -TARGET_ENOSYS;
6497     }
6498 }
6499 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): read the guest's file_handle header to
 * learn the buffer size, run the host syscall into a host-side handle,
 * then copy the (opaque) handle and the mount id back to the guest.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;

}
6551 #endif
6552 
6553 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): copy the guest's file_handle into a
 * host-side buffer (byte-swapping the header fields) and open it.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
6585 #endif
6586 
6587 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6588 
/* Emulate signalfd4(2): convert the guest signal mask and flags to host
 * encodings, create the signalfd, and register the fd translator so
 * reads of siginfo are converted back to target format.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only NONBLOCK and CLOEXEC are valid signalfd4 flags. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Future reads from this fd need signalfd_siginfo conversion. */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
6616 #endif
6617 
6618 /* Map host to target signal numbers for the wait family of syscalls.
6619    Assume all other status bits are the same.  */
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        int tsig = host_to_target_signal(WTERMSIG(status));
        return tsig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        int tsig = host_to_target_signal(WSTOPSIG(status));
        return (tsig << 8) | (status & 0xff);
    }
    /* Exit status and continued bits need no translation. */
    return status;
}
6631 
6632 static int open_self_cmdline(void *cpu_env, int fd)
6633 {
6634     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6635     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6636     int i;
6637 
6638     for (i = 0; i < bprm->argc; i++) {
6639         size_t len = strlen(bprm->argv[i]) + 1;
6640 
6641         if (write(fd, bprm->argv[i], len) != len) {
6642             return -1;
6643         }
6644     }
6645 
6646     return 0;
6647 }
6648 
/* Synthesize /proc/self/maps for the guest: parse the host's maps file
 * and emit only the regions that fall inside guest address space, with
 * addresses translated from host to guest.
 * Returns 0 on success, -1 if the host maps file cannot be opened.
 */
static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/self/maps", "r");
    if (fp == NULL) {
        return -1;
    }

    while ((read = getline(&line, &len, fp)) != -1) {
        int fields, dev_maj, dev_min, inode;
        uint64_t min, max, offset;
        char flag_r, flag_w, flag_x, flag_p;
        char path[512] = "";
        /* 10 fields plus an optional pathname at the end of the line. */
        fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
                        " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
                        &flag_p, &offset, &dev_maj, &dev_min, &inode, path);

        if ((fields < 10) || (fields > 11)) {
            continue;
        }
        if (h2g_valid(min)) {
            int flags = page_get_flags(h2g(min));
            /* Clamp regions that extend past the end of guest space. */
            max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }
            if (h2g(min) == ts->info->stack_limit) {
                pstrcpy(path, sizeof(path), "      [stack]");
            }
            dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                    " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
                    h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
                    flag_x, flag_p, offset, dev_maj, dev_min, inode,
                    path[0] ? "         " : "", path);
        }
    }

    free(line);
    fclose(fp);

    return 0;
}
6697 
6698 static int open_self_stat(void *cpu_env, int fd)
6699 {
6700     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6701     TaskState *ts = cpu->opaque;
6702     abi_ulong start_stack = ts->info->start_stack;
6703     int i;
6704 
6705     for (i = 0; i < 44; i++) {
6706       char buf[128];
6707       int len;
6708       uint64_t val = 0;
6709 
6710       if (i == 0) {
6711         /* pid */
6712         val = getpid();
6713         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6714       } else if (i == 1) {
6715         /* app name */
6716         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6717       } else if (i == 27) {
6718         /* stack bottom */
6719         val = start_stack;
6720         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6721       } else {
6722         /* for the rest, there is MasterCard */
6723         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6724       }
6725 
6726       len = strlen(buf);
6727       if (write(fd, buf, len) != len) {
6728           return -1;
6729       }
6730     }
6731 
6732     return 0;
6733 }
6734 
6735 static int open_self_auxv(void *cpu_env, int fd)
6736 {
6737     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6738     TaskState *ts = cpu->opaque;
6739     abi_ulong auxv = ts->info->saved_auxv;
6740     abi_ulong len = ts->info->auxv_len;
6741     char *ptr;
6742 
6743     /*
6744      * Auxiliary vector is stored in target process stack.
6745      * read in whole auxv vector and copy it to file
6746      */
6747     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6748     if (ptr != NULL) {
6749         while (len > 0) {
6750             ssize_t r;
6751             r = write(fd, ptr, len);
6752             if (r <= 0) {
6753                 break;
6754             }
6755             len -= r;
6756             ptr += r;
6757         }
6758         lseek(fd, 0, SEEK_SET);
6759         unlock_user(ptr, auxv, len);
6760     }
6761 
6762     return 0;
6763 }
6764 
/* Return 1 when FILENAME names ENTRY inside this process's /proc
 * directory, i.e. "/proc/self/ENTRY" or "/proc/<our-pid>/ENTRY";
 * 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *p = filename;

    if (strncmp(p, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    p += strlen("/proc/");

    if (strncmp(p, "self/", strlen("self/")) == 0) {
        p += strlen("self/");
    } else if (*p >= '1' && *p <= '9') {
        char pid_prefix[80];

        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(p, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        p += strlen(pid_prefix);
    } else {
        return 0;
    }

    return strcmp(p, entry) == 0;
}
6788 
6789 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
6790     defined(TARGET_SPARC) || defined(TARGET_M68K)
/* Exact-match comparison for fixed /proc paths (the fake-open table
 * stores the full path in this case, not just the entry name).
 */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
6795 #endif
6796 
6797 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6798 static int open_net_route(void *cpu_env, int fd)
6799 {
6800     FILE *fp;
6801     char *line = NULL;
6802     size_t len = 0;
6803     ssize_t read;
6804 
6805     fp = fopen("/proc/net/route", "r");
6806     if (fp == NULL) {
6807         return -1;
6808     }
6809 
6810     /* read header */
6811 
6812     read = getline(&line, &len, fp);
6813     dprintf(fd, "%s", line);
6814 
6815     /* read routes */
6816 
6817     while ((read = getline(&line, &len, fp)) != -1) {
6818         char iface[16];
6819         uint32_t dest, gw, mask;
6820         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6821         int fields;
6822 
6823         fields = sscanf(line,
6824                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6825                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6826                         &mask, &mtu, &window, &irtt);
6827         if (fields != 11) {
6828             continue;
6829         }
6830         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6831                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6832                 metric, tswap32(mask), mtu, window, irtt);
6833     }
6834 
6835     free(line);
6836     fclose(fp);
6837 
6838     return 0;
6839 }
6840 #endif
6841 
6842 #if defined(TARGET_SPARC)
/* Synthesize a minimal /proc/cpuinfo for SPARC guests. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    static const char contents[] = "type\t\t: sun4u\n";

    dprintf(fd, "%s", contents);
    return 0;
}
6848 #endif
6849 
6850 #if defined(TARGET_M68K)
/* Synthesize a minimal /proc/hardware for m68k guests. */
static int open_hardware(void *cpu_env, int fd)
{
    static const char contents[] = "Model:\t\tqemu-m68k\n";

    dprintf(fd, "%s", contents);
    return 0;
}
6856 #endif
6857 
/* openat(2) with QEMU interception: certain /proc paths that would
 * expose host state are replaced with synthesized guest-view content
 * written to an unlinked temporary file; everything else is forwarded
 * to the host via safe_openat().
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;               /* entry (or full path) to match */
        int (*fill)(void *cpu_env, int fd); /* writes fake content into fd */
        int (*cmp)(const char *s1, const char *s2); /* match predicate */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/self/exe: reuse the exec fd if the loader kept one open. */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd keeps the file alive. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            /* Preserve the fill function's errno across close(). */
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
6923 
6924 #define TIMER_MAGIC 0x0caf0000
6925 #define TIMER_MAGIC_MASK 0xffff0000
6926 
6927 /* Convert QEMU provided timer ID back to internal 16bit index format */
6928 static target_timer_t get_timer_id(abi_long arg)
6929 {
6930     target_timer_t timerid = arg;
6931 
6932     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6933         return -TARGET_EINVAL;
6934     }
6935 
6936     timerid &= 0xffff;
6937 
6938     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6939         return -TARGET_EINVAL;
6940     }
6941 
6942     return timerid;
6943 }
6944 
/* Convert a sched_setaffinity(2)-style CPU mask from guest memory into
 * the host's unsigned long bitmap layout, remapping each set bit since
 * the guest and host word sizes may differ.
 * Returns 0 on success, -TARGET_EFAULT if the guest address is bad.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    /* Callers size the host buffer to hold at least the guest mask. */
    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        /* Set each host bit corresponding to a set guest bit. */
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}
6978 
6979 static int host_to_target_cpu_mask(const unsigned long *host_mask,
6980                                    size_t host_size,
6981                                    abi_ulong target_addr,
6982                                    size_t target_size)
6983 {
6984     unsigned target_bits = sizeof(abi_ulong) * 8;
6985     unsigned host_bits = sizeof(*host_mask) * 8;
6986     abi_ulong *target_mask;
6987     unsigned i, j;
6988 
6989     assert(host_size >= target_size);
6990 
6991     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
6992     if (!target_mask) {
6993         return -TARGET_EFAULT;
6994     }
6995 
6996     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6997         unsigned bit = i * target_bits;
6998         abi_ulong val = 0;
6999 
7000         for (j = 0; j < target_bits; j++, bit++) {
7001             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7002                 val |= 1UL << j;
7003             }
7004         }
7005         __put_user(val, &target_mask[i]);
7006     }
7007 
7008     unlock_user(target_mask, target_addr, target_size);
7009     return 0;
7010 }
7011 
7012 /* This is an internal helper for do_syscall so that it is easier
7013  * to have a single return point, so that actions, such as logging
7014  * of syscall results, can be performed.
7015  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7016  */
7017 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7018                             abi_long arg2, abi_long arg3, abi_long arg4,
7019                             abi_long arg5, abi_long arg6, abi_long arg7,
7020                             abi_long arg8)
7021 {
7022     CPUState *cpu = ENV_GET_CPU(cpu_env);
7023     abi_long ret;
7024 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7025     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7026     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
7027     struct stat st;
7028 #endif
7029 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7030     || defined(TARGET_NR_fstatfs)
7031     struct statfs stfs;
7032 #endif
7033     void *p;
7034 
7035     switch(num) {
7036     case TARGET_NR_exit:
7037         /* In old applications this may be used to implement _exit(2).
7038            However in threaded applictions it is used for thread termination,
7039            and _exit_group is used for application termination.
7040            Do thread termination if we have more then one thread.  */
7041 
7042         if (block_signals()) {
7043             return -TARGET_ERESTARTSYS;
7044         }
7045 
7046         cpu_list_lock();
7047 
7048         if (CPU_NEXT(first_cpu)) {
7049             TaskState *ts;
7050 
7051             /* Remove the CPU from the list.  */
7052             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7053 
7054             cpu_list_unlock();
7055 
7056             ts = cpu->opaque;
7057             if (ts->child_tidptr) {
7058                 put_user_u32(0, ts->child_tidptr);
7059                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7060                           NULL, NULL, 0);
7061             }
7062             thread_cpu = NULL;
7063             object_unref(OBJECT(cpu));
7064             g_free(ts);
7065             rcu_unregister_thread();
7066             pthread_exit(NULL);
7067         }
7068 
7069         cpu_list_unlock();
7070         preexit_cleanup(cpu_env, arg1);
7071         _exit(arg1);
7072         return 0; /* avoid warning */
7073     case TARGET_NR_read:
7074         if (arg2 == 0 && arg3 == 0) {
7075             return get_errno(safe_read(arg1, 0, 0));
7076         } else {
7077             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7078                 return -TARGET_EFAULT;
7079             ret = get_errno(safe_read(arg1, p, arg3));
7080             if (ret >= 0 &&
7081                 fd_trans_host_to_target_data(arg1)) {
7082                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7083             }
7084             unlock_user(p, arg2, ret);
7085         }
7086         return ret;
7087     case TARGET_NR_write:
7088         if (arg2 == 0 && arg3 == 0) {
7089             return get_errno(safe_write(arg1, 0, 0));
7090         }
7091         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7092             return -TARGET_EFAULT;
7093         if (fd_trans_target_to_host_data(arg1)) {
7094             void *copy = g_malloc(arg3);
7095             memcpy(copy, p, arg3);
7096             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7097             if (ret >= 0) {
7098                 ret = get_errno(safe_write(arg1, copy, ret));
7099             }
7100             g_free(copy);
7101         } else {
7102             ret = get_errno(safe_write(arg1, p, arg3));
7103         }
7104         unlock_user(p, arg2, 0);
7105         return ret;
7106 
7107 #ifdef TARGET_NR_open
7108     case TARGET_NR_open:
7109         if (!(p = lock_user_string(arg1)))
7110             return -TARGET_EFAULT;
7111         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7112                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7113                                   arg3));
7114         fd_trans_unregister(ret);
7115         unlock_user(p, arg1, 0);
7116         return ret;
7117 #endif
7118     case TARGET_NR_openat:
7119         if (!(p = lock_user_string(arg2)))
7120             return -TARGET_EFAULT;
7121         ret = get_errno(do_openat(cpu_env, arg1, p,
7122                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7123                                   arg4));
7124         fd_trans_unregister(ret);
7125         unlock_user(p, arg2, 0);
7126         return ret;
7127 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7128     case TARGET_NR_name_to_handle_at:
7129         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7130         return ret;
7131 #endif
7132 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7133     case TARGET_NR_open_by_handle_at:
7134         ret = do_open_by_handle_at(arg1, arg2, arg3);
7135         fd_trans_unregister(ret);
7136         return ret;
7137 #endif
7138     case TARGET_NR_close:
7139         fd_trans_unregister(arg1);
7140         return get_errno(close(arg1));
7141 
7142     case TARGET_NR_brk:
7143         return do_brk(arg1);
7144 #ifdef TARGET_NR_fork
7145     case TARGET_NR_fork:
7146         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7147 #endif
7148 #ifdef TARGET_NR_waitpid
7149     case TARGET_NR_waitpid:
7150         {
7151             int status;
7152             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7153             if (!is_error(ret) && arg2 && ret
7154                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7155                 return -TARGET_EFAULT;
7156         }
7157         return ret;
7158 #endif
7159 #ifdef TARGET_NR_waitid
7160     case TARGET_NR_waitid:
7161         {
7162             siginfo_t info;
7163             info.si_pid = 0;
7164             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7165             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7166                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7167                     return -TARGET_EFAULT;
7168                 host_to_target_siginfo(p, &info);
7169                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7170             }
7171         }
7172         return ret;
7173 #endif
7174 #ifdef TARGET_NR_creat /* not on alpha */
7175     case TARGET_NR_creat:
7176         if (!(p = lock_user_string(arg1)))
7177             return -TARGET_EFAULT;
7178         ret = get_errno(creat(p, arg2));
7179         fd_trans_unregister(ret);
7180         unlock_user(p, arg1, 0);
7181         return ret;
7182 #endif
7183 #ifdef TARGET_NR_link
7184     case TARGET_NR_link:
7185         {
7186             void * p2;
7187             p = lock_user_string(arg1);
7188             p2 = lock_user_string(arg2);
7189             if (!p || !p2)
7190                 ret = -TARGET_EFAULT;
7191             else
7192                 ret = get_errno(link(p, p2));
7193             unlock_user(p2, arg2, 0);
7194             unlock_user(p, arg1, 0);
7195         }
7196         return ret;
7197 #endif
7198 #if defined(TARGET_NR_linkat)
7199     case TARGET_NR_linkat:
7200         {
7201             void * p2 = NULL;
7202             if (!arg2 || !arg4)
7203                 return -TARGET_EFAULT;
7204             p  = lock_user_string(arg2);
7205             p2 = lock_user_string(arg4);
7206             if (!p || !p2)
7207                 ret = -TARGET_EFAULT;
7208             else
7209                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7210             unlock_user(p, arg2, 0);
7211             unlock_user(p2, arg4, 0);
7212         }
7213         return ret;
7214 #endif
7215 #ifdef TARGET_NR_unlink
7216     case TARGET_NR_unlink:
7217         if (!(p = lock_user_string(arg1)))
7218             return -TARGET_EFAULT;
7219         ret = get_errno(unlink(p));
7220         unlock_user(p, arg1, 0);
7221         return ret;
7222 #endif
7223 #if defined(TARGET_NR_unlinkat)
7224     case TARGET_NR_unlinkat:
7225         if (!(p = lock_user_string(arg2)))
7226             return -TARGET_EFAULT;
7227         ret = get_errno(unlinkat(arg1, p, arg3));
7228         unlock_user(p, arg2, 0);
7229         return ret;
7230 #endif
7231     case TARGET_NR_execve:
7232         {
7233             char **argp, **envp;
7234             int argc, envc;
7235             abi_ulong gp;
7236             abi_ulong guest_argp;
7237             abi_ulong guest_envp;
7238             abi_ulong addr;
7239             char **q;
7240             int total_size = 0;
7241 
7242             argc = 0;
7243             guest_argp = arg2;
7244             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7245                 if (get_user_ual(addr, gp))
7246                     return -TARGET_EFAULT;
7247                 if (!addr)
7248                     break;
7249                 argc++;
7250             }
7251             envc = 0;
7252             guest_envp = arg3;
7253             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7254                 if (get_user_ual(addr, gp))
7255                     return -TARGET_EFAULT;
7256                 if (!addr)
7257                     break;
7258                 envc++;
7259             }
7260 
7261             argp = g_new0(char *, argc + 1);
7262             envp = g_new0(char *, envc + 1);
7263 
7264             for (gp = guest_argp, q = argp; gp;
7265                   gp += sizeof(abi_ulong), q++) {
7266                 if (get_user_ual(addr, gp))
7267                     goto execve_efault;
7268                 if (!addr)
7269                     break;
7270                 if (!(*q = lock_user_string(addr)))
7271                     goto execve_efault;
7272                 total_size += strlen(*q) + 1;
7273             }
7274             *q = NULL;
7275 
7276             for (gp = guest_envp, q = envp; gp;
7277                   gp += sizeof(abi_ulong), q++) {
7278                 if (get_user_ual(addr, gp))
7279                     goto execve_efault;
7280                 if (!addr)
7281                     break;
7282                 if (!(*q = lock_user_string(addr)))
7283                     goto execve_efault;
7284                 total_size += strlen(*q) + 1;
7285             }
7286             *q = NULL;
7287 
7288             if (!(p = lock_user_string(arg1)))
7289                 goto execve_efault;
7290             /* Although execve() is not an interruptible syscall it is
7291              * a special case where we must use the safe_syscall wrapper:
7292              * if we allow a signal to happen before we make the host
7293              * syscall then we will 'lose' it, because at the point of
7294              * execve the process leaves QEMU's control. So we use the
7295              * safe syscall wrapper to ensure that we either take the
7296              * signal as a guest signal, or else it does not happen
7297              * before the execve completes and makes it the other
7298              * program's problem.
7299              */
7300             ret = get_errno(safe_execve(p, argp, envp));
7301             unlock_user(p, arg1, 0);
7302 
7303             goto execve_end;
7304 
7305         execve_efault:
7306             ret = -TARGET_EFAULT;
7307 
7308         execve_end:
7309             for (gp = guest_argp, q = argp; *q;
7310                   gp += sizeof(abi_ulong), q++) {
7311                 if (get_user_ual(addr, gp)
7312                     || !addr)
7313                     break;
7314                 unlock_user(*q, addr, 0);
7315             }
7316             for (gp = guest_envp, q = envp; *q;
7317                   gp += sizeof(abi_ulong), q++) {
7318                 if (get_user_ual(addr, gp)
7319                     || !addr)
7320                     break;
7321                 unlock_user(*q, addr, 0);
7322             }
7323 
7324             g_free(argp);
7325             g_free(envp);
7326         }
7327         return ret;
7328     case TARGET_NR_chdir:
7329         if (!(p = lock_user_string(arg1)))
7330             return -TARGET_EFAULT;
7331         ret = get_errno(chdir(p));
7332         unlock_user(p, arg1, 0);
7333         return ret;
7334 #ifdef TARGET_NR_time
7335     case TARGET_NR_time:
7336         {
7337             time_t host_time;
7338             ret = get_errno(time(&host_time));
7339             if (!is_error(ret)
7340                 && arg1
7341                 && put_user_sal(host_time, arg1))
7342                 return -TARGET_EFAULT;
7343         }
7344         return ret;
7345 #endif
7346 #ifdef TARGET_NR_mknod
7347     case TARGET_NR_mknod:
7348         if (!(p = lock_user_string(arg1)))
7349             return -TARGET_EFAULT;
7350         ret = get_errno(mknod(p, arg2, arg3));
7351         unlock_user(p, arg1, 0);
7352         return ret;
7353 #endif
7354 #if defined(TARGET_NR_mknodat)
7355     case TARGET_NR_mknodat:
7356         if (!(p = lock_user_string(arg2)))
7357             return -TARGET_EFAULT;
7358         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7359         unlock_user(p, arg2, 0);
7360         return ret;
7361 #endif
7362 #ifdef TARGET_NR_chmod
7363     case TARGET_NR_chmod:
7364         if (!(p = lock_user_string(arg1)))
7365             return -TARGET_EFAULT;
7366         ret = get_errno(chmod(p, arg2));
7367         unlock_user(p, arg1, 0);
7368         return ret;
7369 #endif
7370 #ifdef TARGET_NR_lseek
7371     case TARGET_NR_lseek:
7372         return get_errno(lseek(arg1, arg2, arg3));
7373 #endif
7374 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7375     /* Alpha specific */
7376     case TARGET_NR_getxpid:
7377         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7378         return get_errno(getpid());
7379 #endif
7380 #ifdef TARGET_NR_getpid
7381     case TARGET_NR_getpid:
7382         return get_errno(getpid());
7383 #endif
7384     case TARGET_NR_mount:
7385         {
7386             /* need to look at the data field */
7387             void *p2, *p3;
7388 
7389             if (arg1) {
7390                 p = lock_user_string(arg1);
7391                 if (!p) {
7392                     return -TARGET_EFAULT;
7393                 }
7394             } else {
7395                 p = NULL;
7396             }
7397 
7398             p2 = lock_user_string(arg2);
7399             if (!p2) {
7400                 if (arg1) {
7401                     unlock_user(p, arg1, 0);
7402                 }
7403                 return -TARGET_EFAULT;
7404             }
7405 
7406             if (arg3) {
7407                 p3 = lock_user_string(arg3);
7408                 if (!p3) {
7409                     if (arg1) {
7410                         unlock_user(p, arg1, 0);
7411                     }
7412                     unlock_user(p2, arg2, 0);
7413                     return -TARGET_EFAULT;
7414                 }
7415             } else {
7416                 p3 = NULL;
7417             }
7418 
7419             /* FIXME - arg5 should be locked, but it isn't clear how to
7420              * do that since it's not guaranteed to be a NULL-terminated
7421              * string.
7422              */
7423             if (!arg5) {
7424                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7425             } else {
7426                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7427             }
7428             ret = get_errno(ret);
7429 
7430             if (arg1) {
7431                 unlock_user(p, arg1, 0);
7432             }
7433             unlock_user(p2, arg2, 0);
7434             if (arg3) {
7435                 unlock_user(p3, arg3, 0);
7436             }
7437         }
7438         return ret;
7439 #ifdef TARGET_NR_umount
7440     case TARGET_NR_umount:
7441         if (!(p = lock_user_string(arg1)))
7442             return -TARGET_EFAULT;
7443         ret = get_errno(umount(p));
7444         unlock_user(p, arg1, 0);
7445         return ret;
7446 #endif
7447 #ifdef TARGET_NR_stime /* not on alpha */
7448     case TARGET_NR_stime:
7449         {
7450             time_t host_time;
7451             if (get_user_sal(host_time, arg1))
7452                 return -TARGET_EFAULT;
7453             return get_errno(stime(&host_time));
7454         }
7455 #endif
7456 #ifdef TARGET_NR_alarm /* not on alpha */
7457     case TARGET_NR_alarm:
7458         return alarm(arg1);
7459 #endif
7460 #ifdef TARGET_NR_pause /* not on alpha */
7461     case TARGET_NR_pause:
7462         if (!block_signals()) {
7463             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7464         }
7465         return -TARGET_EINTR;
7466 #endif
7467 #ifdef TARGET_NR_utime
7468     case TARGET_NR_utime:
7469         {
7470             struct utimbuf tbuf, *host_tbuf;
7471             struct target_utimbuf *target_tbuf;
7472             if (arg2) {
7473                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7474                     return -TARGET_EFAULT;
7475                 tbuf.actime = tswapal(target_tbuf->actime);
7476                 tbuf.modtime = tswapal(target_tbuf->modtime);
7477                 unlock_user_struct(target_tbuf, arg2, 0);
7478                 host_tbuf = &tbuf;
7479             } else {
7480                 host_tbuf = NULL;
7481             }
7482             if (!(p = lock_user_string(arg1)))
7483                 return -TARGET_EFAULT;
7484             ret = get_errno(utime(p, host_tbuf));
7485             unlock_user(p, arg1, 0);
7486         }
7487         return ret;
7488 #endif
7489 #ifdef TARGET_NR_utimes
7490     case TARGET_NR_utimes:
7491         {
7492             struct timeval *tvp, tv[2];
7493             if (arg2) {
7494                 if (copy_from_user_timeval(&tv[0], arg2)
7495                     || copy_from_user_timeval(&tv[1],
7496                                               arg2 + sizeof(struct target_timeval)))
7497                     return -TARGET_EFAULT;
7498                 tvp = tv;
7499             } else {
7500                 tvp = NULL;
7501             }
7502             if (!(p = lock_user_string(arg1)))
7503                 return -TARGET_EFAULT;
7504             ret = get_errno(utimes(p, tvp));
7505             unlock_user(p, arg1, 0);
7506         }
7507         return ret;
7508 #endif
7509 #if defined(TARGET_NR_futimesat)
7510     case TARGET_NR_futimesat:
7511         {
7512             struct timeval *tvp, tv[2];
7513             if (arg3) {
7514                 if (copy_from_user_timeval(&tv[0], arg3)
7515                     || copy_from_user_timeval(&tv[1],
7516                                               arg3 + sizeof(struct target_timeval)))
7517                     return -TARGET_EFAULT;
7518                 tvp = tv;
7519             } else {
7520                 tvp = NULL;
7521             }
7522             if (!(p = lock_user_string(arg2))) {
7523                 return -TARGET_EFAULT;
7524             }
7525             ret = get_errno(futimesat(arg1, path(p), tvp));
7526             unlock_user(p, arg2, 0);
7527         }
7528         return ret;
7529 #endif
7530 #ifdef TARGET_NR_access
7531     case TARGET_NR_access:
7532         if (!(p = lock_user_string(arg1))) {
7533             return -TARGET_EFAULT;
7534         }
7535         ret = get_errno(access(path(p), arg2));
7536         unlock_user(p, arg1, 0);
7537         return ret;
7538 #endif
7539 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7540     case TARGET_NR_faccessat:
7541         if (!(p = lock_user_string(arg2))) {
7542             return -TARGET_EFAULT;
7543         }
7544         ret = get_errno(faccessat(arg1, p, arg3, 0));
7545         unlock_user(p, arg2, 0);
7546         return ret;
7547 #endif
7548 #ifdef TARGET_NR_nice /* not on alpha */
7549     case TARGET_NR_nice:
7550         return get_errno(nice(arg1));
7551 #endif
7552     case TARGET_NR_sync:
7553         sync();
7554         return 0;
7555 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7556     case TARGET_NR_syncfs:
7557         return get_errno(syncfs(arg1));
7558 #endif
7559     case TARGET_NR_kill:
7560         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7561 #ifdef TARGET_NR_rename
7562     case TARGET_NR_rename:
7563         {
7564             void *p2;
7565             p = lock_user_string(arg1);
7566             p2 = lock_user_string(arg2);
7567             if (!p || !p2)
7568                 ret = -TARGET_EFAULT;
7569             else
7570                 ret = get_errno(rename(p, p2));
7571             unlock_user(p2, arg2, 0);
7572             unlock_user(p, arg1, 0);
7573         }
7574         return ret;
7575 #endif
7576 #if defined(TARGET_NR_renameat)
7577     case TARGET_NR_renameat:
7578         {
7579             void *p2;
7580             p  = lock_user_string(arg2);
7581             p2 = lock_user_string(arg4);
7582             if (!p || !p2)
7583                 ret = -TARGET_EFAULT;
7584             else
7585                 ret = get_errno(renameat(arg1, p, arg3, p2));
7586             unlock_user(p2, arg4, 0);
7587             unlock_user(p, arg2, 0);
7588         }
7589         return ret;
7590 #endif
7591 #if defined(TARGET_NR_renameat2)
7592     case TARGET_NR_renameat2:
7593         {
7594             void *p2;
7595             p  = lock_user_string(arg2);
7596             p2 = lock_user_string(arg4);
7597             if (!p || !p2) {
7598                 ret = -TARGET_EFAULT;
7599             } else {
7600                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7601             }
7602             unlock_user(p2, arg4, 0);
7603             unlock_user(p, arg2, 0);
7604         }
7605         return ret;
7606 #endif
7607 #ifdef TARGET_NR_mkdir
7608     case TARGET_NR_mkdir:
7609         if (!(p = lock_user_string(arg1)))
7610             return -TARGET_EFAULT;
7611         ret = get_errno(mkdir(p, arg2));
7612         unlock_user(p, arg1, 0);
7613         return ret;
7614 #endif
7615 #if defined(TARGET_NR_mkdirat)
7616     case TARGET_NR_mkdirat:
7617         if (!(p = lock_user_string(arg2)))
7618             return -TARGET_EFAULT;
7619         ret = get_errno(mkdirat(arg1, p, arg3));
7620         unlock_user(p, arg2, 0);
7621         return ret;
7622 #endif
7623 #ifdef TARGET_NR_rmdir
7624     case TARGET_NR_rmdir:
7625         if (!(p = lock_user_string(arg1)))
7626             return -TARGET_EFAULT;
7627         ret = get_errno(rmdir(p));
7628         unlock_user(p, arg1, 0);
7629         return ret;
7630 #endif
7631     case TARGET_NR_dup:
7632         ret = get_errno(dup(arg1));
7633         if (ret >= 0) {
7634             fd_trans_dup(arg1, ret);
7635         }
7636         return ret;
7637 #ifdef TARGET_NR_pipe
7638     case TARGET_NR_pipe:
7639         return do_pipe(cpu_env, arg1, 0, 0);
7640 #endif
7641 #ifdef TARGET_NR_pipe2
7642     case TARGET_NR_pipe2:
7643         return do_pipe(cpu_env, arg1,
7644                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7645 #endif
7646     case TARGET_NR_times:
7647         {
7648             struct target_tms *tmsp;
7649             struct tms tms;
7650             ret = get_errno(times(&tms));
7651             if (arg1) {
7652                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7653                 if (!tmsp)
7654                     return -TARGET_EFAULT;
7655                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7656                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7657                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7658                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7659             }
7660             if (!is_error(ret))
7661                 ret = host_to_target_clock_t(ret);
7662         }
7663         return ret;
7664     case TARGET_NR_acct:
7665         if (arg1 == 0) {
7666             ret = get_errno(acct(NULL));
7667         } else {
7668             if (!(p = lock_user_string(arg1))) {
7669                 return -TARGET_EFAULT;
7670             }
7671             ret = get_errno(acct(path(p)));
7672             unlock_user(p, arg1, 0);
7673         }
7674         return ret;
7675 #ifdef TARGET_NR_umount2
7676     case TARGET_NR_umount2:
7677         if (!(p = lock_user_string(arg1)))
7678             return -TARGET_EFAULT;
7679         ret = get_errno(umount2(p, arg2));
7680         unlock_user(p, arg1, 0);
7681         return ret;
7682 #endif
7683     case TARGET_NR_ioctl:
7684         return do_ioctl(arg1, arg2, arg3);
7685 #ifdef TARGET_NR_fcntl
7686     case TARGET_NR_fcntl:
7687         return do_fcntl(arg1, arg2, arg3);
7688 #endif
7689     case TARGET_NR_setpgid:
7690         return get_errno(setpgid(arg1, arg2));
7691     case TARGET_NR_umask:
7692         return get_errno(umask(arg1));
7693     case TARGET_NR_chroot:
7694         if (!(p = lock_user_string(arg1)))
7695             return -TARGET_EFAULT;
7696         ret = get_errno(chroot(p));
7697         unlock_user(p, arg1, 0);
7698         return ret;
7699 #ifdef TARGET_NR_dup2
7700     case TARGET_NR_dup2:
7701         ret = get_errno(dup2(arg1, arg2));
7702         if (ret >= 0) {
7703             fd_trans_dup(arg1, arg2);
7704         }
7705         return ret;
7706 #endif
7707 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7708     case TARGET_NR_dup3:
7709     {
7710         int host_flags;
7711 
7712         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7713             return -EINVAL;
7714         }
7715         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7716         ret = get_errno(dup3(arg1, arg2, host_flags));
7717         if (ret >= 0) {
7718             fd_trans_dup(arg1, arg2);
7719         }
7720         return ret;
7721     }
7722 #endif
7723 #ifdef TARGET_NR_getppid /* not on alpha */
7724     case TARGET_NR_getppid:
7725         return get_errno(getppid());
7726 #endif
7727 #ifdef TARGET_NR_getpgrp
7728     case TARGET_NR_getpgrp:
7729         return get_errno(getpgrp());
7730 #endif
7731     case TARGET_NR_setsid:
7732         return get_errno(setsid());
7733 #ifdef TARGET_NR_sigaction
7734     case TARGET_NR_sigaction:
7735         {
7736 #if defined(TARGET_ALPHA)
7737             struct target_sigaction act, oact, *pact = 0;
7738             struct target_old_sigaction *old_act;
7739             if (arg2) {
7740                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7741                     return -TARGET_EFAULT;
7742                 act._sa_handler = old_act->_sa_handler;
7743                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7744                 act.sa_flags = old_act->sa_flags;
7745                 act.sa_restorer = 0;
7746                 unlock_user_struct(old_act, arg2, 0);
7747                 pact = &act;
7748             }
7749             ret = get_errno(do_sigaction(arg1, pact, &oact));
7750             if (!is_error(ret) && arg3) {
7751                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7752                     return -TARGET_EFAULT;
7753                 old_act->_sa_handler = oact._sa_handler;
7754                 old_act->sa_mask = oact.sa_mask.sig[0];
7755                 old_act->sa_flags = oact.sa_flags;
7756                 unlock_user_struct(old_act, arg3, 1);
7757             }
7758 #elif defined(TARGET_MIPS)
7759 	    struct target_sigaction act, oact, *pact, *old_act;
7760 
7761 	    if (arg2) {
7762                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7763                     return -TARGET_EFAULT;
7764 		act._sa_handler = old_act->_sa_handler;
7765 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7766 		act.sa_flags = old_act->sa_flags;
7767 		unlock_user_struct(old_act, arg2, 0);
7768 		pact = &act;
7769 	    } else {
7770 		pact = NULL;
7771 	    }
7772 
7773 	    ret = get_errno(do_sigaction(arg1, pact, &oact));
7774 
7775 	    if (!is_error(ret) && arg3) {
7776                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7777                     return -TARGET_EFAULT;
7778 		old_act->_sa_handler = oact._sa_handler;
7779 		old_act->sa_flags = oact.sa_flags;
7780 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7781 		old_act->sa_mask.sig[1] = 0;
7782 		old_act->sa_mask.sig[2] = 0;
7783 		old_act->sa_mask.sig[3] = 0;
7784 		unlock_user_struct(old_act, arg3, 1);
7785 	    }
7786 #else
7787             struct target_old_sigaction *old_act;
7788             struct target_sigaction act, oact, *pact;
7789             if (arg2) {
7790                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7791                     return -TARGET_EFAULT;
7792                 act._sa_handler = old_act->_sa_handler;
7793                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7794                 act.sa_flags = old_act->sa_flags;
7795                 act.sa_restorer = old_act->sa_restorer;
7796 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7797                 act.ka_restorer = 0;
7798 #endif
7799                 unlock_user_struct(old_act, arg2, 0);
7800                 pact = &act;
7801             } else {
7802                 pact = NULL;
7803             }
7804             ret = get_errno(do_sigaction(arg1, pact, &oact));
7805             if (!is_error(ret) && arg3) {
7806                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7807                     return -TARGET_EFAULT;
7808                 old_act->_sa_handler = oact._sa_handler;
7809                 old_act->sa_mask = oact.sa_mask.sig[0];
7810                 old_act->sa_flags = oact.sa_flags;
7811                 old_act->sa_restorer = oact.sa_restorer;
7812                 unlock_user_struct(old_act, arg3, 1);
7813             }
7814 #endif
7815         }
7816         return ret;
7817 #endif
    case TARGET_NR_rt_sigaction:
        {
#if defined(TARGET_ALPHA)
            /* For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             * Alpha also has a separate rt_sigaction struct that it uses
             * here; SPARC uses the usual sigaction struct.
             */
            struct target_rt_sigaction *rt_act;
            struct target_sigaction act, oact, *pact = 0;

            /* Like the kernel, reject any sigsetsize other than ours. */
            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = rt_act->_sa_handler;
                act.sa_mask = rt_act->sa_mask;
                act.sa_flags = rt_act->sa_flags;
                /* Alpha passes the restorer as a separate syscall arg. */
                act.sa_restorer = arg5;
                unlock_user_struct(rt_act, arg2, 0);
                pact = &act;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                /* Copy the previous action back to the guest. */
                if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
                    return -TARGET_EFAULT;
                rt_act->_sa_handler = oact._sa_handler;
                rt_act->sa_mask = oact.sa_mask;
                rt_act->sa_flags = oact.sa_flags;
                unlock_user_struct(rt_act, arg3, 1);
            }
#else
#ifdef TARGET_SPARC
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
#endif
            struct target_sigaction *act;
            struct target_sigaction *oact;

            /* Like the kernel, reject any sigsetsize other than ours. */
            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                    return -TARGET_EFAULT;
                }
#ifdef TARGET_ARCH_HAS_KA_RESTORER
                /* The separate restorer syscall argument (see above). */
                act->ka_restorer = restorer;
#endif
            } else {
                act = NULL;
            }
            if (arg3) {
                if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                    ret = -TARGET_EFAULT;
                    goto rt_sigaction_fail;
                }
            } else
                oact = NULL;
            ret = get_errno(do_sigaction(arg1, act, oact));
	rt_sigaction_fail:
            /* Unlock whichever guest structs we managed to lock. */
            if (act)
                unlock_user_struct(act, arg2, 0);
            if (oact)
                unlock_user_struct(oact, arg3, 1);
#endif
        }
        return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            /* Read the current signal mask without modifying it and
             * return it in the old (single-word) sigset format. */
            sigset_t host_mask;

            ret = do_sigprocmask(0, NULL, &host_mask);
            if (ret == 0) {
                abi_ulong old_style_mask;
                host_to_target_old_sigset(&old_style_mask, &host_mask);
                ret = old_style_mask;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            /* Install a complete new signal mask (old single-word
             * format, passed by value in arg1) and return the previous
             * mask on success. */
            sigset_t new_mask, prev_mask;
            abi_ulong guest_mask = arg1;

            target_to_host_old_sigset(&new_mask, &guest_mask);
            ret = do_sigprocmask(SIG_SETMASK, &new_mask, &prev_mask);
            if (ret == 0) {
                host_to_target_old_sigset(&guest_mask, &prev_mask);
                ret = guest_mask;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
#if defined(TARGET_ALPHA)
            /* Alpha's variant takes the mask by value in arg2 and
             * returns the previous mask as the syscall result rather
             * than through guest-memory pointers. */
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            /* Map the guest SIG_* operation to the host's value. */
            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
            }
#else
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                /* Map the guest SIG_* operation to the host's value. */
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                /* No new mask supplied: just query the current one. */
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                /* Write the previous mask back to the guest. */
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            /* Like the kernel, reject any sigsetsize other than ours. */
            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (arg2) {
                /* Map the guest SIG_* operation to the host's value. */
                switch(how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                /* No new mask: 'how' is irrelevant, just query. */
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                /* Write the previous mask back to the guest. */
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            /* Report the set of signals pending for delivery, converted
             * to the old guest sigset format. */
            sigset_t pending;

            ret = get_errno(sigpending(&pending));
            if (!is_error(ret)) {
                p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                host_to_target_old_sigset(p, &pending);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
8043     case TARGET_NR_rt_sigpending:
8044         {
8045             sigset_t set;
8046 
8047             /* Yes, this check is >, not != like most. We follow the kernel's
8048              * logic and it does it like this because it implements
8049              * NR_sigpending through the same code path, and in that case
8050              * the old_sigset_t is smaller in size.
8051              */
8052             if (arg2 > sizeof(target_sigset_t)) {
8053                 return -TARGET_EINVAL;
8054             }
8055 
8056             ret = get_errno(sigpending(&set));
8057             if (!is_error(ret)) {
8058                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8059                     return -TARGET_EFAULT;
8060                 host_to_target_sigset(p, &set);
8061                 unlock_user(p, arg1, sizeof(target_sigset_t));
8062             }
8063         }
8064         return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            /* Old-style sigsuspend: wait for a signal with a temporary
             * mask. The mask is stashed in the TaskState so the
             * signal-delivery code can restore the original afterwards. */
            TaskState *ts = cpu->opaque;
#if defined(TARGET_ALPHA)
            /* On Alpha the mask is passed by value in arg1, not via a
             * guest pointer. */
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
#else
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_old_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
#endif
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                /* Flag that the saved mask must be restored on return. */
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#endif
8086     case TARGET_NR_rt_sigsuspend:
8087         {
8088             TaskState *ts = cpu->opaque;
8089 
8090             if (arg2 != sizeof(target_sigset_t)) {
8091                 return -TARGET_EINVAL;
8092             }
8093             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8094                 return -TARGET_EFAULT;
8095             target_to_host_sigset(&ts->sigsuspend_mask, p);
8096             unlock_user(p, arg1, 0);
8097             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8098                                                SIGSET_T_SIZE));
8099             if (ret != -TARGET_ERESTARTSYS) {
8100                 ts->in_sigsuspend = 1;
8101             }
8102         }
8103         return ret;
8104     case TARGET_NR_rt_sigtimedwait:
8105         {
8106             sigset_t set;
8107             struct timespec uts, *puts;
8108             siginfo_t uinfo;
8109 
8110             if (arg4 != sizeof(target_sigset_t)) {
8111                 return -TARGET_EINVAL;
8112             }
8113 
8114             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8115                 return -TARGET_EFAULT;
8116             target_to_host_sigset(&set, p);
8117             unlock_user(p, arg1, 0);
8118             if (arg3) {
8119                 puts = &uts;
8120                 target_to_host_timespec(puts, arg3);
8121             } else {
8122                 puts = NULL;
8123             }
8124             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8125                                                  SIGSET_T_SIZE));
8126             if (!is_error(ret)) {
8127                 if (arg2) {
8128                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8129                                   0);
8130                     if (!p) {
8131                         return -TARGET_EFAULT;
8132                     }
8133                     host_to_target_siginfo(p, &uinfo);
8134                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8135                 }
8136                 ret = host_to_target_signal(ret);
8137             }
8138         }
8139         return ret;
8140     case TARGET_NR_rt_sigqueueinfo:
8141         {
8142             siginfo_t uinfo;
8143 
8144             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8145             if (!p) {
8146                 return -TARGET_EFAULT;
8147             }
8148             target_to_host_siginfo(&uinfo, p);
8149             unlock_user(p, arg3, 0);
8150             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8151         }
8152         return ret;
8153     case TARGET_NR_rt_tgsigqueueinfo:
8154         {
8155             siginfo_t uinfo;
8156 
8157             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8158             if (!p) {
8159                 return -TARGET_EFAULT;
8160             }
8161             target_to_host_siginfo(&uinfo, p);
8162             unlock_user(p, arg4, 0);
8163             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8164         }
8165         return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* Return from an old-style signal handler: restore the saved
         * guest context. Signals must be blocked while the frame is
         * unwound; if blocking fails, have the guest retry. */
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        /* As above, for the rt_ signal frame layout. */
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
8178     case TARGET_NR_sethostname:
8179         if (!(p = lock_user_string(arg1)))
8180             return -TARGET_EFAULT;
8181         ret = get_errno(sethostname(p, arg2));
8182         unlock_user(p, arg1, 0);
8183         return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            /* Convert the guest resource id and rlimit values to host
             * representation before applying. */
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                /* Memory limit: fake success, see comment above. */
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        {
            /* Fetch a host resource limit and convert it into the
             * guest's rlimit layout. */
            int host_resource = target_to_host_resource(arg1);
            struct rlimit host_rlim;
            struct target_rlimit *guest_rlim;

            ret = get_errno(getrlimit(host_resource, &host_rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, guest_rlim, arg2, 0)) {
                    return -TARGET_EFAULT;
                }
                guest_rlim->rlim_cur = host_to_target_rlim(host_rlim.rlim_cur);
                guest_rlim->rlim_max = host_to_target_rlim(host_rlim.rlim_max);
                unlock_user_struct(guest_rlim, arg2, 1);
            }
        }
        return ret;
#endif
8230     case TARGET_NR_getrusage:
8231         {
8232             struct rusage rusage;
8233             ret = get_errno(getrusage(arg1, &rusage));
8234             if (!is_error(ret)) {
8235                 ret = host_to_target_rusage(arg2, &rusage);
8236             }
8237         }
8238         return ret;
8239     case TARGET_NR_gettimeofday:
8240         {
8241             struct timeval tv;
8242             ret = get_errno(gettimeofday(&tv, NULL));
8243             if (!is_error(ret)) {
8244                 if (copy_to_user_timeval(arg1, &tv))
8245                     return -TARGET_EFAULT;
8246             }
8247         }
8248         return ret;
8249     case TARGET_NR_settimeofday:
8250         {
8251             struct timeval tv, *ptv = NULL;
8252             struct timezone tz, *ptz = NULL;
8253 
8254             if (arg1) {
8255                 if (copy_from_user_timeval(&tv, arg1)) {
8256                     return -TARGET_EFAULT;
8257                 }
8258                 ptv = &tv;
8259             }
8260 
8261             if (arg2) {
8262                 if (copy_from_user_timezone(&tz, arg2)) {
8263                     return -TARGET_EFAULT;
8264                 }
8265                 ptz = &tz;
8266             }
8267 
8268             return get_errno(settimeofday(ptv, ptz));
8269         }
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        /* Old-style select: arg1 points to a block holding the real
         * arguments. */
        ret = do_old_select(arg1);
#else
        /* Modern select: the five arguments are passed directly. */
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        {
            abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
            fd_set rfds, wfds, efds;
            fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
            struct timespec ts, *ts_ptr;

            /*
             * The 6th arg is actually two args smashed together,
             * so we cannot use the C library.
             */
            sigset_t set;
            struct {
                sigset_t *set;
                size_t size;
            } sig, *sig_ptr;

            abi_ulong arg_sigset, arg_sigsize, *arg7;
            target_sigset_t *target_sigset;

            n = arg1;
            rfd_addr = arg2;
            wfd_addr = arg3;
            efd_addr = arg4;
            ts_addr = arg5;

            /* Copy in the three fd sets; a zero address gives a NULL
             * pointer (set not watched). */
            ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
            if (ret) {
                return ret;
            }
            ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
            if (ret) {
                return ret;
            }
            ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
            if (ret) {
                return ret;
            }

            /*
             * This takes a timespec, and not a timeval, so we cannot
             * use the do_select() helper ...
             */
            if (ts_addr) {
                if (target_to_host_timespec(&ts, ts_addr)) {
                    return -TARGET_EFAULT;
                }
                ts_ptr = &ts;
            } else {
                ts_ptr = NULL;
            }

            /* Extract the two packed args for the sigset */
            if (arg6) {
                sig_ptr = &sig;
                sig.size = SIGSET_T_SIZE;

                arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
                if (!arg7) {
                    return -TARGET_EFAULT;
                }
                arg_sigset = tswapal(arg7[0]);
                arg_sigsize = tswapal(arg7[1]);
                unlock_user(arg7, arg6, 0);

                if (arg_sigset) {
                    sig.set = &set;
                    if (arg_sigsize != sizeof(*target_sigset)) {
                        /* Like the kernel, we enforce correct size sigsets */
                        return -TARGET_EINVAL;
                    }
                    target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                              sizeof(*target_sigset), 1);
                    if (!target_sigset) {
                        return -TARGET_EFAULT;
                    }
                    target_to_host_sigset(&set, target_sigset);
                    unlock_user(target_sigset, arg_sigset, 0);
                } else {
                    sig.set = NULL;
                }
            } else {
                sig_ptr = NULL;
            }

            ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                          ts_ptr, sig_ptr));

            /* On success, write back the (possibly modified) fd sets
             * and the remaining timeout. */
            if (!is_error(ret)) {
                if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
                    return -TARGET_EFAULT;
                if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
                    return -TARGET_EFAULT;
                if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
                    return -TARGET_EFAULT;

                if (ts_addr && host_to_target_timespec(ts_addr, &ts))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            /* Create a symlink: arg1 = target text, arg2 = link path. */
            void *linkpath;

            p = lock_user_string(arg1);
            linkpath = lock_user_string(arg2);
            if (p && linkpath) {
                ret = get_errno(symlink(p, linkpath));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(linkpath, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            /* Like symlink, but the link path (arg3) is resolved
             * relative to the directory fd in arg2. */
            void *linkpath;

            p = lock_user_string(arg1);
            linkpath = lock_user_string(arg3);
            if (p && linkpath) {
                ret = get_errno(symlinkat(p, arg2, linkpath));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(linkpath, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        {
            /* arg1 = link path, arg2 = output buffer, arg3 = its size.
             * /proc/self/exe is intercepted and answered with QEMU's
             * idea of the guest executable path. */
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg3) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg3);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            /* arg1 = dirfd, arg2 = link path, arg3 = output buffer,
             * arg4 = buffer size. /proc/self/exe is intercepted and
             * answered with QEMU's idea of the guest executable path. */
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg4) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /* Bug fix: mirror TARGET_NR_readlink's handling. The
                 * previous code snprintf'd into the buffer, which
                 * wrongly NUL-terminated, returned the untruncated
                 * length, and read uninitialized 'real' when realpath()
                 * failed. */
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    ret = MIN(strlen(real), arg4);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        /* arg1 = path of the swap area, arg2 = swap flags */
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
8478     case TARGET_NR_reboot:
8479         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8480            /* arg4 must be ignored in all other cases */
8481            p = lock_user_string(arg4);
8482            if (!p) {
8483                return -TARGET_EFAULT;
8484            }
8485            ret = get_errno(reboot(arg1, arg2, arg3, p));
8486            unlock_user(p, arg4, 0);
8487         } else {
8488            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8489         }
8490         return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            /* On these targets old-style mmap passes arg1 as a guest
             * pointer to a 6-element argument block rather than the
             * arguments directly. */
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        /* addr, length, prot, flags, fd, offset passed directly; the
         * guest MAP_* flag bits are translated to host values. */
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        return ret;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        /* mmap2's final argument is the file offset in (1 << MMAP_SHIFT)
         * -byte units rather than bytes.
         * NOTE(review): the shift is performed in arg6's own width, so
         * very large page offsets could wrap before reaching
         * target_mmap — confirm against target_mmap's offset type. */
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
    case TARGET_NR_munmap:
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable.  */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                /* Drop the flag and extend the region down to the
                 * recorded stack limit ourselves. */
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
        /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        /* g2h() maps the guest address to the corresponding host
         * address so the host syscall can be used directly. */
        return get_errno(msync(g2h(arg1), arg2, arg3));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        /* Guest MCL_* flag bits are translated to host values. */
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        /* arg1 = guest path, arg2 = new length */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        /* NOTE(review): the flags argument is hardwired to 0 here —
         * confirm the guest syscall indeed carries no flags (the kernel
         * fchmodat has only three arguments). */
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
8592 #endif
8593     case TARGET_NR_getpriority:
8594         /* Note that negative values are valid for getpriority, so we must
8595            differentiate based on errno settings.  */
8596         errno = 0;
8597         ret = getpriority(arg1, arg2);
8598         if (ret == -1 && errno != 0) {
8599             return -host_to_target_errno(errno);
8600         }
8601 #ifdef TARGET_ALPHA
8602         /* Return value is the unbiased priority.  Signal no error.  */
8603         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8604 #else
8605         /* Return value is a biased priority to avoid negative numbers.  */
8606         ret = 20 - ret;
8607 #endif
8608         return ret;
8609     case TARGET_NR_setpriority:
8610         return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        /* 'stfs' is presumably declared earlier in this function (not
         * visible here); it is shared with the fstatfs case below via
         * the convert_statfs label. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        /* Common tail (also reached via goto from fstatfs): convert the
         * host statfs result into the guest layout at arg2. */
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        /* Reuse statfs's conversion code; arg2 is the output buffer in
         * both syscalls. */
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    /* Shared tail: TARGET_NR_fstatfs64 jumps here after filling 'stfs'.
     * Note the target buffer is arg3 for the 64-bit variants (arg2 is
     * presumably the structure size per the kernel ABI -- it is not
     * validated here).
     */
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        /* Multiplexed socket entry point (older 32-bit ABIs): arg1 is the
         * sub-call number, arg2 points to the packed argument block.
         */
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        /* accept() is accept4() with no flags. */
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        /* No pointer arguments; call the host directly. */
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        /* recv() is recvfrom() with a NULL source address. */
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        /* Final flag selects direction: 0 = receive, 1 = send. */
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        /* send() is sendto() with no destination address. */
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        /* Fill guest buffer arg1 (length arg2) with random bytes. */
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        /* NOTE(review): on failure 'ret' is a negative target errno and is
         * passed as the copy-back length here -- confirm unlock_user()
         * tolerates a negative length in debug-remap builds.
         */
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            /*
             * syslog(type, bufp, len): arg1 = action type, arg2 = guest
             * buffer, arg3 = buffer length.
             *
             * Fix: 'len' was previously initialised from arg2 (the buffer
             * pointer), so the sanity checks below tested the pointer value
             * rather than the length actually handed to sys_syslog() --
             * e.g. a 32-bit guest buffer above 0x80000000 became a negative
             * int and returned a spurious -EINVAL.  Use arg3, which is what
             * sys_syslog() receives as its length parameter.
             */
            int len = arg3;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                /* These actions take no user buffer. */
                return get_errno(sys_syslog((int)arg1, NULL, len));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    /* Pin the guest buffer and let the host kernel fill it. */
                    p = lock_user(VERIFY_WRITE, arg2, len, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, len));
                    unlock_user(p, arg2, len);
                }
                return ret;
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
    case TARGET_NR_setitimer:
        {
            /* The guest itimerval is two consecutive target_timevals
             * (it_interval then it_value) at arg2; the optional old value
             * is written back to arg3 in the same layout.
             */
            struct itimerval value, ovalue, *pvalue;

            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                /* NULL new value: query-only semantics on the host side. */
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                /* Write interval then current value, as two target_timevals. */
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        /* Fall into the shared host->target stat conversion below. */
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        /* Shared tail for stat/lstat/fstat: copy host 'st' into the
         * target_stat layout at guest address arg2.
         */
        do_stat:
#endif
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                /* Zero first so padding and unset fields are deterministic. */
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        /* Indirect syscall: re-dispatch with arg1 as the new number and
         * the remaining args shifted down by one.
         */
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                /* Only write status back when a child was actually reaped
                 * (ret != 0) and the guest supplied a status pointer.
                 */
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_sysinfo:
        {
            /* Copy the host sysinfo snapshot field-by-field into the
             * target layout; only written when arg1 is non-NULL and the
             * host call succeeded.
             */
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        /* Multiplexed SysV IPC entry point (older 32-bit ABIs). */
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        return do_semop(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        /* Needs cpu_env: the attach address lands in guest address space. */
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        /* Run atexit-style guest cleanup before the host exits for real. */
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release.  */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86:
        /* vm86 mode only exists for 32-bit x86 guests. */
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
    case TARGET_NR_adjtimex:
        {
            /* Round-trip the timex structure: guest -> host, call, and
             * host -> guest on success (the kernel updates it in place).
             */
            struct timex host_buf;

            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx, *phtx = &htx;

            if (target_to_host_timex(phtx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, phtx));
            /* phtx always points at htx here; the check is belt-and-braces. */
            if (!is_error(ret) && phtx) {
                if (host_to_target_timex(arg2, phtx) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
    case TARGET_NR_getpgid:
        return get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        return get_errno(fchdir(arg1));
    case TARGET_NR_personality:
        return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            /* arg2:arg3 form the 64-bit offset (high:low); the 64-bit
             * result is written to guest address arg4; arg5 is whence.
             */
            int64_t res;
#if !defined(__NR_llseek)
            /* Host has no _llseek (64-bit host): plain lseek suffices. */
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        /*
         * 32-bit guest on a 64-bit host: read host linux_dirent records
         * into a bounce buffer, then repack them one-by-one as (smaller)
         * target_dirent records in the guest buffer.
         */
        {
            struct target_dirent *target_dirp;
            struct linux_dirent *dirp;
            abi_long count = arg3;

            dirp = g_try_malloc(count);
            if (!dirp) {
                return -TARGET_ENOMEM;
            }

            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                struct target_dirent *tde;
                int len = ret;
                int reclen, treclen;
                int count1, tnamelen;

                count1 = 0;
                de = dirp;
                target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
                if (!target_dirp) {
                    /* Fix: previously leaked the bounce buffer on this
                     * error path; free it before bailing out.
                     */
                    g_free(dirp);
                    return -TARGET_EFAULT;
                }
                tde = target_dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    /* Name bytes = host record minus its fixed header;
                     * rebuild the record length for the target header size.
                     */
                    tnamelen = reclen - offsetof(struct linux_dirent, d_name);
                    assert(tnamelen >= 0);
                    treclen = tnamelen + offsetof(struct target_dirent, d_name);
                    assert(count1 + treclen <= count);
                    tde->d_reclen = tswap16(treclen);
                    tde->d_ino = tswapal(de->d_ino);
                    tde->d_off = tswapal(de->d_off);
                    memcpy(tde->d_name, de->d_name, tnamelen);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    count1 += treclen;
                }
                /* Report the repacked byte count to the guest. */
                ret = count1;
                unlock_user(target_dirp, arg2, ret);
            }
            g_free(dirp);
        }
#else
        /* Same-width guest and host: byteswap the records in place. */
        {
            struct linux_dirent *dirp;
            abi_long count = arg3;

            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswapls(&de->d_ino);
                    tswapls(&de->d_off);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
#else
        /* Implement getdents in terms of getdents64 */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;

            dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
            if (!dirp) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                /* Convert the dirent64 structs to target dirent.  We do this
                 * in-place, since we can guarantee that a target_dirent is no
                 * larger than a dirent64; however this means we have to be
                 * careful to read everything before writing in the new format.
                 */
                struct linux_dirent64 *de;
                struct target_dirent *tde;
                int len = ret;
                int tlen = 0;

                de = dirp;
                tde = (struct target_dirent *)dirp;
                while (len > 0) {
                    int namelen, treclen;
                    int reclen = de->d_reclen;
                    uint64_t ino = de->d_ino;
                    int64_t off = de->d_off;
                    uint8_t type = de->d_type;

                    namelen = strlen(de->d_name);
                    /* +2: NUL terminator plus the trailing d_type byte. */
                    treclen = offsetof(struct target_dirent, d_name)
                        + namelen + 2;
                    treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));

                    memmove(tde->d_name, de->d_name, namelen + 1);
                    tde->d_ino = tswapal(ino);
                    tde->d_off = tswapal(off);
                    tde->d_reclen = tswap16(treclen);
                    /* The target_dirent type is in what was formerly a padding
                     * byte at the end of the structure:
                     */
                    *(((char *)tde) + treclen - 1) = type;

                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    len -= reclen;
                    tlen += treclen;
                }
                ret = tlen;
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
        return ret;
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        {
            /* linux_dirent64 has a fixed layout on every arch, so only
             * byteswapping (in place) is needed, no repacking.
             */
            struct linux_dirent64 *dirp;
            abi_long count = arg3;
            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent64 *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    /* Stop on a truncated trailing record. */
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswap64s((uint64_t *)&de->d_ino);
                    tswap64s((uint64_t *)&de->d_off);
                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
        return ret;
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        return do_select(arg1, arg2, arg3, arg4, arg5);
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
# ifdef TARGET_NR_poll
    case TARGET_NR_poll:
# endif
# ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
# endif
        {
            /* Shared front end for poll() and ppoll(): convert the pollfd
             * array once, then dispatch on 'num' (the original syscall
             * number) for the timeout/sigmask handling that differs.
             */
            struct target_pollfd *target_pfd;
            unsigned int nfds = arg2;
            struct pollfd *pfd;
            unsigned int i;

            pfd = NULL;
            target_pfd = NULL;
            if (nfds) {
                /* Reject sizes that would overflow the lock_user length. */
                if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
                    return -TARGET_EINVAL;
                }

                target_pfd = lock_user(VERIFY_WRITE, arg1,
                                       sizeof(struct target_pollfd) * nfds, 1);
                if (!target_pfd) {
                    return -TARGET_EFAULT;
                }

                pfd = alloca(sizeof(struct pollfd) * nfds);
                for (i = 0; i < nfds; i++) {
                    pfd[i].fd = tswap32(target_pfd[i].fd);
                    pfd[i].events = tswap16(target_pfd[i].events);
                }
            }

            switch (num) {
# ifdef TARGET_NR_ppoll
            case TARGET_NR_ppoll:
            {
                struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
                target_sigset_t *target_set;
                sigset_t _set, *set = &_set;

                if (arg3) {
                    if (target_to_host_timespec(timeout_ts, arg3)) {
                        unlock_user(target_pfd, arg1, 0);
                        return -TARGET_EFAULT;
                    }
                } else {
                    timeout_ts = NULL;
                }

                if (arg4) {
                    /* ppoll's sigset size argument must match exactly. */
                    if (arg5 != sizeof(target_sigset_t)) {
                        unlock_user(target_pfd, arg1, 0);
                        return -TARGET_EINVAL;
                    }

                    target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
                    if (!target_set) {
                        unlock_user(target_pfd, arg1, 0);
                        return -TARGET_EFAULT;
                    }
                    target_to_host_sigset(set, target_set);
                } else {
                    set = NULL;
                }

                ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                           set, SIGSET_T_SIZE));

                /* Write back the (possibly updated) remaining timeout. */
                if (!is_error(ret) && arg3) {
                    host_to_target_timespec(arg3, timeout_ts);
                }
                if (arg4) {
                    unlock_user(target_set, arg4, 0);
                }
                break;
            }
# endif
# ifdef TARGET_NR_poll
            case TARGET_NR_poll:
            {
                struct timespec ts, *pts;

                if (arg3 >= 0) {
                    /* Convert ms to secs, ns */
                    ts.tv_sec = arg3 / 1000;
                    ts.tv_nsec = (arg3 % 1000) * 1000000LL;
                    pts = &ts;
                } else {
                    /* -ve poll() timeout means "infinite" */
                    pts = NULL;
                }
                /* poll() is emulated with ppoll() and no sigmask. */
                ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
                break;
            }
# endif
            default:
                g_assert_not_reached();
            }

            if (!is_error(ret)) {
                /* Copy revents back to the guest pollfd array. */
                for(i = 0; i < nfds; i++) {
                    target_pfd[i].revents = tswap16(pfd[i].revents);
                }
            }
            unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
        }
        return ret;
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        return get_errno(safe_flock(arg1, arg2));
    case TARGET_NR_readv:
        {
            /* lock_iovec translates the guest iovec array and pins every
             * buffer; the final flag selects copy-back direction on unlock.
             */
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                /* lock_iovec reports its failure via host errno. */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                /* Reassemble the 64-bit file offset from the two ABI words. */
                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
           }
        }
        return ret;
#endif
9449 #if defined(TARGET_NR_pwritev)
9450     case TARGET_NR_pwritev:
9451         {
9452             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9453             if (vec != NULL) {
9454                 unsigned long low, high;
9455 
9456                 target_to_host_low_high(arg4, arg5, &low, &high);
9457                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9458                 unlock_iovec(vec, arg2, arg3, 0);
9459             } else {
9460                 ret = -host_to_target_errno(errno);
9461            }
9462         }
9463         return ret;
9464 #endif
9465     case TARGET_NR_getsid:
9466         return get_errno(getsid(arg1));
9467 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9468     case TARGET_NR_fdatasync:
9469         return get_errno(fdatasync(arg1));
9470 #endif
9471 #ifdef TARGET_NR__sysctl
9472     case TARGET_NR__sysctl:
9473         /* We don't implement this, but ENOTDIR is always a safe
9474            return value. */
9475         return -TARGET_ENOTDIR;
9476 #endif
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            /* Round the guest's byte count up to host-ulong granularity. */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        return -TARGET_EINVAL;
                    }
                    ret = arg2;
                }

                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            /* target_to_host_cpu_mask() returns a target errno on failure. */
            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                return ret;
            }

            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
    case TARGET_NR_getcpu:
        {
            unsigned cpu, node;
            /* Either output pointer may be NULL; the third (tcache)
             * argument is historical and always passed as NULL.
             */
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpu, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            /* NULL param pointer: fail like the kernel does. */
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
                    return -TARGET_EFAULT;
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            return get_errno(sched_setscheduler(arg1, arg2, &schp));
        }
    case TARGET_NR_sched_getscheduler:
        return get_errno(sched_getscheduler(arg1));
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                /* Convert the interval to the target's timespec layout. */
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
9617     case TARGET_NR_nanosleep:
9618         {
9619             struct timespec req, rem;
9620             target_to_host_timespec(&req, arg1);
9621             ret = get_errno(safe_nanosleep(&req, &rem));
9622             if (is_error(ret) && arg2) {
9623                 host_to_target_timespec(arg2, &rem);
9624             }
9625         }
9626         return ret;
    case TARGET_NR_prctl:
        /* Options taking pointer arguments must be translated explicitly;
         * the default branch passes the raw values through to the host.
         */
        switch (arg1) {
        case PR_GET_PDEATHSIG:
        {
            int deathsig;
            ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
            if (!is_error(ret) && arg2
                && put_user_ual(deathsig, arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
#ifdef PR_GET_NAME
        case PR_GET_NAME:
        {
            /* Task names are at most 16 bytes including the NUL. */
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
        case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
#endif
#ifdef TARGET_MIPS
        case TARGET_PR_GET_FP_MODE:
        {
            /* Report the emulated FPU mode from CP0 state. */
            CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
            ret = 0;
            if (env->CP0_Status & (1 << CP0St_FR)) {
                ret |= TARGET_PR_FP_MODE_FR;
            }
            if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
                ret |= TARGET_PR_FP_MODE_FRE;
            }
            return ret;
        }
        case TARGET_PR_SET_FP_MODE:
        {
            CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
            bool old_fr = env->CP0_Status & (1 << CP0St_FR);
            bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
            bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
            bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;

            const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
                                            TARGET_PR_FP_MODE_FRE;

            /* If nothing to change, return right away, successfully.  */
            if (old_fr == new_fr && old_fre == new_fre) {
                return 0;
            }
            /* Check the value is valid */
            if (arg2 & ~known_bits) {
                return -TARGET_EOPNOTSUPP;
            }
            /* Setting FRE without FR is not supported.  */
            if (new_fre && !new_fr) {
                return -TARGET_EOPNOTSUPP;
            }
            if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
                /* FR1 is not supported */
                return -TARGET_EOPNOTSUPP;
            }
            if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
                && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
                /* cannot set FR=0 */
                return -TARGET_EOPNOTSUPP;
            }
            if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
                /* Cannot set FRE=1 */
                return -TARGET_EOPNOTSUPP;
            }

            /* When switching FR mode, move the high word of each even
             * register to/from the following odd register so the values
             * survive the register-pairing change.
             */
            int i;
            fpr_t *fpr = env->active_fpu.fpr;
            for (i = 0; i < 32 ; i += 2) {
                if (!old_fr && new_fr) {
                    fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
                } else if (old_fr && !new_fr) {
                    fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
                }
            }

            if (new_fr) {
                env->CP0_Status |= (1 << CP0St_FR);
                env->hflags |= MIPS_HFLAG_F64;
            } else {
                env->CP0_Status &= ~(1 << CP0St_FR);
                env->hflags &= ~MIPS_HFLAG_F64;
            }
            if (new_fre) {
                env->CP0_Config5 |= (1 << CP0C5_FRE);
                if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
                    env->hflags |= MIPS_HFLAG_FRE;
                }
            } else {
                env->CP0_Config5 &= ~(1 << CP0C5_FRE);
                env->hflags &= ~MIPS_HFLAG_FRE;
            }

            return 0;
        }
#endif /* MIPS */
#ifdef TARGET_AARCH64
        case TARGET_PR_SVE_SET_VL:
            /*
             * We cannot support either PR_SVE_SET_VL_ONEXEC or
             * PR_SVE_VL_INHERIT.  Note the kernel definition
             * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
             * even though the current architectural maximum is VQ=16.
             */
            ret = -TARGET_EINVAL;
            if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env))
                && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = arm_env_get_cpu(env);
                uint32_t vq, old_vq;

                old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
                vq = MAX(arg2 / 16, 1);
                vq = MIN(vq, cpu->sve_max_vq);

                /* Shrinking the vector length discards high-part state. */
                if (vq < old_vq) {
                    aarch64_sve_narrow_vq(env, vq);
                }
                env->vfp.zcr_el[1] = vq - 1;
                /* Return the actual vector length in bytes. */
                ret = vq * 16;
            }
            return ret;
        case TARGET_PR_SVE_GET_VL:
            ret = -TARGET_EINVAL;
            {
                ARMCPU *cpu = arm_env_get_cpu(cpu_env);
                if (cpu_isar_feature(aa64_sve, cpu)) {
                    ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
                }
            }
            return ret;
        case TARGET_PR_PAC_RESET_KEYS:
            {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = arm_env_get_cpu(env);

                if (arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                if (cpu_isar_feature(aa64_pauth, cpu)) {
                    int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
                               TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
                               TARGET_PR_PAC_APGAKEY);
                    /* arg2 == 0 means "reset all keys". */
                    if (arg2 == 0) {
                        arg2 = all;
                    } else if (arg2 & ~all) {
                        return -TARGET_EINVAL;
                    }
                    if (arg2 & TARGET_PR_PAC_APIAKEY) {
                        arm_init_pauth_key(&env->apia_key);
                    }
                    if (arg2 & TARGET_PR_PAC_APIBKEY) {
                        arm_init_pauth_key(&env->apib_key);
                    }
                    if (arg2 & TARGET_PR_PAC_APDAKEY) {
                        arm_init_pauth_key(&env->apda_key);
                    }
                    if (arg2 & TARGET_PR_PAC_APDBKEY) {
                        arm_init_pauth_key(&env->apdb_key);
                    }
                    if (arg2 & TARGET_PR_PAC_APGAKEY) {
                        arm_init_pauth_key(&env->apga_key);
                    }
                    return 0;
                }
            }
            return -TARGET_EINVAL;
#endif /* AARCH64 */
        case PR_GET_SECCOMP:
        case PR_SET_SECCOMP:
            /* Disable seccomp to prevent the target disabling syscalls we
             * need. */
            return -TARGET_EINVAL;
        default:
            /* Most prctl options have no pointer arguments */
            return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
        }
        break;
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
#if defined(TARGET_I386) && !defined(TARGET_ABI32)
        return do_arch_prctl(cpu_env, arg1, arg2);
#else
#error unreachable
#endif
#endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        /* Some ABIs pass 64-bit arguments in aligned register pairs;
         * in that case the offset halves are shifted up one slot.
         */
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            return -TARGET_EFAULT;
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
        return ret;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    {
        /* capget/capset share one implementation; `num` selects the
         * direction of the user_cap_data copies.
         */
        struct target_user_cap_header *target_header;
        struct target_user_cap_data *target_data = NULL;
        struct __user_cap_header_struct header;
        struct __user_cap_data_struct data[2];
        struct __user_cap_data_struct *dataptr = NULL;
        int i, target_datalen;
        int data_items = 1;

        if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
            return -TARGET_EFAULT;
        }
        header.version = tswap32(target_header->version);
        header.pid = tswap32(target_header->pid);

        if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Version 2 and up takes pointer to two user_data structs */
            data_items = 2;
        }

        target_datalen = sizeof(*target_data) * data_items;

        /* arg2 may be NULL: capget with a NULL datap is a version probe. */
        if (arg2) {
            if (num == TARGET_NR_capget) {
                target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
            } else {
                target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
            }
            if (!target_data) {
                unlock_user_struct(target_header, arg1, 0);
                return -TARGET_EFAULT;
            }

            if (num == TARGET_NR_capset) {
                for (i = 0; i < data_items; i++) {
                    data[i].effective = tswap32(target_data[i].effective);
                    data[i].permitted = tswap32(target_data[i].permitted);
                    data[i].inheritable = tswap32(target_data[i].inheritable);
                }
            }

            dataptr = data;
        }

        if (num == TARGET_NR_capget) {
            ret = get_errno(capget(&header, dataptr));
        } else {
            ret = get_errno(capset(&header, dataptr));
        }

        /* The kernel always updates version for both capget and capset */
        target_header->version = tswap32(header.version);
        unlock_user_struct(target_header, arg1, 1);

        if (arg2) {
            if (num == TARGET_NR_capget) {
                for (i = 0; i < data_items; i++) {
                    target_data[i].effective = tswap32(data[i].effective);
                    target_data[i].permitted = tswap32(data[i].permitted);
                    target_data[i].inheritable = tswap32(data[i].inheritable);
                }
                unlock_user(target_data, arg2, target_datalen);
            } else {
                unlock_user(target_data, arg2, 0);
            }
        }
        return ret;
    }
    case TARGET_NR_sigaltstack:
        return do_sigaltstack(arg1, arg2,
                              get_sp_from_cpustate((CPUArchState *)cpu_env));
9948 
#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    {
        /* sendfile(2): the optional offset is copied in, updated by the
         * host call, and copied back on success.
         */
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    {
        /* As sendfile above, but the guest offset is always 64-bit. */
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
	struct rlimit rlim;
	int resource = target_to_host_resource(arg1);
	ret = get_errno(getrlimit(resource, &rlim));
	if (!is_error(ret)) {
	    struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                return -TARGET_EFAULT;
	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
	}
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        /* target_truncate64() reassembles the split 64-bit length per ABI. */
	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        /* path() applies the -L sysroot prefix translation if configured. */
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
/* The following are the legacy 16-bit uid/gid syscalls; low2high*/
/* high2low* translate between 16-bit target ids and host ids.   */
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10103     case TARGET_NR_getgroups:
10104         {
10105             int gidsetsize = arg1;
10106             target_id *target_grouplist;
10107             gid_t *grouplist;
10108             int i;
10109 
10110             grouplist = alloca(gidsetsize * sizeof(gid_t));
10111             ret = get_errno(getgroups(gidsetsize, grouplist));
10112             if (gidsetsize == 0)
10113                 return ret;
10114             if (!is_error(ret)) {
10115                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10116                 if (!target_grouplist)
10117                     return -TARGET_EFAULT;
10118                 for(i = 0;i < ret; i++)
10119                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10120                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10121             }
10122         }
10123         return ret;
10124     case TARGET_NR_setgroups:
10125         {
10126             int gidsetsize = arg1;
10127             target_id *target_grouplist;
10128             gid_t *grouplist = NULL;
10129             int i;
10130             if (gidsetsize) {
10131                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10132                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10133                 if (!target_grouplist) {
10134                     return -TARGET_EFAULT;
10135                 }
10136                 for (i = 0; i < gidsetsize; i++) {
10137                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10138                 }
10139                 unlock_user(target_grouplist, arg2, 0);
10140             }
10141             return get_errno(setgroups(gidsetsize, grouplist));
10142         }
    case TARGET_NR_fchown:
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                /* Write all three ids back; any faulting pointer aborts. */
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        /* The guard previously tested TARGET_NR_getresgid, which only
         * compiled because targets define the two together; the case label
         * itself needs TARGET_NR_setresgid, so test that macro instead.
         */
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
10180 #ifdef TARGET_NR_getresgid
10181     case TARGET_NR_getresgid:
10182         {
10183             gid_t rgid, egid, sgid;
10184             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10185             if (!is_error(ret)) {
10186                 if (put_user_id(high2lowgid(rgid), arg1)
10187                     || put_user_id(high2lowgid(egid), arg2)
10188                     || put_user_id(high2lowgid(sgid), arg3))
10189                     return -TARGET_EFAULT;
10190             }
10191         }
10192         return ret;
10193 #endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        /* Widen the target's (possibly 16-bit) ids to host uid_t/gid_t. */
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    /* setfsuid/setfsgid take the id unconverted; the host call returns
     * the previous fs id rather than an error code. */
    case TARGET_NR_setfsuid:
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));
10210 
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        /* 32-bit id variant: ids pass through without low2high conversion. */
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif
10223 
#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific: getxuid returns the real uid in v0 and the
     * effective uid in the secondary return register a4. */
    case TARGET_NR_getxuid:
         {
            uid_t euid;
            euid=geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
         }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific: getxgid returns the real gid in v0 and the
     * effective gid in a4. */
    case TARGET_NR_getxgid:
         {
            gid_t egid;   /* was declared uid_t; use the matching gid type */
            egid=getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
         }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;

                /* The exception status bits are kept only in the FPCR
                 * (see osf_setsysinfo); merge them into the software
                 * control word that we report to the guest. */
                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64 (swcr, arg2))
                        return -TARGET_EFAULT;
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                ((CPUAlphaState *)cpu_env)->swcr
                    = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                /* Rebuild the hardware FPCR from the guest's software
                 * control word, preserving only the rounding-mode field. */
                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                /* Keep only newly raised exceptions that are also enabled
                 * for trapping in the software control word. */
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= ((CPUArchState *)cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    /* Pick an si_code for the raised exceptions; later
                     * tests deliberately override earlier ones. */
                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    /* Deliver SIGFPE to the guest with the faulting pc. */
                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr
                        = ((CPUArchState *)cpu_env)->pc;
                    queue_signal((CPUArchState *)cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

          /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                /* OSF/1 style: on success the previous signal mask is
                 * the syscall's return value, not an out-parameter. */
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
10400 
/* 32-bit uid/gid syscall variants: ids are full-width, so they are
 * passed through without high/low conversion. */
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            /* Bound the stack allocation below: a negative or huge
             * guest-supplied size must never reach alloca(). The kernel
             * itself rejects sizes above NGROUPS_MAX with EINVAL. */
            if (gidsetsize < 0 || gidsetsize > NGROUPS_MAX) {
                return -TARGET_EINVAL;
            }
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            /* Size 0 is a pure query: return the group count, copy nothing. */
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                /* Byte-swap each gid into the guest's 32-bit array. */
                for(i = 0;i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            /* Bound the stack allocation: reject negative or oversized
             * counts before alloca(), matching the kernel's EINVAL for
             * sizes above NGROUPS_MAX. */
            if (gidsetsize < 0 || gidsetsize > NGROUPS_MAX) {
                return -TARGET_EINVAL;
            }
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                return -TARGET_EFAULT;
            }
            /* Byte-swap each guest gid into the host list. */
            for(i = 0;i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                /* Full 32-bit ids: store without the high2low narrowing
                 * used by the 16-bit TARGET_NR_getresuid variant. */
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        /* 32-bit ids: passed through without low2high conversion. */
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            /* arg1/arg2: address range to query; arg3: per-page result
             * vector written by the host mincore(). */
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            /* NOTE(review): the output vector is locked with
             * lock_user_string() (string-sized, read-verified) and then
             * unlocked with len == ret, which is 0 on success — so the
             * bytes mincore() wrote may not be copied back to the guest
             * on the copy-on-lock (DEBUG_REMAP) path. Verify against the
             * expected one-byte-per-page vector size. */
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        /* posix_fadvise returns the error number directly; it does not
         * set errno, hence the explicit host_to_target_errno mapping. */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        return -host_to_target_errno(ret);
#endif
10558 
#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        /* Rotate the arguments into the canonical
         * fd, offset, len, advice order used below. */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        /* posix_fadvise returns the error number; it does not set errno. */
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        /* s390x uses different advice constants; remap them to the
         * host's POSIX_FADV_* values. */
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */
10621 
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        /* Deliberate no-op: always report success to the guest. */
        return 0;
#endif
#if TARGET_ABI_BITS == 32
    case TARGET_NR_fcntl64:
    {
	int cmd;
	struct flock64 fl;
        /* Conversion helpers between the guest's flock64 layout and the
         * host's; swapped below for ARM OABI's different layout. */
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        if (!((CPUARMState *)cpu_env)->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

	cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        /* Only the lock commands need flock64 marshalling; everything
         * else is delegated to the common do_fcntl() path. */
        switch(arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                ret = copyto(arg3, &fl);
            }
	    break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
	    break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        /* Report the emulated target's page size, not the host's. */
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        /* 32-bit ABI: the 64-bit offset arrives as a register pair,
         * possibly shifted up one slot on ABIs that align pairs. */
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        /* arg2 == 0 is a valid size query: the host call then returns
         * the buffer size needed without writing anything. */
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            /* arg1: path, arg2: attribute name, arg3/arg4: value buffer
             * and size, arg5: flags. A NULL value (arg3 == 0) with size 0
             * sets an empty attribute. */
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            /* unlock_user tolerates NULL, so no need to test p/n here. */
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            /* arg3 == 0 is a valid size query, as with listxattr. */
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            /* Copy the (up to arg4-byte) value buffer back to the guest. */
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            /* arg1: path, arg2: attribute name. */
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    /* Thread-local storage pointer handling is per-architecture: each
     * target stashes the TLS value in its own CPU state field. */
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
      ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
      return 0;
#elif defined(TARGET_CRIS)
      /* CRIS requires the TLS value to be 256-byte aligned. */
      if (arg1 & 0xff)
          ret = -TARGET_EINVAL;
      else {
          ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
          ret = 0;
      }
      return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
      return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
      {
          TaskState *ts = cpu->opaque;
          ts->tp_value = arg1;
          return 0;
      }
#else
      return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        /* Intentionally unimplemented; glibc falls back to uname(). */
        return -TARGET_ENOSYS;
#endif
10906 
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        /* Conversion reports -TARGET_EFAULT itself on a bad pointer. */
        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            /* Copy the result out; this reports EFAULT on failure. */
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            /* The kernel permits a NULL res pointer (nothing is written);
             * only report EFAULT for a non-NULL pointer we cannot write
             * through, instead of silently discarding the failure. */
            if (arg2 && host_to_target_timespec(arg2, &ts)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        /* Report an unreadable request timespec as EFAULT instead of
         * sleeping on an uninitialized value (the conversion result was
         * previously ignored). */
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /* If the guest supplied a remainder pointer, copy back the
         * (possibly updated on EINTR) timespec. */
        if (arg4)
            host_to_target_timespec(arg4, &ts);

#if defined(TARGET_PPC)
        /* clock_nanosleep is odd in that it returns positive errno values.
         * On PPC, CR0 bit 3 should be set in such a situation. */
        if (ret && ret != -TARGET_ERESTARTSYS) {
            ((CPUPPCState *)cpu_env)->crf[0] |= 1;
        }
#endif
        return ret;
    }
#endif
10961 
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        /* Pass the host address of the guest's tid word straight through. */
        return get_errno(set_tid_address((int *)g2h(arg1)));
#endif

    case TARGET_NR_tkill:
        /* Translate the guest signal number before delivering it. */
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));
10973 
#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        /* Both the set and get variants are rejected together. */
        return -TARGET_ENOSYS;
#endif
10991 
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                /* NULL times: both timestamps are set to the current time. */
                tsp = NULL;
            } else {
                /* Propagate EFAULT from an unreadable times array instead
                 * of silently passing uninitialized timespecs (the
                 * conversion results were previously ignored). */
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1,
                        arg3 + sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
    case TARGET_NR_futex:
        return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        if (ret >= 0) {
            /* Register a translator so inotify events read from this fd
             * are converted to the target's layout/byte order. */
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        /* Convert the guest's O_* style flags to host values first. */
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        /* A bad pathname pointer must yield EFAULT, not a NULL deref
         * inside path() (the result was previously unchecked). */
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        /* fd and watch descriptor need no translation. */
        return get_errno(sys_inotify_rm_watch(arg1, arg2));
#endif
11047 
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            /* attr pointer is optional; NULL means default attributes. */
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            /* NOTE(review): the queue name is locked starting at arg1 - 1
             * yet unlocked at arg1 — confirm this off-by-one against how
             * the guest passes the name; it looks suspicious from here. */
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        /* NOTE(review): same arg1 - 1 oddity as mq_open above — verify. */
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        return ret;
11080 
11081     case TARGET_NR_mq_timedsend:
11082         {
11083             struct timespec ts;
11084 
11085             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11086             if (arg5 != 0) {
11087                 target_to_host_timespec(&ts, arg5);
11088                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11089                 host_to_target_timespec(arg5, &ts);
11090             } else {
11091                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11092             }
11093             unlock_user (p, arg2, arg3);
11094         }
11095         return ret;
11096 
11097     case TARGET_NR_mq_timedreceive:
11098         {
11099             struct timespec ts;
11100             unsigned int prio;
11101 
11102             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11103             if (arg5 != 0) {
11104                 target_to_host_timespec(&ts, arg5);
11105                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11106                                                      &prio, &ts));
11107                 host_to_target_timespec(arg5, &ts);
11108             } else {
11109                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11110                                                      &prio, NULL));
11111             }
11112             unlock_user (p, arg2, arg3);
11113             if (arg4 != 0)
11114                 put_user_u32(prio, arg4);
11115         }
11116         return ret;
11117 
11118     /* Not implemented for now... */
11119 /*     case TARGET_NR_mq_notify: */
11120 /*         break; */
11121 
11122     case TARGET_NR_mq_getsetattr:
11123         {
11124             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11125             ret = 0;
11126             if (arg2 != 0) {
11127                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11128                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11129                                            &posix_mq_attr_out));
11130             } else if (arg3 != 0) {
11131                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11132             }
11133             if (ret == 0 && arg3 != 0) {
11134                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11135             }
11136         }
11137         return ret;
11138 #endif
11139 
11140 #ifdef CONFIG_SPLICE
11141 #ifdef TARGET_NR_tee
11142     case TARGET_NR_tee:
11143         {
11144             ret = get_errno(tee(arg1,arg2,arg3,arg4));
11145         }
11146         return ret;
11147 #endif
11148 #ifdef TARGET_NR_splice
11149     case TARGET_NR_splice:
11150         {
11151             loff_t loff_in, loff_out;
11152             loff_t *ploff_in = NULL, *ploff_out = NULL;
11153             if (arg2) {
11154                 if (get_user_u64(loff_in, arg2)) {
11155                     return -TARGET_EFAULT;
11156                 }
11157                 ploff_in = &loff_in;
11158             }
11159             if (arg4) {
11160                 if (get_user_u64(loff_out, arg4)) {
11161                     return -TARGET_EFAULT;
11162                 }
11163                 ploff_out = &loff_out;
11164             }
11165             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11166             if (arg2) {
11167                 if (put_user_u64(loff_in, arg2)) {
11168                     return -TARGET_EFAULT;
11169                 }
11170             }
11171             if (arg4) {
11172                 if (put_user_u64(loff_out, arg4)) {
11173                     return -TARGET_EFAULT;
11174                 }
11175             }
11176         }
11177         return ret;
11178 #endif
11179 #ifdef TARGET_NR_vmsplice
11180 	case TARGET_NR_vmsplice:
11181         {
11182             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11183             if (vec != NULL) {
11184                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11185                 unlock_iovec(vec, arg2, arg3, 0);
11186             } else {
11187                 ret = -host_to_target_errno(errno);
11188             }
11189         }
11190         return ret;
11191 #endif
11192 #endif /* CONFIG_SPLICE */
11193 #ifdef CONFIG_EVENTFD
11194 #if defined(TARGET_NR_eventfd)
11195     case TARGET_NR_eventfd:
11196         ret = get_errno(eventfd(arg1, 0));
11197         if (ret >= 0) {
11198             fd_trans_register(ret, &target_eventfd_trans);
11199         }
11200         return ret;
11201 #endif
11202 #if defined(TARGET_NR_eventfd2)
11203     case TARGET_NR_eventfd2:
11204     {
11205         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11206         if (arg2 & TARGET_O_NONBLOCK) {
11207             host_flags |= O_NONBLOCK;
11208         }
11209         if (arg2 & TARGET_O_CLOEXEC) {
11210             host_flags |= O_CLOEXEC;
11211         }
11212         ret = get_errno(eventfd(arg1, host_flags));
11213         if (ret >= 0) {
11214             fd_trans_register(ret, &target_eventfd_trans);
11215         }
11216         return ret;
11217     }
11218 #endif
11219 #endif /* CONFIG_EVENTFD  */
11220 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11221     case TARGET_NR_fallocate:
11222 #if TARGET_ABI_BITS == 32
11223         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11224                                   target_offset64(arg5, arg6)));
11225 #else
11226         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11227 #endif
11228         return ret;
11229 #endif
11230 #if defined(CONFIG_SYNC_FILE_RANGE)
11231 #if defined(TARGET_NR_sync_file_range)
11232     case TARGET_NR_sync_file_range:
11233 #if TARGET_ABI_BITS == 32
11234 #if defined(TARGET_MIPS)
11235         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11236                                         target_offset64(arg5, arg6), arg7));
11237 #else
11238         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11239                                         target_offset64(arg4, arg5), arg6));
11240 #endif /* !TARGET_MIPS */
11241 #else
11242         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11243 #endif
11244         return ret;
11245 #endif
11246 #if defined(TARGET_NR_sync_file_range2)
11247     case TARGET_NR_sync_file_range2:
11248         /* This is like sync_file_range but the arguments are reordered */
11249 #if TARGET_ABI_BITS == 32
11250         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11251                                         target_offset64(arg5, arg6), arg2));
11252 #else
11253         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11254 #endif
11255         return ret;
11256 #endif
11257 #endif
11258 #if defined(TARGET_NR_signalfd4)
11259     case TARGET_NR_signalfd4:
11260         return do_signalfd4(arg1, arg2, arg4);
11261 #endif
11262 #if defined(TARGET_NR_signalfd)
11263     case TARGET_NR_signalfd:
11264         return do_signalfd4(arg1, arg2, 0);
11265 #endif
11266 #if defined(CONFIG_EPOLL)
11267 #if defined(TARGET_NR_epoll_create)
11268     case TARGET_NR_epoll_create:
11269         return get_errno(epoll_create(arg1));
11270 #endif
11271 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11272     case TARGET_NR_epoll_create1:
11273         return get_errno(epoll_create1(arg1));
11274 #endif
11275 #if defined(TARGET_NR_epoll_ctl)
11276     case TARGET_NR_epoll_ctl:
11277     {
11278         struct epoll_event ep;
11279         struct epoll_event *epp = 0;
11280         if (arg4) {
11281             struct target_epoll_event *target_ep;
11282             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11283                 return -TARGET_EFAULT;
11284             }
11285             ep.events = tswap32(target_ep->events);
11286             /* The epoll_data_t union is just opaque data to the kernel,
11287              * so we transfer all 64 bits across and need not worry what
11288              * actual data type it is.
11289              */
11290             ep.data.u64 = tswap64(target_ep->data.u64);
11291             unlock_user_struct(target_ep, arg4, 0);
11292             epp = &ep;
11293         }
11294         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11295     }
11296 #endif
11297 
11298 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11299 #if defined(TARGET_NR_epoll_wait)
11300     case TARGET_NR_epoll_wait:
11301 #endif
11302 #if defined(TARGET_NR_epoll_pwait)
11303     case TARGET_NR_epoll_pwait:
11304 #endif
11305     {
11306         struct target_epoll_event *target_ep;
11307         struct epoll_event *ep;
11308         int epfd = arg1;
11309         int maxevents = arg3;
11310         int timeout = arg4;
11311 
11312         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11313             return -TARGET_EINVAL;
11314         }
11315 
11316         target_ep = lock_user(VERIFY_WRITE, arg2,
11317                               maxevents * sizeof(struct target_epoll_event), 1);
11318         if (!target_ep) {
11319             return -TARGET_EFAULT;
11320         }
11321 
11322         ep = g_try_new(struct epoll_event, maxevents);
11323         if (!ep) {
11324             unlock_user(target_ep, arg2, 0);
11325             return -TARGET_ENOMEM;
11326         }
11327 
11328         switch (num) {
11329 #if defined(TARGET_NR_epoll_pwait)
11330         case TARGET_NR_epoll_pwait:
11331         {
11332             target_sigset_t *target_set;
11333             sigset_t _set, *set = &_set;
11334 
11335             if (arg5) {
11336                 if (arg6 != sizeof(target_sigset_t)) {
11337                     ret = -TARGET_EINVAL;
11338                     break;
11339                 }
11340 
11341                 target_set = lock_user(VERIFY_READ, arg5,
11342                                        sizeof(target_sigset_t), 1);
11343                 if (!target_set) {
11344                     ret = -TARGET_EFAULT;
11345                     break;
11346                 }
11347                 target_to_host_sigset(set, target_set);
11348                 unlock_user(target_set, arg5, 0);
11349             } else {
11350                 set = NULL;
11351             }
11352 
11353             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11354                                              set, SIGSET_T_SIZE));
11355             break;
11356         }
11357 #endif
11358 #if defined(TARGET_NR_epoll_wait)
11359         case TARGET_NR_epoll_wait:
11360             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11361                                              NULL, 0));
11362             break;
11363 #endif
11364         default:
11365             ret = -TARGET_ENOSYS;
11366         }
11367         if (!is_error(ret)) {
11368             int i;
11369             for (i = 0; i < ret; i++) {
11370                 target_ep[i].events = tswap32(ep[i].events);
11371                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11372             }
11373             unlock_user(target_ep, arg2,
11374                         ret * sizeof(struct target_epoll_event));
11375         } else {
11376             unlock_user(target_ep, arg2, 0);
11377         }
11378         g_free(ep);
11379         return ret;
11380     }
11381 #endif
11382 #endif
11383 #ifdef TARGET_NR_prlimit64
11384     case TARGET_NR_prlimit64:
11385     {
11386         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11387         struct target_rlimit64 *target_rnew, *target_rold;
11388         struct host_rlimit64 rnew, rold, *rnewp = 0;
11389         int resource = target_to_host_resource(arg2);
11390         if (arg3) {
11391             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11392                 return -TARGET_EFAULT;
11393             }
11394             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11395             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11396             unlock_user_struct(target_rnew, arg3, 0);
11397             rnewp = &rnew;
11398         }
11399 
11400         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11401         if (!is_error(ret) && arg4) {
11402             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11403                 return -TARGET_EFAULT;
11404             }
11405             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11406             target_rold->rlim_max = tswap64(rold.rlim_max);
11407             unlock_user_struct(target_rold, arg4, 1);
11408         }
11409         return ret;
11410     }
11411 #endif
11412 #ifdef TARGET_NR_gethostname
11413     case TARGET_NR_gethostname:
11414     {
11415         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11416         if (name) {
11417             ret = get_errno(gethostname(name, arg2));
11418             unlock_user(name, arg1, arg2);
11419         } else {
11420             ret = -TARGET_EFAULT;
11421         }
11422         return ret;
11423     }
11424 #endif
11425 #ifdef TARGET_NR_atomic_cmpxchg_32
11426     case TARGET_NR_atomic_cmpxchg_32:
11427     {
11428         /* should use start_exclusive from main.c */
11429         abi_ulong mem_value;
11430         if (get_user_u32(mem_value, arg6)) {
11431             target_siginfo_t info;
11432             info.si_signo = SIGSEGV;
11433             info.si_errno = 0;
11434             info.si_code = TARGET_SEGV_MAPERR;
11435             info._sifields._sigfault._addr = arg6;
11436             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11437                          QEMU_SI_FAULT, &info);
11438             ret = 0xdeadbeef;
11439 
11440         }
11441         if (mem_value == arg2)
11442             put_user_u32(arg1, arg6);
11443         return mem_value;
11444     }
11445 #endif
11446 #ifdef TARGET_NR_atomic_barrier
11447     case TARGET_NR_atomic_barrier:
11448         /* Like the kernel implementation and the
11449            qemu arm barrier, no-op this? */
11450         return 0;
11451 #endif
11452 
11453 #ifdef TARGET_NR_timer_create
11454     case TARGET_NR_timer_create:
11455     {
11456         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11457 
11458         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11459 
11460         int clkid = arg1;
11461         int timer_index = next_free_host_timer();
11462 
11463         if (timer_index < 0) {
11464             ret = -TARGET_EAGAIN;
11465         } else {
11466             timer_t *phtimer = g_posix_timers  + timer_index;
11467 
11468             if (arg2) {
11469                 phost_sevp = &host_sevp;
11470                 ret = target_to_host_sigevent(phost_sevp, arg2);
11471                 if (ret != 0) {
11472                     return ret;
11473                 }
11474             }
11475 
11476             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11477             if (ret) {
11478                 phtimer = NULL;
11479             } else {
11480                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11481                     return -TARGET_EFAULT;
11482                 }
11483             }
11484         }
11485         return ret;
11486     }
11487 #endif
11488 
11489 #ifdef TARGET_NR_timer_settime
11490     case TARGET_NR_timer_settime:
11491     {
11492         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11493          * struct itimerspec * old_value */
11494         target_timer_t timerid = get_timer_id(arg1);
11495 
11496         if (timerid < 0) {
11497             ret = timerid;
11498         } else if (arg3 == 0) {
11499             ret = -TARGET_EINVAL;
11500         } else {
11501             timer_t htimer = g_posix_timers[timerid];
11502             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11503 
11504             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11505                 return -TARGET_EFAULT;
11506             }
11507             ret = get_errno(
11508                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11509             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11510                 return -TARGET_EFAULT;
11511             }
11512         }
11513         return ret;
11514     }
11515 #endif
11516 
11517 #ifdef TARGET_NR_timer_gettime
11518     case TARGET_NR_timer_gettime:
11519     {
11520         /* args: timer_t timerid, struct itimerspec *curr_value */
11521         target_timer_t timerid = get_timer_id(arg1);
11522 
11523         if (timerid < 0) {
11524             ret = timerid;
11525         } else if (!arg2) {
11526             ret = -TARGET_EFAULT;
11527         } else {
11528             timer_t htimer = g_posix_timers[timerid];
11529             struct itimerspec hspec;
11530             ret = get_errno(timer_gettime(htimer, &hspec));
11531 
11532             if (host_to_target_itimerspec(arg2, &hspec)) {
11533                 ret = -TARGET_EFAULT;
11534             }
11535         }
11536         return ret;
11537     }
11538 #endif
11539 
11540 #ifdef TARGET_NR_timer_getoverrun
11541     case TARGET_NR_timer_getoverrun:
11542     {
11543         /* args: timer_t timerid */
11544         target_timer_t timerid = get_timer_id(arg1);
11545 
11546         if (timerid < 0) {
11547             ret = timerid;
11548         } else {
11549             timer_t htimer = g_posix_timers[timerid];
11550             ret = get_errno(timer_getoverrun(htimer));
11551         }
11552         fd_trans_unregister(ret);
11553         return ret;
11554     }
11555 #endif
11556 
11557 #ifdef TARGET_NR_timer_delete
11558     case TARGET_NR_timer_delete:
11559     {
11560         /* args: timer_t timerid */
11561         target_timer_t timerid = get_timer_id(arg1);
11562 
11563         if (timerid < 0) {
11564             ret = timerid;
11565         } else {
11566             timer_t htimer = g_posix_timers[timerid];
11567             ret = get_errno(timer_delete(htimer));
11568             g_posix_timers[timerid] = 0;
11569         }
11570         return ret;
11571     }
11572 #endif
11573 
11574 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11575     case TARGET_NR_timerfd_create:
11576         return get_errno(timerfd_create(arg1,
11577                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11578 #endif
11579 
11580 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11581     case TARGET_NR_timerfd_gettime:
11582         {
11583             struct itimerspec its_curr;
11584 
11585             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11586 
11587             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11588                 return -TARGET_EFAULT;
11589             }
11590         }
11591         return ret;
11592 #endif
11593 
11594 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11595     case TARGET_NR_timerfd_settime:
11596         {
11597             struct itimerspec its_new, its_old, *p_new;
11598 
11599             if (arg3) {
11600                 if (target_to_host_itimerspec(&its_new, arg3)) {
11601                     return -TARGET_EFAULT;
11602                 }
11603                 p_new = &its_new;
11604             } else {
11605                 p_new = NULL;
11606             }
11607 
11608             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11609 
11610             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11611                 return -TARGET_EFAULT;
11612             }
11613         }
11614         return ret;
11615 #endif
11616 
11617 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11618     case TARGET_NR_ioprio_get:
11619         return get_errno(ioprio_get(arg1, arg2));
11620 #endif
11621 
11622 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11623     case TARGET_NR_ioprio_set:
11624         return get_errno(ioprio_set(arg1, arg2, arg3));
11625 #endif
11626 
11627 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11628     case TARGET_NR_setns:
11629         return get_errno(setns(arg1, arg2));
11630 #endif
11631 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11632     case TARGET_NR_unshare:
11633         return get_errno(unshare(arg1));
11634 #endif
11635 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11636     case TARGET_NR_kcmp:
11637         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11638 #endif
11639 #ifdef TARGET_NR_swapcontext
11640     case TARGET_NR_swapcontext:
11641         /* PowerPC specific.  */
11642         return do_swapcontext(cpu_env, arg1, arg2, arg3);
11643 #endif
11644 
11645     default:
11646         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11647         return -TARGET_ENOSYS;
11648     }
11649     return ret;
11650 }
11651 
11652 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11653                     abi_long arg2, abi_long arg3, abi_long arg4,
11654                     abi_long arg5, abi_long arg6, abi_long arg7,
11655                     abi_long arg8)
11656 {
11657     CPUState *cpu = ENV_GET_CPU(cpu_env);
11658     abi_long ret;
11659 
11660 #ifdef DEBUG_ERESTARTSYS
11661     /* Debug-only code for exercising the syscall-restart code paths
11662      * in the per-architecture cpu main loops: restart every syscall
11663      * the guest makes once before letting it through.
11664      */
11665     {
11666         static bool flag;
11667         flag = !flag;
11668         if (flag) {
11669             return -TARGET_ERESTARTSYS;
11670         }
11671     }
11672 #endif
11673 
11674     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11675                              arg5, arg6, arg7, arg8);
11676 
11677     if (unlikely(do_strace)) {
11678         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11679         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11680                           arg5, arg6, arg7, arg8);
11681         print_syscall_ret(num, ret);
11682     } else {
11683         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11684                           arg5, arg6, arg7, arg8);
11685     }
11686 
11687     trace_guest_user_syscall_ret(cpu, num, ret);
11688     return ret;
11689 }
11690