xref: /openbmc/qemu/linux-user/syscall.c (revision 135b03cb)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/queue.h"
24 #include <elf.h>
25 #include <endian.h>
26 #include <grp.h>
27 #include <sys/ipc.h>
28 #include <sys/msg.h>
29 #include <sys/wait.h>
30 #include <sys/mount.h>
31 #include <sys/file.h>
32 #include <sys/fsuid.h>
33 #include <sys/personality.h>
34 #include <sys/prctl.h>
35 #include <sys/resource.h>
36 #include <sys/swap.h>
37 #include <linux/capability.h>
38 #include <sched.h>
39 #include <sys/timex.h>
40 #include <sys/socket.h>
41 #include <linux/sockios.h>
42 #include <sys/un.h>
43 #include <sys/uio.h>
44 #include <poll.h>
45 #include <sys/times.h>
46 #include <sys/shm.h>
47 #include <sys/sem.h>
48 #include <sys/statfs.h>
49 #include <utime.h>
50 #include <sys/sysinfo.h>
51 #include <sys/signalfd.h>
52 //#include <sys/user.h>
53 #include <netinet/ip.h>
54 #include <netinet/tcp.h>
55 #include <linux/wireless.h>
56 #include <linux/icmp.h>
57 #include <linux/icmpv6.h>
58 #include <linux/errqueue.h>
59 #include <linux/random.h>
60 #ifdef CONFIG_TIMERFD
61 #include <sys/timerfd.h>
62 #endif
63 #ifdef CONFIG_EVENTFD
64 #include <sys/eventfd.h>
65 #endif
66 #ifdef CONFIG_EPOLL
67 #include <sys/epoll.h>
68 #endif
69 #ifdef CONFIG_ATTR
70 #include "qemu/xattr.h"
71 #endif
72 #ifdef CONFIG_SENDFILE
73 #include <sys/sendfile.h>
74 #endif
75 
76 #define termios host_termios
77 #define winsize host_winsize
78 #define termio host_termio
79 #define sgttyb host_sgttyb /* same as target */
80 #define tchars host_tchars /* same as target */
81 #define ltchars host_ltchars /* same as target */
82 
83 #include <linux/termios.h>
84 #include <linux/unistd.h>
85 #include <linux/cdrom.h>
86 #include <linux/hdreg.h>
87 #include <linux/soundcard.h>
88 #include <linux/kd.h>
89 #include <linux/mtio.h>
90 #include <linux/fs.h>
91 #if defined(CONFIG_FIEMAP)
92 #include <linux/fiemap.h>
93 #endif
94 #include <linux/fb.h>
95 #if defined(CONFIG_USBFS)
96 #include <linux/usbdevice_fs.h>
97 #include <linux/usb/ch9.h>
98 #endif
99 #include <linux/vt.h>
100 #include <linux/dm-ioctl.h>
101 #include <linux/reboot.h>
102 #include <linux/route.h>
103 #include <linux/filter.h>
104 #include <linux/blkpg.h>
105 #include <netpacket/packet.h>
106 #include <linux/netlink.h>
107 #include <linux/if_alg.h>
108 #include "linux_loop.h"
109 #include "uname.h"
110 
111 #include "qemu.h"
112 #include "qemu/guest-random.h"
113 #include "qapi/error.h"
114 #include "fd-trans.h"
115 
116 #ifndef CLONE_IO
117 #define CLONE_IO                0x80000000      /* Clone io context */
118 #endif
119 
120 /* We can't directly call the host clone syscall, because this will
121  * badly confuse libc (breaking mutexes, for example). So we must
122  * divide clone flags into:
123  *  * flag combinations that look like pthread_create()
124  *  * flag combinations that look like fork()
125  *  * flags we can implement within QEMU itself
126  *  * flags we can't support and will return an error for
127  */
128 /* For thread creation, all these flags must be present; for
129  * fork, none must be present.
130  */
131 #define CLONE_THREAD_FLAGS                              \
132     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
133      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
134 
135 /* These flags are ignored:
136  * CLONE_DETACHED is now ignored by the kernel;
137  * CLONE_IO is just an optimisation hint to the I/O scheduler
138  */
139 #define CLONE_IGNORED_FLAGS                     \
140     (CLONE_DETACHED | CLONE_IO)
141 
142 /* Flags for fork which we can implement within QEMU itself */
143 #define CLONE_OPTIONAL_FORK_FLAGS               \
144     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
145      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
146 
147 /* Flags for thread creation which we can implement within QEMU itself */
148 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
149     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
150      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
151 
152 #define CLONE_INVALID_FORK_FLAGS                                        \
153     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
154 
155 #define CLONE_INVALID_THREAD_FLAGS                                      \
156     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
157        CLONE_IGNORED_FLAGS))
158 
159 /* CLONE_VFORK is special-cased early in do_fork(). The other flag bits
160  * have almost all been allocated. We cannot support any of
161  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
162  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
163  * The checks against the invalid thread masks above will catch these.
164  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
165  */
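/* Illustrative sketch (not part of the build): do_fork(), later in this
 * file, combines the masks above roughly as follows to decide whether a
 * guest clone() request can be honoured:
 *
 *     if (flags & CLONE_VM) {
 *         // thread-like clone
 *         if ((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS ||
 *             (flags & CLONE_INVALID_THREAD_FLAGS)) {
 *             return -TARGET_EINVAL;
 *         }
 *     } else {
 *         // fork-like clone
 *         if (flags & CLONE_INVALID_FORK_FLAGS) {
 *             return -TARGET_EINVAL;
 *         }
 *     }
 */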
166 
167 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
168  * once. This exercises the codepaths for restart.
169  */
170 //#define DEBUG_ERESTARTSYS
171 
172 //#include <linux/msdos_fs.h>
173 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
174 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
175 
176 #undef _syscall0
177 #undef _syscall1
178 #undef _syscall2
179 #undef _syscall3
180 #undef _syscall4
181 #undef _syscall5
182 #undef _syscall6
183 
184 #define _syscall0(type,name)		\
185 static type name (void)			\
186 {					\
187 	return syscall(__NR_##name);	\
188 }
189 
190 #define _syscall1(type,name,type1,arg1)		\
191 static type name (type1 arg1)			\
192 {						\
193 	return syscall(__NR_##name, arg1);	\
194 }
195 
196 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
197 static type name (type1 arg1,type2 arg2)		\
198 {							\
199 	return syscall(__NR_##name, arg1, arg2);	\
200 }
201 
202 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
203 static type name (type1 arg1,type2 arg2,type3 arg3)		\
204 {								\
205 	return syscall(__NR_##name, arg1, arg2, arg3);		\
206 }
207 
208 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
209 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
210 {										\
211 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
212 }
213 
214 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
215 		  type5,arg5)							\
216 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
217 {										\
218 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
219 }
220 
221 
222 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
223 		  type5,arg5,type6,arg6)					\
224 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
225                   type6 arg6)							\
226 {										\
227 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
228 }
229 
230 
231 #define __NR_sys_uname __NR_uname
232 #define __NR_sys_getcwd1 __NR_getcwd
233 #define __NR_sys_getdents __NR_getdents
234 #define __NR_sys_getdents64 __NR_getdents64
235 #define __NR_sys_getpriority __NR_getpriority
236 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
237 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
238 #define __NR_sys_syslog __NR_syslog
239 #define __NR_sys_futex __NR_futex
240 #define __NR_sys_inotify_init __NR_inotify_init
241 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
242 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
243 #define __NR_sys_statx __NR_statx
244 
245 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
246 #define __NR__llseek __NR_lseek
247 #endif
248 
249 /* Newer kernel ports have llseek() instead of _llseek() */
250 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
251 #define TARGET_NR__llseek TARGET_NR_llseek
252 #endif
253 
254 #define __NR_sys_gettid __NR_gettid
255 _syscall0(int, sys_gettid)
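/* For reference (illustrative expansion, not additional code): the
 * declaration above expands via _syscall0() to a small host wrapper,
 *
 *     static int sys_gettid(void)
 *     {
 *         return syscall(__NR_sys_gettid);
 *     }
 *
 * and the #define immediately above maps __NR_sys_gettid to the host's
 * __NR_gettid, so the wrapper issues the raw syscall rather than relying
 * on a libc gettid() wrapper (which older libcs do not provide).
 */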
256 
257 /* For the 64-bit guest on 32-bit host case we must emulate
258  * getdents using getdents64, because otherwise the host
259  * might hand us back more dirent records than we can fit
260  * into the guest buffer after structure format conversion.
261  * Otherwise we emulate getdents with getdents if the host has it.
262  */
263 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
264 #define EMULATE_GETDENTS_WITH_GETDENTS
265 #endif
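/* Worked example of the size problem described above, assuming a 32-bit
 * host and a 64-bit guest: the host struct linux_dirent begins with two
 * unsigned longs (4 bytes each on the host) while the guest expects
 * 8-byte fields, so each record grows by roughly 8 bytes (plus padding)
 * during conversion.  A host buffer filled to capacity would therefore
 * no longer fit into a guest buffer of the same size, which is why the
 * fixed-layout getdents64 is used instead in that configuration.
 */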
266 
267 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
268 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
269 #endif
270 #if (defined(TARGET_NR_getdents) && \
271       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
272     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
273 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
274 #endif
275 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
276 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
277           loff_t *, res, uint, wh);
278 #endif
279 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
280 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
281           siginfo_t *, uinfo)
282 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
283 #ifdef __NR_exit_group
284 _syscall1(int,exit_group,int,error_code)
285 #endif
286 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
287 _syscall1(int,set_tid_address,int *,tidptr)
288 #endif
289 #if defined(TARGET_NR_futex) && defined(__NR_futex)
290 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
291           const struct timespec *,timeout,int *,uaddr2,int,val3)
292 #endif
293 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
294 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
295           unsigned long *, user_mask_ptr);
296 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
297 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
298           unsigned long *, user_mask_ptr);
299 #define __NR_sys_getcpu __NR_getcpu
300 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
301 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
302           void *, arg);
303 _syscall2(int, capget, struct __user_cap_header_struct *, header,
304           struct __user_cap_data_struct *, data);
305 _syscall2(int, capset, struct __user_cap_header_struct *, header,
306           struct __user_cap_data_struct *, data);
307 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
308 _syscall2(int, ioprio_get, int, which, int, who)
309 #endif
310 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
311 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
312 #endif
313 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
314 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
315 #endif
316 
317 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
318 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
319           unsigned long, idx1, unsigned long, idx2)
320 #endif
321 
322 /*
323  * It is assumed that struct statx is architecture independent.
324  */
325 #if defined(TARGET_NR_statx) && defined(__NR_statx)
326 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
327           unsigned int, mask, struct target_statx *, statxbuf)
328 #endif
329 
330 static bitmask_transtbl fcntl_flags_tbl[] = {
331   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
332   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
333   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
334   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
335   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
336   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
337   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
338   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
339   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
340   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
341   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
342   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
343   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
344 #if defined(O_DIRECT)
345   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
346 #endif
347 #if defined(O_NOATIME)
348   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
349 #endif
350 #if defined(O_CLOEXEC)
351   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
352 #endif
353 #if defined(O_PATH)
354   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
355 #endif
356 #if defined(O_TMPFILE)
357   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
358 #endif
359   /* Don't terminate the list prematurely on 64-bit host+guest.  */
360 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
361   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
362 #endif
363   { 0, 0, 0, 0 }
364 };
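/* Each row above is { target_mask, target_bits, host_mask, host_bits }:
 * the generic bitmask translation helpers walk the table and, whenever
 * (flags & target_mask) == target_bits, OR in the corresponding host
 * bits (and vice versa for the host-to-target direction).  Illustrative
 * use, as at the openat()/fcntl() handlers later in this file:
 *
 *     host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 */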
365 
366 static int sys_getcwd1(char *buf, size_t size)
367 {
368   if (getcwd(buf, size) == NULL) {
369       /* getcwd() sets errno */
370       return (-1);
371   }
372   return strlen(buf)+1;
373 }
374 
375 #ifdef TARGET_NR_utimensat
376 #if defined(__NR_utimensat)
377 #define __NR_sys_utimensat __NR_utimensat
378 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
379           const struct timespec *,tsp,int,flags)
380 #else
381 static int sys_utimensat(int dirfd, const char *pathname,
382                          const struct timespec times[2], int flags)
383 {
384     errno = ENOSYS;
385     return -1;
386 }
387 #endif
388 #endif /* TARGET_NR_utimensat */
389 
390 #ifdef TARGET_NR_renameat2
391 #if defined(__NR_renameat2)
392 #define __NR_sys_renameat2 __NR_renameat2
393 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
394           const char *, new, unsigned int, flags)
395 #else
396 static int sys_renameat2(int oldfd, const char *old,
397                          int newfd, const char *new, int flags)
398 {
399     if (flags == 0) {
400         return renameat(oldfd, old, newfd, new);
401     }
402     errno = ENOSYS;
403     return -1;
404 }
405 #endif
406 #endif /* TARGET_NR_renameat2 */
407 
408 #ifdef CONFIG_INOTIFY
409 #include <sys/inotify.h>
410 
411 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
412 static int sys_inotify_init(void)
413 {
414   return (inotify_init());
415 }
416 #endif
417 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
418 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
419 {
420   return (inotify_add_watch(fd, pathname, mask));
421 }
422 #endif
423 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
424 static int sys_inotify_rm_watch(int fd, int32_t wd)
425 {
426   return (inotify_rm_watch(fd, wd));
427 }
428 #endif
429 #ifdef CONFIG_INOTIFY1
430 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
431 static int sys_inotify_init1(int flags)
432 {
433   return (inotify_init1(flags));
434 }
435 #endif
436 #endif
437 #else
438 /* Userspace can usually survive runtime without inotify */
439 #undef TARGET_NR_inotify_init
440 #undef TARGET_NR_inotify_init1
441 #undef TARGET_NR_inotify_add_watch
442 #undef TARGET_NR_inotify_rm_watch
443 #endif /* CONFIG_INOTIFY  */
444 
445 #if defined(TARGET_NR_prlimit64)
446 #ifndef __NR_prlimit64
447 # define __NR_prlimit64 -1
448 #endif
449 #define __NR_sys_prlimit64 __NR_prlimit64
450 /* The glibc rlimit structure may not be that used by the underlying syscall */
451 struct host_rlimit64 {
452     uint64_t rlim_cur;
453     uint64_t rlim_max;
454 };
455 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
456           const struct host_rlimit64 *, new_limit,
457           struct host_rlimit64 *, old_limit)
458 #endif
459 
460 
461 #if defined(TARGET_NR_timer_create)
462 /* Maximum of 32 active POSIX timers allowed at any one time. */
463 static timer_t g_posix_timers[32] = { 0, } ;
464 
465 static inline int next_free_host_timer(void)
466 {
467     int k ;
468     /* FIXME: Does finding the next free slot require a lock? */
469     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
470         if (g_posix_timers[k] == 0) {
471             g_posix_timers[k] = (timer_t) 1;
472             return k;
473         }
474     }
475     return -1;
476 }
477 #endif
478 
479 /* ARM EABI and MIPS expect 64-bit types to be aligned even on pairs of registers */
480 #ifdef TARGET_ARM
481 static inline int regpairs_aligned(void *cpu_env, int num)
482 {
483     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
484 }
485 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
486 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
487 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
488 /* The SysV ABI for PPC32 expects 64-bit parameters to be passed on odd/even pairs
489  * of registers, which translates to the same as ARM/MIPS, because we start with
490  * r3 as arg1 */
491 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
492 #elif defined(TARGET_SH4)
493 /* SH4 doesn't align register pairs, except for p{read,write}64 */
494 static inline int regpairs_aligned(void *cpu_env, int num)
495 {
496     switch (num) {
497     case TARGET_NR_pread64:
498     case TARGET_NR_pwrite64:
499         return 1;
500 
501     default:
502         return 0;
503     }
504 }
505 #elif defined(TARGET_XTENSA)
506 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
507 #else
508 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
509 #endif
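/* Illustrative sketch of how regpairs_aligned() is used further down in
 * this file: when the target ABI requires an aligned register pair for a
 * 64-bit argument, the argument slots are shifted before the pair is
 * reassembled, roughly:
 *
 *     if (regpairs_aligned(cpu_env, TARGET_NR_pread64)) {
 *         arg4 = arg5;
 *         arg5 = arg6;
 *     }
 *     ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
 */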
510 
511 #define ERRNO_TABLE_SIZE 1200
512 
513 /* target_to_host_errno_table[] is initialized from
514  * host_to_target_errno_table[] in syscall_init(). */
515 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
516 };
517 
518 /*
519  * This list is the union of errno values overridden in asm-<arch>/errno.h
520  * minus the errnos that are not actually generic to all archs.
521  */
522 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
523     [EAGAIN]		= TARGET_EAGAIN,
524     [EIDRM]		= TARGET_EIDRM,
525     [ECHRNG]		= TARGET_ECHRNG,
526     [EL2NSYNC]		= TARGET_EL2NSYNC,
527     [EL3HLT]		= TARGET_EL3HLT,
528     [EL3RST]		= TARGET_EL3RST,
529     [ELNRNG]		= TARGET_ELNRNG,
530     [EUNATCH]		= TARGET_EUNATCH,
531     [ENOCSI]		= TARGET_ENOCSI,
532     [EL2HLT]		= TARGET_EL2HLT,
533     [EDEADLK]		= TARGET_EDEADLK,
534     [ENOLCK]		= TARGET_ENOLCK,
535     [EBADE]		= TARGET_EBADE,
536     [EBADR]		= TARGET_EBADR,
537     [EXFULL]		= TARGET_EXFULL,
538     [ENOANO]		= TARGET_ENOANO,
539     [EBADRQC]		= TARGET_EBADRQC,
540     [EBADSLT]		= TARGET_EBADSLT,
541     [EBFONT]		= TARGET_EBFONT,
542     [ENOSTR]		= TARGET_ENOSTR,
543     [ENODATA]		= TARGET_ENODATA,
544     [ETIME]		= TARGET_ETIME,
545     [ENOSR]		= TARGET_ENOSR,
546     [ENONET]		= TARGET_ENONET,
547     [ENOPKG]		= TARGET_ENOPKG,
548     [EREMOTE]		= TARGET_EREMOTE,
549     [ENOLINK]		= TARGET_ENOLINK,
550     [EADV]		= TARGET_EADV,
551     [ESRMNT]		= TARGET_ESRMNT,
552     [ECOMM]		= TARGET_ECOMM,
553     [EPROTO]		= TARGET_EPROTO,
554     [EDOTDOT]		= TARGET_EDOTDOT,
555     [EMULTIHOP]		= TARGET_EMULTIHOP,
556     [EBADMSG]		= TARGET_EBADMSG,
557     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
558     [EOVERFLOW]		= TARGET_EOVERFLOW,
559     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
560     [EBADFD]		= TARGET_EBADFD,
561     [EREMCHG]		= TARGET_EREMCHG,
562     [ELIBACC]		= TARGET_ELIBACC,
563     [ELIBBAD]		= TARGET_ELIBBAD,
564     [ELIBSCN]		= TARGET_ELIBSCN,
565     [ELIBMAX]		= TARGET_ELIBMAX,
566     [ELIBEXEC]		= TARGET_ELIBEXEC,
567     [EILSEQ]		= TARGET_EILSEQ,
568     [ENOSYS]		= TARGET_ENOSYS,
569     [ELOOP]		= TARGET_ELOOP,
570     [ERESTART]		= TARGET_ERESTART,
571     [ESTRPIPE]		= TARGET_ESTRPIPE,
572     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
573     [EUSERS]		= TARGET_EUSERS,
574     [ENOTSOCK]		= TARGET_ENOTSOCK,
575     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
576     [EMSGSIZE]		= TARGET_EMSGSIZE,
577     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
578     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
579     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
580     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
581     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
582     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
583     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
584     [EADDRINUSE]	= TARGET_EADDRINUSE,
585     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
586     [ENETDOWN]		= TARGET_ENETDOWN,
587     [ENETUNREACH]	= TARGET_ENETUNREACH,
588     [ENETRESET]		= TARGET_ENETRESET,
589     [ECONNABORTED]	= TARGET_ECONNABORTED,
590     [ECONNRESET]	= TARGET_ECONNRESET,
591     [ENOBUFS]		= TARGET_ENOBUFS,
592     [EISCONN]		= TARGET_EISCONN,
593     [ENOTCONN]		= TARGET_ENOTCONN,
594     [EUCLEAN]		= TARGET_EUCLEAN,
595     [ENOTNAM]		= TARGET_ENOTNAM,
596     [ENAVAIL]		= TARGET_ENAVAIL,
597     [EISNAM]		= TARGET_EISNAM,
598     [EREMOTEIO]		= TARGET_EREMOTEIO,
599     [EDQUOT]            = TARGET_EDQUOT,
600     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
601     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
602     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
603     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
604     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
605     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
606     [EALREADY]		= TARGET_EALREADY,
607     [EINPROGRESS]	= TARGET_EINPROGRESS,
608     [ESTALE]		= TARGET_ESTALE,
609     [ECANCELED]		= TARGET_ECANCELED,
610     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
611     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
612 #ifdef ENOKEY
613     [ENOKEY]		= TARGET_ENOKEY,
614 #endif
615 #ifdef EKEYEXPIRED
616     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
617 #endif
618 #ifdef EKEYREVOKED
619     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
620 #endif
621 #ifdef EKEYREJECTED
622     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
623 #endif
624 #ifdef EOWNERDEAD
625     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
626 #endif
627 #ifdef ENOTRECOVERABLE
628     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
629 #endif
630 #ifdef ENOMSG
631     [ENOMSG]            = TARGET_ENOMSG,
632 #endif
633 #ifdef ERKFILL
634     [ERFKILL]           = TARGET_ERFKILL,
635 #endif
636 #ifdef EHWPOISON
637     [EHWPOISON]         = TARGET_EHWPOISON,
638 #endif
639 };
640 
641 static inline int host_to_target_errno(int err)
642 {
643     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
644         host_to_target_errno_table[err]) {
645         return host_to_target_errno_table[err];
646     }
647     return err;
648 }
649 
650 static inline int target_to_host_errno(int err)
651 {
652     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
653         target_to_host_errno_table[err]) {
654         return target_to_host_errno_table[err];
655     }
656     return err;
657 }
658 
659 static inline abi_long get_errno(abi_long ret)
660 {
661     if (ret == -1)
662         return -host_to_target_errno(errno);
663     else
664         return ret;
665 }
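/* Typical usage pattern (illustrative): host calls are wrapped so that a
 * failure is reported to the guest as a negative *target* errno, e.g.
 *
 *     ret = get_errno(unlink(p));
 *
 * yields -TARGET_ENOENT if the host unlink() failed with ENOENT, and the
 * (non-negative) host return value on success.
 */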
666 
667 const char *target_strerror(int err)
668 {
669     if (err == TARGET_ERESTARTSYS) {
670         return "To be restarted";
671     }
672     if (err == TARGET_QEMU_ESIGRETURN) {
673         return "Successful exit from sigreturn";
674     }
675 
676     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
677         return NULL;
678     }
679     return strerror(target_to_host_errno(err));
680 }
681 
682 #define safe_syscall0(type, name) \
683 static type safe_##name(void) \
684 { \
685     return safe_syscall(__NR_##name); \
686 }
687 
688 #define safe_syscall1(type, name, type1, arg1) \
689 static type safe_##name(type1 arg1) \
690 { \
691     return safe_syscall(__NR_##name, arg1); \
692 }
693 
694 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
695 static type safe_##name(type1 arg1, type2 arg2) \
696 { \
697     return safe_syscall(__NR_##name, arg1, arg2); \
698 }
699 
700 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
701 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
702 { \
703     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
704 }
705 
706 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
707     type4, arg4) \
708 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
709 { \
710     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
711 }
712 
713 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
714     type4, arg4, type5, arg5) \
715 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
716     type5 arg5) \
717 { \
718     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
719 }
720 
721 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
722     type4, arg4, type5, arg5, type6, arg6) \
723 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
724     type5 arg5, type6 arg6) \
725 { \
726     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
727 }
728 
729 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
730 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
731 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
732               int, flags, mode_t, mode)
733 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
734               struct rusage *, rusage)
735 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
736               int, options, struct rusage *, rusage)
737 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
738 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
739               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
740 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
741               struct timespec *, tsp, const sigset_t *, sigmask,
742               size_t, sigsetsize)
743 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
744               int, maxevents, int, timeout, const sigset_t *, sigmask,
745               size_t, sigsetsize)
746 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
747               const struct timespec *,timeout,int *,uaddr2,int,val3)
748 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
749 safe_syscall2(int, kill, pid_t, pid, int, sig)
750 safe_syscall2(int, tkill, int, tid, int, sig)
751 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
752 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
753 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
754 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
755               unsigned long, pos_l, unsigned long, pos_h)
756 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
757               unsigned long, pos_l, unsigned long, pos_h)
758 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
759               socklen_t, addrlen)
760 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
761               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
762 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
763               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
764 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
765 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
766 safe_syscall2(int, flock, int, fd, int, operation)
767 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
768               const struct timespec *, uts, size_t, sigsetsize)
769 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
770               int, flags)
771 safe_syscall2(int, nanosleep, const struct timespec *, req,
772               struct timespec *, rem)
773 #ifdef TARGET_NR_clock_nanosleep
774 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
775               const struct timespec *, req, struct timespec *, rem)
776 #endif
777 #ifdef __NR_ipc
778 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
779               void *, ptr, long, fifth)
780 #endif
781 #ifdef __NR_msgsnd
782 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
783               int, flags)
784 #endif
785 #ifdef __NR_msgrcv
786 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
787               long, msgtype, int, flags)
788 #endif
789 #ifdef __NR_semtimedop
790 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
791               unsigned, nsops, const struct timespec *, timeout)
792 #endif
793 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
794 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
795               size_t, len, unsigned, prio, const struct timespec *, timeout)
796 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
797               size_t, len, unsigned *, prio, const struct timespec *, timeout)
798 #endif
799 /* We do ioctl like this rather than via safe_syscall3 to preserve the
800  * "third argument might be integer or pointer or not present" behaviour of
801  * the libc function.
802  */
803 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
804 /* Similarly for fcntl. Note that callers must always:
805  *  - pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
806  *  - use the flock64 struct rather than unsuffixed flock
807  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
808  */
809 #ifdef __NR_fcntl64
810 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
811 #else
812 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
813 #endif
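/* Illustrative call site (sketch), following the note above: the 64-bit
 * flavoured constants and structure are used unconditionally; on 64-bit
 * hosts without fcntl64 they are identical to the unsuffixed forms:
 *
 *     struct flock64 fl64;
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 */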
814 
815 static inline int host_to_target_sock_type(int host_type)
816 {
817     int target_type;
818 
819     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
820     case SOCK_DGRAM:
821         target_type = TARGET_SOCK_DGRAM;
822         break;
823     case SOCK_STREAM:
824         target_type = TARGET_SOCK_STREAM;
825         break;
826     default:
827         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
828         break;
829     }
830 
831 #if defined(SOCK_CLOEXEC)
832     if (host_type & SOCK_CLOEXEC) {
833         target_type |= TARGET_SOCK_CLOEXEC;
834     }
835 #endif
836 
837 #if defined(SOCK_NONBLOCK)
838     if (host_type & SOCK_NONBLOCK) {
839         target_type |= TARGET_SOCK_NONBLOCK;
840     }
841 #endif
842 
843     return target_type;
844 }
845 
846 static abi_ulong target_brk;
847 static abi_ulong target_original_brk;
848 static abi_ulong brk_page;
849 
850 void target_set_brk(abi_ulong new_brk)
851 {
852     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
853     brk_page = HOST_PAGE_ALIGN(target_brk);
854 }
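/* Informally, the three statics above satisfy
 *     target_original_brk <= target_brk <= brk_page
 * with brk_page kept host-page-aligned; brk_page marks the end of the
 * memory actually reserved for the guest heap, so do_brk() below only
 * needs to mmap more space once a request moves beyond it.
 */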
855 
856 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
857 #define DEBUGF_BRK(message, args...)
858 
859 /* do_brk() must return target values and target errnos. */
860 abi_long do_brk(abi_ulong new_brk)
861 {
862     abi_long mapped_addr;
863     abi_ulong new_alloc_size;
864 
865     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
866 
867     if (!new_brk) {
868         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
869         return target_brk;
870     }
871     if (new_brk < target_original_brk) {
872         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
873                    target_brk);
874         return target_brk;
875     }
876 
877     /* If the new brk is less than the highest page reserved to the
878      * target heap allocation, set it and we're almost done...  */
879     if (new_brk <= brk_page) {
880         /* Heap contents are initialized to zero, as for anonymous
881          * mapped pages.  */
882         if (new_brk > target_brk) {
883             memset(g2h(target_brk), 0, new_brk - target_brk);
884         }
885 	target_brk = new_brk;
886         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
887 	return target_brk;
888     }
889 
890     /* We need to allocate more memory after the brk... Note that
891      * we don't use MAP_FIXED because that will map over the top of
892      * any existing mapping (like the one with the host libc or qemu
893      * itself); instead we treat "mapped but at wrong address" as
894      * a failure and unmap again.
895      */
896     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
897     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
898                                         PROT_READ|PROT_WRITE,
899                                         MAP_ANON|MAP_PRIVATE, 0, 0));
900 
901     if (mapped_addr == brk_page) {
902         /* Heap contents are initialized to zero, as for anonymous
903          * mapped pages.  Technically the new pages are already
904          * initialized to zero since they *are* anonymous mapped
905          * pages, however we have to take care with the contents that
906          * come from the remaining part of the previous page: it may
907      * contain garbage data due to a previous heap usage (grown
908      * then shrunk).  */
909         memset(g2h(target_brk), 0, brk_page - target_brk);
910 
911         target_brk = new_brk;
912         brk_page = HOST_PAGE_ALIGN(target_brk);
913         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
914             target_brk);
915         return target_brk;
916     } else if (mapped_addr != -1) {
917         /* Mapped but at wrong address, meaning there wasn't actually
918          * enough space for this brk.
919          */
920         target_munmap(mapped_addr, new_alloc_size);
921         mapped_addr = -1;
922         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
923     }
924     else {
925         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
926     }
927 
928 #if defined(TARGET_ALPHA)
929     /* We (partially) emulate OSF/1 on Alpha, which requires we
930        return a proper errno, not an unchanged brk value.  */
931     return -TARGET_ENOMEM;
932 #endif
933     /* For everything else, return the previous break. */
934     return target_brk;
935 }
936 
937 static inline abi_long copy_from_user_fdset(fd_set *fds,
938                                             abi_ulong target_fds_addr,
939                                             int n)
940 {
941     int i, nw, j, k;
942     abi_ulong b, *target_fds;
943 
944     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
945     if (!(target_fds = lock_user(VERIFY_READ,
946                                  target_fds_addr,
947                                  sizeof(abi_ulong) * nw,
948                                  1)))
949         return -TARGET_EFAULT;
950 
951     FD_ZERO(fds);
952     k = 0;
953     for (i = 0; i < nw; i++) {
954         /* grab the abi_ulong */
955         __get_user(b, &target_fds[i]);
956         for (j = 0; j < TARGET_ABI_BITS; j++) {
957             /* check the bit inside the abi_ulong */
958             if ((b >> j) & 1)
959                 FD_SET(k, fds);
960             k++;
961         }
962     }
963 
964     unlock_user(target_fds, target_fds_addr, 0);
965 
966     return 0;
967 }
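/* Worked example of the bit layout handled above, assuming a 32-bit
 * target ABI (TARGET_ABI_BITS == 32): for n == 70 descriptors,
 * nw == 3 abi_ulongs are read from guest memory, and descriptor k is
 * treated as set when bit (k % 32) of word (k / 32) is set, after the
 * usual guest byte-order conversion done by __get_user().
 */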
968 
969 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
970                                                  abi_ulong target_fds_addr,
971                                                  int n)
972 {
973     if (target_fds_addr) {
974         if (copy_from_user_fdset(fds, target_fds_addr, n))
975             return -TARGET_EFAULT;
976         *fds_ptr = fds;
977     } else {
978         *fds_ptr = NULL;
979     }
980     return 0;
981 }
982 
983 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
984                                           const fd_set *fds,
985                                           int n)
986 {
987     int i, nw, j, k;
988     abi_long v;
989     abi_ulong *target_fds;
990 
991     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
992     if (!(target_fds = lock_user(VERIFY_WRITE,
993                                  target_fds_addr,
994                                  sizeof(abi_ulong) * nw,
995                                  0)))
996         return -TARGET_EFAULT;
997 
998     k = 0;
999     for (i = 0; i < nw; i++) {
1000         v = 0;
1001         for (j = 0; j < TARGET_ABI_BITS; j++) {
1002             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1003             k++;
1004         }
1005         __put_user(v, &target_fds[i]);
1006     }
1007 
1008     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1009 
1010     return 0;
1011 }
1012 
1013 #if defined(__alpha__)
1014 #define HOST_HZ 1024
1015 #else
1016 #define HOST_HZ 100
1017 #endif
1018 
1019 static inline abi_long host_to_target_clock_t(long ticks)
1020 {
1021 #if HOST_HZ == TARGET_HZ
1022     return ticks;
1023 #else
1024     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1025 #endif
1026 }
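/* Example of the scaling above: with HOST_HZ == 100 and a hypothetical
 * target defining TARGET_HZ == 250, a host value of 100 clock ticks
 * (one second of CPU time) is reported to the guest as 250 ticks.
 */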
1027 
1028 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1029                                              const struct rusage *rusage)
1030 {
1031     struct target_rusage *target_rusage;
1032 
1033     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1034         return -TARGET_EFAULT;
1035     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1036     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1037     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1038     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1039     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1040     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1041     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1042     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1043     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1044     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1045     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1046     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1047     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1048     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1049     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1050     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1051     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1052     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1053     unlock_user_struct(target_rusage, target_addr, 1);
1054 
1055     return 0;
1056 }
1057 
1058 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1059 {
1060     abi_ulong target_rlim_swap;
1061     rlim_t result;
1062 
1063     target_rlim_swap = tswapal(target_rlim);
1064     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1065         return RLIM_INFINITY;
1066 
1067     result = target_rlim_swap;
1068     if (target_rlim_swap != (rlim_t)result)
1069         return RLIM_INFINITY;
1070 
1071     return result;
1072 }
1073 
1074 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1075 {
1076     abi_ulong target_rlim_swap;
1077     abi_ulong result;
1078 
1079     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1080         target_rlim_swap = TARGET_RLIM_INFINITY;
1081     else
1082         target_rlim_swap = rlim;
1083     result = tswapal(target_rlim_swap);
1084 
1085     return result;
1086 }
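/* Examples of the clamping above, assuming a 32-bit target ABI: a guest
 * passing TARGET_RLIM_INFINITY gets host RLIM_INFINITY, and any host
 * limit too large to represent in the target's abi_long is reported back
 * to the guest as TARGET_RLIM_INFINITY rather than being silently
 * truncated.
 */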
1087 
1088 static inline int target_to_host_resource(int code)
1089 {
1090     switch (code) {
1091     case TARGET_RLIMIT_AS:
1092         return RLIMIT_AS;
1093     case TARGET_RLIMIT_CORE:
1094         return RLIMIT_CORE;
1095     case TARGET_RLIMIT_CPU:
1096         return RLIMIT_CPU;
1097     case TARGET_RLIMIT_DATA:
1098         return RLIMIT_DATA;
1099     case TARGET_RLIMIT_FSIZE:
1100         return RLIMIT_FSIZE;
1101     case TARGET_RLIMIT_LOCKS:
1102         return RLIMIT_LOCKS;
1103     case TARGET_RLIMIT_MEMLOCK:
1104         return RLIMIT_MEMLOCK;
1105     case TARGET_RLIMIT_MSGQUEUE:
1106         return RLIMIT_MSGQUEUE;
1107     case TARGET_RLIMIT_NICE:
1108         return RLIMIT_NICE;
1109     case TARGET_RLIMIT_NOFILE:
1110         return RLIMIT_NOFILE;
1111     case TARGET_RLIMIT_NPROC:
1112         return RLIMIT_NPROC;
1113     case TARGET_RLIMIT_RSS:
1114         return RLIMIT_RSS;
1115     case TARGET_RLIMIT_RTPRIO:
1116         return RLIMIT_RTPRIO;
1117     case TARGET_RLIMIT_SIGPENDING:
1118         return RLIMIT_SIGPENDING;
1119     case TARGET_RLIMIT_STACK:
1120         return RLIMIT_STACK;
1121     default:
1122         return code;
1123     }
1124 }
1125 
1126 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1127                                               abi_ulong target_tv_addr)
1128 {
1129     struct target_timeval *target_tv;
1130 
1131     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1132         return -TARGET_EFAULT;
1133     }
1134 
1135     __get_user(tv->tv_sec, &target_tv->tv_sec);
1136     __get_user(tv->tv_usec, &target_tv->tv_usec);
1137 
1138     unlock_user_struct(target_tv, target_tv_addr, 0);
1139 
1140     return 0;
1141 }
1142 
1143 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1144                                             const struct timeval *tv)
1145 {
1146     struct target_timeval *target_tv;
1147 
1148     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1149         return -TARGET_EFAULT;
1150     }
1151 
1152     __put_user(tv->tv_sec, &target_tv->tv_sec);
1153     __put_user(tv->tv_usec, &target_tv->tv_usec);
1154 
1155     unlock_user_struct(target_tv, target_tv_addr, 1);
1156 
1157     return 0;
1158 }
1159 
1160 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1161                                              const struct timeval *tv)
1162 {
1163     struct target__kernel_sock_timeval *target_tv;
1164 
1165     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1166         return -TARGET_EFAULT;
1167     }
1168 
1169     __put_user(tv->tv_sec, &target_tv->tv_sec);
1170     __put_user(tv->tv_usec, &target_tv->tv_usec);
1171 
1172     unlock_user_struct(target_tv, target_tv_addr, 1);
1173 
1174     return 0;
1175 }
1176 
1177 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1178                                                abi_ulong target_addr)
1179 {
1180     struct target_timespec *target_ts;
1181 
1182     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1183         return -TARGET_EFAULT;
1184     }
1185     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1186     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1187     unlock_user_struct(target_ts, target_addr, 0);
1188     return 0;
1189 }
1190 
1191 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1192                                                struct timespec *host_ts)
1193 {
1194     struct target_timespec *target_ts;
1195 
1196     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1197         return -TARGET_EFAULT;
1198     }
1199     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1200     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1201     unlock_user_struct(target_ts, target_addr, 1);
1202     return 0;
1203 }
1204 
1205 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1206                                                  struct timespec *host_ts)
1207 {
1208     struct target__kernel_timespec *target_ts;
1209 
1210     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1211         return -TARGET_EFAULT;
1212     }
1213     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1214     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1215     unlock_user_struct(target_ts, target_addr, 1);
1216     return 0;
1217 }
1218 
1219 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1220                                                abi_ulong target_tz_addr)
1221 {
1222     struct target_timezone *target_tz;
1223 
1224     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1225         return -TARGET_EFAULT;
1226     }
1227 
1228     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1229     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1230 
1231     unlock_user_struct(target_tz, target_tz_addr, 0);
1232 
1233     return 0;
1234 }
1235 
1236 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1237 #include <mqueue.h>
1238 
1239 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1240                                               abi_ulong target_mq_attr_addr)
1241 {
1242     struct target_mq_attr *target_mq_attr;
1243 
1244     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1245                           target_mq_attr_addr, 1))
1246         return -TARGET_EFAULT;
1247 
1248     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1249     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1250     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1251     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1252 
1253     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1254 
1255     return 0;
1256 }
1257 
1258 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1259                                             const struct mq_attr *attr)
1260 {
1261     struct target_mq_attr *target_mq_attr;
1262 
1263     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1264                           target_mq_attr_addr, 0))
1265         return -TARGET_EFAULT;
1266 
1267     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1268     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1269     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1270     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1271 
1272     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1273 
1274     return 0;
1275 }
1276 #endif
1277 
1278 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1279 /* do_select() must return target values and target errnos. */
1280 static abi_long do_select(int n,
1281                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1282                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1283 {
1284     fd_set rfds, wfds, efds;
1285     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1286     struct timeval tv;
1287     struct timespec ts, *ts_ptr;
1288     abi_long ret;
1289 
1290     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1291     if (ret) {
1292         return ret;
1293     }
1294     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1295     if (ret) {
1296         return ret;
1297     }
1298     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1299     if (ret) {
1300         return ret;
1301     }
1302 
1303     if (target_tv_addr) {
1304         if (copy_from_user_timeval(&tv, target_tv_addr))
1305             return -TARGET_EFAULT;
1306         ts.tv_sec = tv.tv_sec;
1307         ts.tv_nsec = tv.tv_usec * 1000;
1308         ts_ptr = &ts;
1309     } else {
1310         ts_ptr = NULL;
1311     }
1312 
1313     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1314                                   ts_ptr, NULL));
1315 
1316     if (!is_error(ret)) {
1317         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1318             return -TARGET_EFAULT;
1319         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1320             return -TARGET_EFAULT;
1321         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1322             return -TARGET_EFAULT;
1323 
1324         if (target_tv_addr) {
1325             tv.tv_sec = ts.tv_sec;
1326             tv.tv_usec = ts.tv_nsec / 1000;
1327             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1328                 return -TARGET_EFAULT;
1329             }
1330         }
1331     }
1332 
1333     return ret;
1334 }
1335 
1336 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1337 static abi_long do_old_select(abi_ulong arg1)
1338 {
1339     struct target_sel_arg_struct *sel;
1340     abi_ulong inp, outp, exp, tvp;
1341     long nsel;
1342 
1343     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1344         return -TARGET_EFAULT;
1345     }
1346 
1347     nsel = tswapal(sel->n);
1348     inp = tswapal(sel->inp);
1349     outp = tswapal(sel->outp);
1350     exp = tswapal(sel->exp);
1351     tvp = tswapal(sel->tvp);
1352 
1353     unlock_user_struct(sel, arg1, 0);
1354 
1355     return do_select(nsel, inp, outp, exp, tvp);
1356 }
1357 #endif
1358 #endif
1359 
1360 static abi_long do_pipe2(int host_pipe[], int flags)
1361 {
1362 #ifdef CONFIG_PIPE2
1363     return pipe2(host_pipe, flags);
1364 #else
1365     return -ENOSYS;
1366 #endif
1367 }
1368 
1369 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1370                         int flags, int is_pipe2)
1371 {
1372     int host_pipe[2];
1373     abi_long ret;
1374     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1375 
1376     if (is_error(ret))
1377         return get_errno(ret);
1378 
1379     /* Several targets have special calling conventions for the original
1380        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1381     if (!is_pipe2) {
1382 #if defined(TARGET_ALPHA)
1383         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1384         return host_pipe[0];
1385 #elif defined(TARGET_MIPS)
1386         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1387         return host_pipe[0];
1388 #elif defined(TARGET_SH4)
1389         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1390         return host_pipe[0];
1391 #elif defined(TARGET_SPARC)
1392         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1393         return host_pipe[0];
1394 #endif
1395     }
1396 
1397     if (put_user_s32(host_pipe[0], pipedes)
1398         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1399         return -TARGET_EFAULT;
1400     return get_errno(ret);
1401 }
1402 
1403 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1404                                               abi_ulong target_addr,
1405                                               socklen_t len)
1406 {
1407     struct target_ip_mreqn *target_smreqn;
1408 
1409     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1410     if (!target_smreqn)
1411         return -TARGET_EFAULT;
1412     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1413     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1414     if (len == sizeof(struct target_ip_mreqn))
1415         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1416     unlock_user(target_smreqn, target_addr, 0);
1417 
1418     return 0;
1419 }
1420 
1421 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1422                                                abi_ulong target_addr,
1423                                                socklen_t len)
1424 {
1425     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1426     sa_family_t sa_family;
1427     struct target_sockaddr *target_saddr;
1428 
1429     if (fd_trans_target_to_host_addr(fd)) {
1430         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1431     }
1432 
1433     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1434     if (!target_saddr)
1435         return -TARGET_EFAULT;
1436 
1437     sa_family = tswap16(target_saddr->sa_family);
1438 
1439     /* Oops. The caller might send an incomplete sun_path; sun_path
1440      * must be terminated by \0 (see the manual page), but
1441      * unfortunately it is quite common to specify sockaddr_un
1442      * length as "strlen(x->sun_path)" while it should be
1443      * "strlen(...) + 1". We'll fix that here if needed.
1444      * The Linux kernel has a similar feature.
1445      */
1446 
1447     if (sa_family == AF_UNIX) {
1448         if (len < unix_maxlen && len > 0) {
1449             char *cp = (char*)target_saddr;
1450 
1451             if ( cp[len-1] && !cp[len] )
1452                 len++;
1453         }
1454         if (len > unix_maxlen)
1455             len = unix_maxlen;
1456     }
1457 
1458     memcpy(addr, target_saddr, len);
1459     addr->sa_family = sa_family;
1460     if (sa_family == AF_NETLINK) {
1461         struct sockaddr_nl *nladdr;
1462 
1463         nladdr = (struct sockaddr_nl *)addr;
1464         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1465         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1466     } else if (sa_family == AF_PACKET) {
1467 	struct target_sockaddr_ll *lladdr;
1468 
1469 	lladdr = (struct target_sockaddr_ll *)addr;
1470 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1471 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1472     }
1473     unlock_user(target_saddr, target_addr, 0);
1474 
1475     return 0;
1476 }
1477 
1478 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1479                                                struct sockaddr *addr,
1480                                                socklen_t len)
1481 {
1482     struct target_sockaddr *target_saddr;
1483 
1484     if (len == 0) {
1485         return 0;
1486     }
1487     assert(addr);
1488 
1489     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1490     if (!target_saddr)
1491         return -TARGET_EFAULT;
1492     memcpy(target_saddr, addr, len);
1493     if (len >= offsetof(struct target_sockaddr, sa_family) +
1494         sizeof(target_saddr->sa_family)) {
1495         target_saddr->sa_family = tswap16(addr->sa_family);
1496     }
1497     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1498         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1499         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1500         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1501     } else if (addr->sa_family == AF_PACKET) {
1502         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1503         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1504         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1505     } else if (addr->sa_family == AF_INET6 &&
1506                len >= sizeof(struct target_sockaddr_in6)) {
1507         struct target_sockaddr_in6 *target_in6 =
1508                (struct target_sockaddr_in6 *)target_saddr;
1509         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1510     }
1511     unlock_user(target_saddr, target_addr, len);
1512 
1513     return 0;
1514 }
1515 
1516 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1517                                            struct target_msghdr *target_msgh)
1518 {
1519     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1520     abi_long msg_controllen;
1521     abi_ulong target_cmsg_addr;
1522     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1523     socklen_t space = 0;
1524 
1525     msg_controllen = tswapal(target_msgh->msg_controllen);
1526     if (msg_controllen < sizeof (struct target_cmsghdr))
1527         goto the_end;
1528     target_cmsg_addr = tswapal(target_msgh->msg_control);
1529     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1530     target_cmsg_start = target_cmsg;
1531     if (!target_cmsg)
1532         return -TARGET_EFAULT;
1533 
1534     while (cmsg && target_cmsg) {
1535         void *data = CMSG_DATA(cmsg);
1536         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1537 
1538         int len = tswapal(target_cmsg->cmsg_len)
1539             - sizeof(struct target_cmsghdr);
1540 
1541         space += CMSG_SPACE(len);
1542         if (space > msgh->msg_controllen) {
1543             space -= CMSG_SPACE(len);
1544             /* This is a QEMU bug, since we allocated the payload
1545              * area ourselves (unlike overflow in host-to-target
1546              * conversion, which is just the guest giving us a buffer
1547              * that's too small). It can't happen for the payload types
1548              * we currently support; if it becomes an issue in future
1549              * we would need to improve our allocation strategy to
1550              * something more intelligent than "twice the size of the
1551              * target buffer we're reading from".
1552              */
1553             gemu_log("Host cmsg overflow\n");
1554             break;
1555         }
1556 
1557         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1558             cmsg->cmsg_level = SOL_SOCKET;
1559         } else {
1560             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1561         }
1562         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1563         cmsg->cmsg_len = CMSG_LEN(len);
1564 
1565         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1566             int *fd = (int *)data;
1567             int *target_fd = (int *)target_data;
1568             int i, numfds = len / sizeof(int);
1569 
1570             for (i = 0; i < numfds; i++) {
1571                 __get_user(fd[i], target_fd + i);
1572             }
1573         } else if (cmsg->cmsg_level == SOL_SOCKET
1574                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1575             struct ucred *cred = (struct ucred *)data;
1576             struct target_ucred *target_cred =
1577                 (struct target_ucred *)target_data;
1578 
1579             __get_user(cred->pid, &target_cred->pid);
1580             __get_user(cred->uid, &target_cred->uid);
1581             __get_user(cred->gid, &target_cred->gid);
1582         } else {
1583             gemu_log("Unsupported ancillary data: %d/%d\n",
1584                                         cmsg->cmsg_level, cmsg->cmsg_type);
1585             memcpy(data, target_data, len);
1586         }
1587 
1588         cmsg = CMSG_NXTHDR(msgh, cmsg);
1589         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1590                                          target_cmsg_start);
1591     }
1592     unlock_user(target_cmsg, target_cmsg_addr, 0);
1593  the_end:
1594     msgh->msg_controllen = space;
1595     return 0;
1596 }
1597 
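/* Convert host ancillary data back into the guest's control buffer.
 * Payloads whose layout differs between host and target (SCM_RIGHTS fd
 * arrays, SO_TIMESTAMP timevals, SCM_CREDENTIALS, IP/IPv6 error queue
 * records) are converted field by field; if the guest buffer is too small
 * the data is truncated and MSG_CTRUNC reported, matching the kernel's
 * put_cmsg() behaviour.
 */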
1598 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1599                                            struct msghdr *msgh)
1600 {
1601     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1602     abi_long msg_controllen;
1603     abi_ulong target_cmsg_addr;
1604     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1605     socklen_t space = 0;
1606 
1607     msg_controllen = tswapal(target_msgh->msg_controllen);
1608     if (msg_controllen < sizeof (struct target_cmsghdr))
1609         goto the_end;
1610     target_cmsg_addr = tswapal(target_msgh->msg_control);
1611     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1612     target_cmsg_start = target_cmsg;
1613     if (!target_cmsg)
1614         return -TARGET_EFAULT;
1615 
1616     while (cmsg && target_cmsg) {
1617         void *data = CMSG_DATA(cmsg);
1618         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1619 
1620         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1621         int tgt_len, tgt_space;
1622 
1623         /* We never copy a half-header but may copy half-data;
1624          * this is Linux's behaviour in put_cmsg(). Note that
1625          * truncation here is a guest problem (which we report
1626          * to the guest via the CTRUNC bit), unlike truncation
1627          * in target_to_host_cmsg, which is a QEMU bug.
1628          */
1629         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1630             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1631             break;
1632         }
1633 
1634         if (cmsg->cmsg_level == SOL_SOCKET) {
1635             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1636         } else {
1637             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1638         }
1639         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1640 
1641         /* Payload types which need a different size of payload on
1642          * the target must adjust tgt_len here.
1643          */
1644         tgt_len = len;
1645         switch (cmsg->cmsg_level) {
1646         case SOL_SOCKET:
1647             switch (cmsg->cmsg_type) {
1648             case SO_TIMESTAMP:
1649                 tgt_len = sizeof(struct target_timeval);
1650                 break;
1651             default:
1652                 break;
1653             }
1654             break;
1655         default:
1656             break;
1657         }
1658 
1659         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1660             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1661             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1662         }
1663 
1664         /* We must now copy-and-convert len bytes of payload
1665          * into tgt_len bytes of destination space. Bear in mind
1666          * that in both source and destination we may be dealing
1667          * with a truncated value!
1668          */
1669         switch (cmsg->cmsg_level) {
1670         case SOL_SOCKET:
1671             switch (cmsg->cmsg_type) {
1672             case SCM_RIGHTS:
1673             {
1674                 int *fd = (int *)data;
1675                 int *target_fd = (int *)target_data;
1676                 int i, numfds = tgt_len / sizeof(int);
1677 
1678                 for (i = 0; i < numfds; i++) {
1679                     __put_user(fd[i], target_fd + i);
1680                 }
1681                 break;
1682             }
1683             case SO_TIMESTAMP:
1684             {
1685                 struct timeval *tv = (struct timeval *)data;
1686                 struct target_timeval *target_tv =
1687                     (struct target_timeval *)target_data;
1688 
1689                 if (len != sizeof(struct timeval) ||
1690                     tgt_len != sizeof(struct target_timeval)) {
1691                     goto unimplemented;
1692                 }
1693 
1694                 /* copy struct timeval to target */
1695                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1696                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1697                 break;
1698             }
1699             case SCM_CREDENTIALS:
1700             {
1701                 struct ucred *cred = (struct ucred *)data;
1702                 struct target_ucred *target_cred =
1703                     (struct target_ucred *)target_data;
1704 
1705                 __put_user(cred->pid, &target_cred->pid);
1706                 __put_user(cred->uid, &target_cred->uid);
1707                 __put_user(cred->gid, &target_cred->gid);
1708                 break;
1709             }
1710             default:
1711                 goto unimplemented;
1712             }
1713             break;
1714 
1715         case SOL_IP:
1716             switch (cmsg->cmsg_type) {
1717             case IP_TTL:
1718             {
1719                 uint32_t *v = (uint32_t *)data;
1720                 uint32_t *t_int = (uint32_t *)target_data;
1721 
1722                 if (len != sizeof(uint32_t) ||
1723                     tgt_len != sizeof(uint32_t)) {
1724                     goto unimplemented;
1725                 }
1726                 __put_user(*v, t_int);
1727                 break;
1728             }
1729             case IP_RECVERR:
1730             {
1731                 struct errhdr_t {
1732                    struct sock_extended_err ee;
1733                    struct sockaddr_in offender;
1734                 };
1735                 struct errhdr_t *errh = (struct errhdr_t *)data;
1736                 struct errhdr_t *target_errh =
1737                     (struct errhdr_t *)target_data;
1738 
1739                 if (len != sizeof(struct errhdr_t) ||
1740                     tgt_len != sizeof(struct errhdr_t)) {
1741                     goto unimplemented;
1742                 }
1743                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1744                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1745                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1746                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1747                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1748                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1749                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1750                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1751                     (void *) &errh->offender, sizeof(errh->offender));
1752                 break;
1753             }
1754             default:
1755                 goto unimplemented;
1756             }
1757             break;
1758 
1759         case SOL_IPV6:
1760             switch (cmsg->cmsg_type) {
1761             case IPV6_HOPLIMIT:
1762             {
1763                 uint32_t *v = (uint32_t *)data;
1764                 uint32_t *t_int = (uint32_t *)target_data;
1765 
1766                 if (len != sizeof(uint32_t) ||
1767                     tgt_len != sizeof(uint32_t)) {
1768                     goto unimplemented;
1769                 }
1770                 __put_user(*v, t_int);
1771                 break;
1772             }
1773             case IPV6_RECVERR:
1774             {
1775                 struct errhdr6_t {
1776                    struct sock_extended_err ee;
1777                    struct sockaddr_in6 offender;
1778                 };
1779                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1780                 struct errhdr6_t *target_errh =
1781                     (struct errhdr6_t *)target_data;
1782 
1783                 if (len != sizeof(struct errhdr6_t) ||
1784                     tgt_len != sizeof(struct errhdr6_t)) {
1785                     goto unimplemented;
1786                 }
1787                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1788                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1789                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1790                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1791                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1792                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1793                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1794                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1795                     (void *) &errh->offender, sizeof(errh->offender));
1796                 break;
1797             }
1798             default:
1799                 goto unimplemented;
1800             }
1801             break;
1802 
1803         default:
1804         unimplemented:
1805             gemu_log("Unsupported ancillary data: %d/%d\n",
1806                                         cmsg->cmsg_level, cmsg->cmsg_type);
1807             memcpy(target_data, data, MIN(len, tgt_len));
1808             if (tgt_len > len) {
1809                 memset(target_data + len, 0, tgt_len - len);
1810             }
1811         }
1812 
1813         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1814         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1815         if (msg_controllen < tgt_space) {
1816             tgt_space = msg_controllen;
1817         }
1818         msg_controllen -= tgt_space;
1819         space += tgt_space;
1820         cmsg = CMSG_NXTHDR(msgh, cmsg);
1821         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1822                                          target_cmsg_start);
1823     }
1824     unlock_user(target_cmsg, target_cmsg_addr, space);
1825  the_end:
1826     target_msgh->msg_controllen = tswapal(space);
1827     return 0;
1828 }
1829 
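/* setsockopt() emulation: option payloads that contain multi-byte fields
 * (timevals, mreq structures, socket filters, ...) are converted to host
 * layout before the host setsockopt() is called; plain integer options are
 * simply read from guest memory. Unknown level/option pairs fail with
 * ENOPROTOOPT.
 */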
1830 /* do_setsockopt() Must return target values and target errnos. */
1831 static abi_long do_setsockopt(int sockfd, int level, int optname,
1832                               abi_ulong optval_addr, socklen_t optlen)
1833 {
1834     abi_long ret;
1835     int val;
1836     struct ip_mreqn *ip_mreq;
1837     struct ip_mreq_source *ip_mreq_source;
1838 
1839     switch(level) {
1840     case SOL_TCP:
1841         /* TCP options all take an 'int' value.  */
1842         if (optlen < sizeof(uint32_t))
1843             return -TARGET_EINVAL;
1844 
1845         if (get_user_u32(val, optval_addr))
1846             return -TARGET_EFAULT;
1847         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1848         break;
1849     case SOL_IP:
1850         switch(optname) {
1851         case IP_TOS:
1852         case IP_TTL:
1853         case IP_HDRINCL:
1854         case IP_ROUTER_ALERT:
1855         case IP_RECVOPTS:
1856         case IP_RETOPTS:
1857         case IP_PKTINFO:
1858         case IP_MTU_DISCOVER:
1859         case IP_RECVERR:
1860         case IP_RECVTTL:
1861         case IP_RECVTOS:
1862 #ifdef IP_FREEBIND
1863         case IP_FREEBIND:
1864 #endif
1865         case IP_MULTICAST_TTL:
1866         case IP_MULTICAST_LOOP:
1867             val = 0;
1868             if (optlen >= sizeof(uint32_t)) {
1869                 if (get_user_u32(val, optval_addr))
1870                     return -TARGET_EFAULT;
1871             } else if (optlen >= 1) {
1872                 if (get_user_u8(val, optval_addr))
1873                     return -TARGET_EFAULT;
1874             }
1875             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1876             break;
1877         case IP_ADD_MEMBERSHIP:
1878         case IP_DROP_MEMBERSHIP:
1879             if (optlen < sizeof (struct target_ip_mreq) ||
1880                 optlen > sizeof (struct target_ip_mreqn))
1881                 return -TARGET_EINVAL;
1882 
1883             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1884             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1885             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1886             break;
1887 
1888         case IP_BLOCK_SOURCE:
1889         case IP_UNBLOCK_SOURCE:
1890         case IP_ADD_SOURCE_MEMBERSHIP:
1891         case IP_DROP_SOURCE_MEMBERSHIP:
1892             if (optlen != sizeof (struct target_ip_mreq_source))
1893                 return -TARGET_EINVAL;
1894 
1895             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1896             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1897             unlock_user (ip_mreq_source, optval_addr, 0);
1898             break;
1899 
1900         default:
1901             goto unimplemented;
1902         }
1903         break;
1904     case SOL_IPV6:
1905         switch (optname) {
1906         case IPV6_MTU_DISCOVER:
1907         case IPV6_MTU:
1908         case IPV6_V6ONLY:
1909         case IPV6_RECVPKTINFO:
1910         case IPV6_UNICAST_HOPS:
1911         case IPV6_MULTICAST_HOPS:
1912         case IPV6_MULTICAST_LOOP:
1913         case IPV6_RECVERR:
1914         case IPV6_RECVHOPLIMIT:
1915         case IPV6_2292HOPLIMIT:
1916         case IPV6_CHECKSUM:
1917         case IPV6_ADDRFORM:
1918         case IPV6_2292PKTINFO:
1919         case IPV6_RECVTCLASS:
1920         case IPV6_RECVRTHDR:
1921         case IPV6_2292RTHDR:
1922         case IPV6_RECVHOPOPTS:
1923         case IPV6_2292HOPOPTS:
1924         case IPV6_RECVDSTOPTS:
1925         case IPV6_2292DSTOPTS:
1926         case IPV6_TCLASS:
1927 #ifdef IPV6_RECVPATHMTU
1928         case IPV6_RECVPATHMTU:
1929 #endif
1930 #ifdef IPV6_TRANSPARENT
1931         case IPV6_TRANSPARENT:
1932 #endif
1933 #ifdef IPV6_FREEBIND
1934         case IPV6_FREEBIND:
1935 #endif
1936 #ifdef IPV6_RECVORIGDSTADDR
1937         case IPV6_RECVORIGDSTADDR:
1938 #endif
1939             val = 0;
1940             if (optlen < sizeof(uint32_t)) {
1941                 return -TARGET_EINVAL;
1942             }
1943             if (get_user_u32(val, optval_addr)) {
1944                 return -TARGET_EFAULT;
1945             }
1946             ret = get_errno(setsockopt(sockfd, level, optname,
1947                                        &val, sizeof(val)));
1948             break;
1949         case IPV6_PKTINFO:
1950         {
1951             struct in6_pktinfo pki;
1952 
1953             if (optlen < sizeof(pki)) {
1954                 return -TARGET_EINVAL;
1955             }
1956 
1957             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1958                 return -TARGET_EFAULT;
1959             }
1960 
1961             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1962 
1963             ret = get_errno(setsockopt(sockfd, level, optname,
1964                                        &pki, sizeof(pki)));
1965             break;
1966         }
1967         case IPV6_ADD_MEMBERSHIP:
1968         case IPV6_DROP_MEMBERSHIP:
1969         {
1970             struct ipv6_mreq ipv6mreq;
1971 
1972             if (optlen < sizeof(ipv6mreq)) {
1973                 return -TARGET_EINVAL;
1974             }
1975 
1976             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
1977                 return -TARGET_EFAULT;
1978             }
1979 
1980             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
1981 
1982             ret = get_errno(setsockopt(sockfd, level, optname,
1983                                        &ipv6mreq, sizeof(ipv6mreq)));
1984             break;
1985         }
1986         default:
1987             goto unimplemented;
1988         }
1989         break;
1990     case SOL_ICMPV6:
1991         switch (optname) {
1992         case ICMPV6_FILTER:
1993         {
1994             struct icmp6_filter icmp6f;
1995 
1996             if (optlen > sizeof(icmp6f)) {
1997                 optlen = sizeof(icmp6f);
1998             }
1999 
2000             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2001                 return -TARGET_EFAULT;
2002             }
2003 
2004             for (val = 0; val < 8; val++) {
2005                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2006             }
2007 
2008             ret = get_errno(setsockopt(sockfd, level, optname,
2009                                        &icmp6f, optlen));
2010             break;
2011         }
2012         default:
2013             goto unimplemented;
2014         }
2015         break;
2016     case SOL_RAW:
2017         switch (optname) {
2018         case ICMP_FILTER:
2019         case IPV6_CHECKSUM:
2020             /* these take a u32 value */
2021             if (optlen < sizeof(uint32_t)) {
2022                 return -TARGET_EINVAL;
2023             }
2024 
2025             if (get_user_u32(val, optval_addr)) {
2026                 return -TARGET_EFAULT;
2027             }
2028             ret = get_errno(setsockopt(sockfd, level, optname,
2029                                        &val, sizeof(val)));
2030             break;
2031 
2032         default:
2033             goto unimplemented;
2034         }
2035         break;
2036 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2037     case SOL_ALG:
2038         switch (optname) {
2039         case ALG_SET_KEY:
2040         {
2041             char *alg_key = g_malloc(optlen);
2042 
2043             if (!alg_key) {
2044                 return -TARGET_ENOMEM;
2045             }
2046             if (copy_from_user(alg_key, optval_addr, optlen)) {
2047                 g_free(alg_key);
2048                 return -TARGET_EFAULT;
2049             }
2050             ret = get_errno(setsockopt(sockfd, level, optname,
2051                                        alg_key, optlen));
2052             g_free(alg_key);
2053             break;
2054         }
2055         case ALG_SET_AEAD_AUTHSIZE:
2056         {
2057             ret = get_errno(setsockopt(sockfd, level, optname,
2058                                        NULL, optlen));
2059             break;
2060         }
2061         default:
2062             goto unimplemented;
2063         }
2064         break;
2065 #endif
2066     case TARGET_SOL_SOCKET:
2067         switch (optname) {
2068         case TARGET_SO_RCVTIMEO:
2069         {
2070                 struct timeval tv;
2071 
2072                 optname = SO_RCVTIMEO;
2073 
2074 set_timeout:
2075                 if (optlen != sizeof(struct target_timeval)) {
2076                     return -TARGET_EINVAL;
2077                 }
2078 
2079                 if (copy_from_user_timeval(&tv, optval_addr)) {
2080                     return -TARGET_EFAULT;
2081                 }
2082 
2083                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2084                                 &tv, sizeof(tv)));
2085                 return ret;
2086         }
2087         case TARGET_SO_SNDTIMEO:
2088                 optname = SO_SNDTIMEO;
2089                 goto set_timeout;
2090         case TARGET_SO_ATTACH_FILTER:
2091         {
2092                 struct target_sock_fprog *tfprog;
2093                 struct target_sock_filter *tfilter;
2094                 struct sock_fprog fprog;
2095                 struct sock_filter *filter;
2096                 int i;
2097 
2098                 if (optlen != sizeof(*tfprog)) {
2099                     return -TARGET_EINVAL;
2100                 }
2101                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2102                     return -TARGET_EFAULT;
2103                 }
2104                 if (!lock_user_struct(VERIFY_READ, tfilter,
2105                                       tswapal(tfprog->filter), 0)) {
2106                     unlock_user_struct(tfprog, optval_addr, 1);
2107                     return -TARGET_EFAULT;
2108                 }
2109 
2110                 fprog.len = tswap16(tfprog->len);
2111                 filter = g_try_new(struct sock_filter, fprog.len);
2112                 if (filter == NULL) {
2113                     unlock_user_struct(tfilter, tfprog->filter, 1);
2114                     unlock_user_struct(tfprog, optval_addr, 1);
2115                     return -TARGET_ENOMEM;
2116                 }
2117                 for (i = 0; i < fprog.len; i++) {
2118                     filter[i].code = tswap16(tfilter[i].code);
2119                     filter[i].jt = tfilter[i].jt;
2120                     filter[i].jf = tfilter[i].jf;
2121                     filter[i].k = tswap32(tfilter[i].k);
2122                 }
2123                 fprog.filter = filter;
2124 
2125                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2126                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2127                 g_free(filter);
2128 
2129                 unlock_user_struct(tfilter, tfprog->filter, 1);
2130                 unlock_user_struct(tfprog, optval_addr, 1);
2131                 return ret;
2132         }
2133         case TARGET_SO_BINDTODEVICE:
2134         {
2135                 char *dev_ifname, *addr_ifname;
2136 
2137                 if (optlen > IFNAMSIZ - 1) {
2138                     optlen = IFNAMSIZ - 1;
2139                 }
2140                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2141                 if (!dev_ifname) {
2142                     return -TARGET_EFAULT;
2143                 }
2144                 optname = SO_BINDTODEVICE;
2145                 addr_ifname = alloca(IFNAMSIZ);
2146                 memcpy(addr_ifname, dev_ifname, optlen);
2147                 addr_ifname[optlen] = 0;
2148                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2149                                            addr_ifname, optlen));
2150                 unlock_user(dev_ifname, optval_addr, 0);
2151                 return ret;
2152         }
2153         case TARGET_SO_LINGER:
2154         {
2155                 struct linger lg;
2156                 struct target_linger *tlg;
2157 
2158                 if (optlen != sizeof(struct target_linger)) {
2159                     return -TARGET_EINVAL;
2160                 }
2161                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2162                     return -TARGET_EFAULT;
2163                 }
2164                 __get_user(lg.l_onoff, &tlg->l_onoff);
2165                 __get_user(lg.l_linger, &tlg->l_linger);
2166                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2167                                 &lg, sizeof(lg)));
2168                 unlock_user_struct(tlg, optval_addr, 0);
2169                 return ret;
2170         }
2171             /* Options with 'int' argument.  */
2172         case TARGET_SO_DEBUG:
2173                 optname = SO_DEBUG;
2174                 break;
2175         case TARGET_SO_REUSEADDR:
2176                 optname = SO_REUSEADDR;
2177                 break;
2178 #ifdef SO_REUSEPORT
2179         case TARGET_SO_REUSEPORT:
2180                 optname = SO_REUSEPORT;
2181                 break;
2182 #endif
2183         case TARGET_SO_TYPE:
2184                 optname = SO_TYPE;
2185                 break;
2186         case TARGET_SO_ERROR:
2187                 optname = SO_ERROR;
2188                 break;
2189         case TARGET_SO_DONTROUTE:
2190                 optname = SO_DONTROUTE;
2191                 break;
2192         case TARGET_SO_BROADCAST:
2193                 optname = SO_BROADCAST;
2194                 break;
2195         case TARGET_SO_SNDBUF:
2196                 optname = SO_SNDBUF;
2197                 break;
2198         case TARGET_SO_SNDBUFFORCE:
2199                 optname = SO_SNDBUFFORCE;
2200                 break;
2201         case TARGET_SO_RCVBUF:
2202                 optname = SO_RCVBUF;
2203                 break;
2204         case TARGET_SO_RCVBUFFORCE:
2205                 optname = SO_RCVBUFFORCE;
2206                 break;
2207         case TARGET_SO_KEEPALIVE:
2208                 optname = SO_KEEPALIVE;
2209                 break;
2210         case TARGET_SO_OOBINLINE:
2211                 optname = SO_OOBINLINE;
2212                 break;
2213         case TARGET_SO_NO_CHECK:
2214                 optname = SO_NO_CHECK;
2215                 break;
2216         case TARGET_SO_PRIORITY:
2217                 optname = SO_PRIORITY;
2218                 break;
2219 #ifdef SO_BSDCOMPAT
2220         case TARGET_SO_BSDCOMPAT:
2221                 optname = SO_BSDCOMPAT;
2222                 break;
2223 #endif
2224         case TARGET_SO_PASSCRED:
2225                 optname = SO_PASSCRED;
2226                 break;
2227         case TARGET_SO_PASSSEC:
2228                 optname = SO_PASSSEC;
2229                 break;
2230         case TARGET_SO_TIMESTAMP:
2231                 optname = SO_TIMESTAMP;
2232                 break;
2233         case TARGET_SO_RCVLOWAT:
2234                 optname = SO_RCVLOWAT;
2235                 break;
2236         default:
2237             goto unimplemented;
2238         }
2239         if (optlen < sizeof(uint32_t))
2240             return -TARGET_EINVAL;
2241 
2242         if (get_user_u32(val, optval_addr))
2243             return -TARGET_EFAULT;
2244         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2245         break;
2246     default:
2247     unimplemented:
2248         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2249         ret = -TARGET_ENOPROTOOPT;
2250     }
2251     return ret;
2252 }
2253 
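/* getsockopt() emulation: the option value is fetched from the host and
 * converted back to guest layout, and the (possibly shortened) length is
 * written back through 'optlen'. Only the level/option pairs listed below
 * are handled; the rest fail with ENOPROTOOPT or EOPNOTSUPP.
 */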
2254 /* do_getsockopt() Must return target values and target errnos. */
2255 static abi_long do_getsockopt(int sockfd, int level, int optname,
2256                               abi_ulong optval_addr, abi_ulong optlen)
2257 {
2258     abi_long ret;
2259     int len, val;
2260     socklen_t lv;
2261 
2262     switch(level) {
2263     case TARGET_SOL_SOCKET:
2264         level = SOL_SOCKET;
2265         switch (optname) {
2266         /* These don't just return a single integer */
2267         case TARGET_SO_RCVTIMEO:
2268         case TARGET_SO_SNDTIMEO:
2269         case TARGET_SO_PEERNAME:
2270             goto unimplemented;
2271         case TARGET_SO_PEERCRED: {
2272             struct ucred cr;
2273             socklen_t crlen;
2274             struct target_ucred *tcr;
2275 
2276             if (get_user_u32(len, optlen)) {
2277                 return -TARGET_EFAULT;
2278             }
2279             if (len < 0) {
2280                 return -TARGET_EINVAL;
2281             }
2282 
2283             crlen = sizeof(cr);
2284             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2285                                        &cr, &crlen));
2286             if (ret < 0) {
2287                 return ret;
2288             }
2289             if (len > crlen) {
2290                 len = crlen;
2291             }
2292             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2293                 return -TARGET_EFAULT;
2294             }
2295             __put_user(cr.pid, &tcr->pid);
2296             __put_user(cr.uid, &tcr->uid);
2297             __put_user(cr.gid, &tcr->gid);
2298             unlock_user_struct(tcr, optval_addr, 1);
2299             if (put_user_u32(len, optlen)) {
2300                 return -TARGET_EFAULT;
2301             }
2302             break;
2303         }
2304         case TARGET_SO_LINGER:
2305         {
2306             struct linger lg;
2307             socklen_t lglen;
2308             struct target_linger *tlg;
2309 
2310             if (get_user_u32(len, optlen)) {
2311                 return -TARGET_EFAULT;
2312             }
2313             if (len < 0) {
2314                 return -TARGET_EINVAL;
2315             }
2316 
2317             lglen = sizeof(lg);
2318             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2319                                        &lg, &lglen));
2320             if (ret < 0) {
2321                 return ret;
2322             }
2323             if (len > lglen) {
2324                 len = lglen;
2325             }
2326             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2327                 return -TARGET_EFAULT;
2328             }
2329             __put_user(lg.l_onoff, &tlg->l_onoff);
2330             __put_user(lg.l_linger, &tlg->l_linger);
2331             unlock_user_struct(tlg, optval_addr, 1);
2332             if (put_user_u32(len, optlen)) {
2333                 return -TARGET_EFAULT;
2334             }
2335             break;
2336         }
2337         /* Options with 'int' argument.  */
2338         case TARGET_SO_DEBUG:
2339             optname = SO_DEBUG;
2340             goto int_case;
2341         case TARGET_SO_REUSEADDR:
2342             optname = SO_REUSEADDR;
2343             goto int_case;
2344 #ifdef SO_REUSEPORT
2345         case TARGET_SO_REUSEPORT:
2346             optname = SO_REUSEPORT;
2347             goto int_case;
2348 #endif
2349         case TARGET_SO_TYPE:
2350             optname = SO_TYPE;
2351             goto int_case;
2352         case TARGET_SO_ERROR:
2353             optname = SO_ERROR;
2354             goto int_case;
2355         case TARGET_SO_DONTROUTE:
2356             optname = SO_DONTROUTE;
2357             goto int_case;
2358         case TARGET_SO_BROADCAST:
2359             optname = SO_BROADCAST;
2360             goto int_case;
2361         case TARGET_SO_SNDBUF:
2362             optname = SO_SNDBUF;
2363             goto int_case;
2364         case TARGET_SO_RCVBUF:
2365             optname = SO_RCVBUF;
2366             goto int_case;
2367         case TARGET_SO_KEEPALIVE:
2368             optname = SO_KEEPALIVE;
2369             goto int_case;
2370         case TARGET_SO_OOBINLINE:
2371             optname = SO_OOBINLINE;
2372             goto int_case;
2373         case TARGET_SO_NO_CHECK:
2374             optname = SO_NO_CHECK;
2375             goto int_case;
2376         case TARGET_SO_PRIORITY:
2377             optname = SO_PRIORITY;
2378             goto int_case;
2379 #ifdef SO_BSDCOMPAT
2380         case TARGET_SO_BSDCOMPAT:
2381             optname = SO_BSDCOMPAT;
2382             goto int_case;
2383 #endif
2384         case TARGET_SO_PASSCRED:
2385             optname = SO_PASSCRED;
2386             goto int_case;
2387         case TARGET_SO_TIMESTAMP:
2388             optname = SO_TIMESTAMP;
2389             goto int_case;
2390         case TARGET_SO_RCVLOWAT:
2391             optname = SO_RCVLOWAT;
2392             goto int_case;
2393         case TARGET_SO_ACCEPTCONN:
2394             optname = SO_ACCEPTCONN;
2395             goto int_case;
2396         default:
2397             goto int_case;
2398         }
2399         break;
2400     case SOL_TCP:
2401         /* TCP options all take an 'int' value.  */
2402     int_case:
2403         if (get_user_u32(len, optlen))
2404             return -TARGET_EFAULT;
2405         if (len < 0)
2406             return -TARGET_EINVAL;
2407         lv = sizeof(lv);
2408         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2409         if (ret < 0)
2410             return ret;
2411         if (optname == SO_TYPE) {
2412             val = host_to_target_sock_type(val);
2413         }
2414         if (len > lv)
2415             len = lv;
2416         if (len == 4) {
2417             if (put_user_u32(val, optval_addr))
2418                 return -TARGET_EFAULT;
2419         } else {
2420             if (put_user_u8(val, optval_addr))
2421                 return -TARGET_EFAULT;
2422         }
2423         if (put_user_u32(len, optlen))
2424             return -TARGET_EFAULT;
2425         break;
2426     case SOL_IP:
2427         switch(optname) {
2428         case IP_TOS:
2429         case IP_TTL:
2430         case IP_HDRINCL:
2431         case IP_ROUTER_ALERT:
2432         case IP_RECVOPTS:
2433         case IP_RETOPTS:
2434         case IP_PKTINFO:
2435         case IP_MTU_DISCOVER:
2436         case IP_RECVERR:
2437         case IP_RECVTOS:
2438 #ifdef IP_FREEBIND
2439         case IP_FREEBIND:
2440 #endif
2441         case IP_MULTICAST_TTL:
2442         case IP_MULTICAST_LOOP:
2443             if (get_user_u32(len, optlen))
2444                 return -TARGET_EFAULT;
2445             if (len < 0)
2446                 return -TARGET_EINVAL;
2447             lv = sizeof(lv);
2448             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2449             if (ret < 0)
2450                 return ret;
2451             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2452                 len = 1;
2453                 if (put_user_u32(len, optlen)
2454                     || put_user_u8(val, optval_addr))
2455                     return -TARGET_EFAULT;
2456             } else {
2457                 if (len > sizeof(int))
2458                     len = sizeof(int);
2459                 if (put_user_u32(len, optlen)
2460                     || put_user_u32(val, optval_addr))
2461                     return -TARGET_EFAULT;
2462             }
2463             break;
2464         default:
2465             ret = -TARGET_ENOPROTOOPT;
2466             break;
2467         }
2468         break;
2469     case SOL_IPV6:
2470         switch (optname) {
2471         case IPV6_MTU_DISCOVER:
2472         case IPV6_MTU:
2473         case IPV6_V6ONLY:
2474         case IPV6_RECVPKTINFO:
2475         case IPV6_UNICAST_HOPS:
2476         case IPV6_MULTICAST_HOPS:
2477         case IPV6_MULTICAST_LOOP:
2478         case IPV6_RECVERR:
2479         case IPV6_RECVHOPLIMIT:
2480         case IPV6_2292HOPLIMIT:
2481         case IPV6_CHECKSUM:
2482         case IPV6_ADDRFORM:
2483         case IPV6_2292PKTINFO:
2484         case IPV6_RECVTCLASS:
2485         case IPV6_RECVRTHDR:
2486         case IPV6_2292RTHDR:
2487         case IPV6_RECVHOPOPTS:
2488         case IPV6_2292HOPOPTS:
2489         case IPV6_RECVDSTOPTS:
2490         case IPV6_2292DSTOPTS:
2491         case IPV6_TCLASS:
2492 #ifdef IPV6_RECVPATHMTU
2493         case IPV6_RECVPATHMTU:
2494 #endif
2495 #ifdef IPV6_TRANSPARENT
2496         case IPV6_TRANSPARENT:
2497 #endif
2498 #ifdef IPV6_FREEBIND
2499         case IPV6_FREEBIND:
2500 #endif
2501 #ifdef IPV6_RECVORIGDSTADDR
2502         case IPV6_RECVORIGDSTADDR:
2503 #endif
2504             if (get_user_u32(len, optlen))
2505                 return -TARGET_EFAULT;
2506             if (len < 0)
2507                 return -TARGET_EINVAL;
2508             lv = sizeof(lv);
2509             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2510             if (ret < 0)
2511                 return ret;
2512             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2513                 len = 1;
2514                 if (put_user_u32(len, optlen)
2515                     || put_user_u8(val, optval_addr))
2516                     return -TARGET_EFAULT;
2517             } else {
2518                 if (len > sizeof(int))
2519                     len = sizeof(int);
2520                 if (put_user_u32(len, optlen)
2521                     || put_user_u32(val, optval_addr))
2522                     return -TARGET_EFAULT;
2523             }
2524             break;
2525         default:
2526             ret = -TARGET_ENOPROTOOPT;
2527             break;
2528         }
2529         break;
2530     default:
2531     unimplemented:
2532         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2533                  level, optname);
2534         ret = -TARGET_EOPNOTSUPP;
2535         break;
2536     }
2537     return ret;
2538 }
2539 
2540 /* Convert target low/high pair representing file offset into the host
2541  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2542  * as the kernel doesn't handle them either.
2543  */
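/* For example, with a 32-bit target and a 64-bit host, tlow=0x00001000 and
 * thigh=0x00000001 combine into off=0x100001000, giving *hlow=0x100001000
 * and *hhigh=0. The shifts are split in two so that a shift by a full word
 * width (undefined behaviour in C) is never generated when target or host
 * longs are already 64 bits wide.
 */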
2544 static void target_to_host_low_high(abi_ulong tlow,
2545                                     abi_ulong thigh,
2546                                     unsigned long *hlow,
2547                                     unsigned long *hhigh)
2548 {
2549     uint64_t off = tlow |
2550         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2551         TARGET_LONG_BITS / 2;
2552 
2553     *hlow = off;
2554     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2555 }
2556 
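/* Lock a guest iovec array of 'count' entries into host memory: each
 * base/len pair is byte-swapped and the referenced guest buffer locked,
 * producing a freshly allocated host iovec array (NULL with errno set on
 * failure). A bad address in the first entry is a fault; bad addresses in
 * later entries just become zero-length entries so that a partial transfer
 * can still take place.
 */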
2557 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2558                                 abi_ulong count, int copy)
2559 {
2560     struct target_iovec *target_vec;
2561     struct iovec *vec;
2562     abi_ulong total_len, max_len;
2563     int i;
2564     int err = 0;
2565     bool bad_address = false;
2566 
2567     if (count == 0) {
2568         errno = 0;
2569         return NULL;
2570     }
2571     if (count > IOV_MAX) {
2572         errno = EINVAL;
2573         return NULL;
2574     }
2575 
2576     vec = g_try_new0(struct iovec, count);
2577     if (vec == NULL) {
2578         errno = ENOMEM;
2579         return NULL;
2580     }
2581 
2582     target_vec = lock_user(VERIFY_READ, target_addr,
2583                            count * sizeof(struct target_iovec), 1);
2584     if (target_vec == NULL) {
2585         err = EFAULT;
2586         goto fail2;
2587     }
2588 
2589     /* ??? If host page size > target page size, this will result in a
2590        value larger than what we can actually support.  */
2591     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2592     total_len = 0;
2593 
2594     for (i = 0; i < count; i++) {
2595         abi_ulong base = tswapal(target_vec[i].iov_base);
2596         abi_long len = tswapal(target_vec[i].iov_len);
2597 
2598         if (len < 0) {
2599             err = EINVAL;
2600             goto fail;
2601         } else if (len == 0) {
2602             /* Zero length pointer is ignored.  */
2603             vec[i].iov_base = 0;
2604         } else {
2605             vec[i].iov_base = lock_user(type, base, len, copy);
2606             /* If the first buffer pointer is bad, this is a fault.  But
2607              * subsequent bad buffers will result in a partial write; this
2608              * is realized by filling the vector with null pointers and
2609              * zero lengths. */
2610             if (!vec[i].iov_base) {
2611                 if (i == 0) {
2612                     err = EFAULT;
2613                     goto fail;
2614                 } else {
2615                     bad_address = true;
2616                 }
2617             }
2618             if (bad_address) {
2619                 len = 0;
2620             }
2621             if (len > max_len - total_len) {
2622                 len = max_len - total_len;
2623             }
2624         }
2625         vec[i].iov_len = len;
2626         total_len += len;
2627     }
2628 
2629     unlock_user(target_vec, target_addr, 0);
2630     return vec;
2631 
2632  fail:
2633     while (--i >= 0) {
2634         if (tswapal(target_vec[i].iov_len) > 0) {
2635             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2636         }
2637     }
2638     unlock_user(target_vec, target_addr, 0);
2639  fail2:
2640     g_free(vec);
2641     errno = err;
2642     return NULL;
2643 }
2644 
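/* Undo lock_iovec(): unlock every guest buffer, writing the data back when
 * 'copy' is set (i.e. for reads into guest memory), and free the host
 * iovec array.
 */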
2645 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2646                          abi_ulong count, int copy)
2647 {
2648     struct target_iovec *target_vec;
2649     int i;
2650 
2651     target_vec = lock_user(VERIFY_READ, target_addr,
2652                            count * sizeof(struct target_iovec), 1);
2653     if (target_vec) {
2654         for (i = 0; i < count; i++) {
2655             abi_ulong base = tswapal(target_vec[i].iov_base);
2656             abi_long len = tswapal(target_vec[i].iov_len);
2657             if (len < 0) {
2658                 break;
2659             }
2660             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2661         }
2662         unlock_user(target_vec, target_addr, 0);
2663     }
2664 
2665     g_free(vec);
2666 }
2667 
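/* Translate a guest SOCK_* type and its TARGET_SOCK_CLOEXEC/NONBLOCK flags
 * into the host encoding, failing with -TARGET_EINVAL when the host has no
 * way to express a requested flag.
 */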
2668 static inline int target_to_host_sock_type(int *type)
2669 {
2670     int host_type = 0;
2671     int target_type = *type;
2672 
2673     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2674     case TARGET_SOCK_DGRAM:
2675         host_type = SOCK_DGRAM;
2676         break;
2677     case TARGET_SOCK_STREAM:
2678         host_type = SOCK_STREAM;
2679         break;
2680     default:
2681         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2682         break;
2683     }
2684     if (target_type & TARGET_SOCK_CLOEXEC) {
2685 #if defined(SOCK_CLOEXEC)
2686         host_type |= SOCK_CLOEXEC;
2687 #else
2688         return -TARGET_EINVAL;
2689 #endif
2690     }
2691     if (target_type & TARGET_SOCK_NONBLOCK) {
2692 #if defined(SOCK_NONBLOCK)
2693         host_type |= SOCK_NONBLOCK;
2694 #elif !defined(O_NONBLOCK)
2695         return -TARGET_EINVAL;
2696 #endif
2697     }
2698     *type = host_type;
2699     return 0;
2700 }
2701 
2702 /* Try to emulate socket type flags after socket creation.  */
2703 static int sock_flags_fixup(int fd, int target_type)
2704 {
2705 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2706     if (target_type & TARGET_SOCK_NONBLOCK) {
2707         int flags = fcntl(fd, F_GETFL);
2708         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2709             close(fd);
2710             return -TARGET_EINVAL;
2711         }
2712     }
2713 #endif
2714     return fd;
2715 }
2716 
2717 /* do_socket() Must return target values and target errnos. */
2718 static abi_long do_socket(int domain, int type, int protocol)
2719 {
2720     int target_type = type;
2721     int ret;
2722 
2723     ret = target_to_host_sock_type(&type);
2724     if (ret) {
2725         return ret;
2726     }
2727 
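    /* Only netlink protocols that QEMU can translate (ROUTE when
     * CONFIG_RTNETLINK is set, KOBJECT_UEVENT and AUDIT) are allowed
     * through; anything else is rejected with EPFNOSUPPORT.
     */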
2728     if (domain == PF_NETLINK && !(
2729 #ifdef CONFIG_RTNETLINK
2730          protocol == NETLINK_ROUTE ||
2731 #endif
2732          protocol == NETLINK_KOBJECT_UEVENT ||
2733          protocol == NETLINK_AUDIT)) {
2734         return -EPFNOSUPPORT;
2735     }
2736 
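    /* Packet socket protocol numbers are 16-bit values in network byte
     * order; convert the guest's representation to the host's.
     */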
2737     if (domain == AF_PACKET ||
2738         (domain == AF_INET && type == SOCK_PACKET)) {
2739         protocol = tswap16(protocol);
2740     }
2741 
2742     ret = get_errno(socket(domain, type, protocol));
2743     if (ret >= 0) {
2744         ret = sock_flags_fixup(ret, target_type);
2745         if (type == SOCK_PACKET) {
2746             /* Handle an obsolete case: if the socket type is
2747              * SOCK_PACKET, the socket is bound by name.
2748              */
2749             fd_trans_register(ret, &target_packet_trans);
2750         } else if (domain == PF_NETLINK) {
2751             switch (protocol) {
2752 #ifdef CONFIG_RTNETLINK
2753             case NETLINK_ROUTE:
2754                 fd_trans_register(ret, &target_netlink_route_trans);
2755                 break;
2756 #endif
2757             case NETLINK_KOBJECT_UEVENT:
2758                 /* nothing to do: messages are strings */
2759                 break;
2760             case NETLINK_AUDIT:
2761                 fd_trans_register(ret, &target_netlink_audit_trans);
2762                 break;
2763             default:
2764                 g_assert_not_reached();
2765             }
2766         }
2767     }
2768     return ret;
2769 }
2770 
2771 /* do_bind() Must return target values and target errnos. */
2772 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2773                         socklen_t addrlen)
2774 {
2775     void *addr;
2776     abi_long ret;
2777 
2778     if ((int)addrlen < 0) {
2779         return -TARGET_EINVAL;
2780     }
2781 
2782     addr = alloca(addrlen+1);
2783 
2784     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2785     if (ret)
2786         return ret;
2787 
2788     return get_errno(bind(sockfd, addr, addrlen));
2789 }
2790 
2791 /* do_connect() Must return target values and target errnos. */
2792 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2793                            socklen_t addrlen)
2794 {
2795     void *addr;
2796     abi_long ret;
2797 
2798     if ((int)addrlen < 0) {
2799         return -TARGET_EINVAL;
2800     }
2801 
2802     addr = alloca(addrlen+1);
2803 
2804     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2805     if (ret)
2806         return ret;
2807 
2808     return get_errno(safe_connect(sockfd, addr, addrlen));
2809 }
2810 
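/* Common helper for sendmsg() and recvmsg() ('send' selects the direction):
 * converts msg_name and the control buffer, locks the guest iovec, performs
 * the host syscall, and for receives converts the results (ancillary data,
 * peer address, flags) back to guest format.
 */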
2811 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2812 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2813                                       int flags, int send)
2814 {
2815     abi_long ret, len;
2816     struct msghdr msg;
2817     abi_ulong count;
2818     struct iovec *vec;
2819     abi_ulong target_vec;
2820 
2821     if (msgp->msg_name) {
2822         msg.msg_namelen = tswap32(msgp->msg_namelen);
2823         msg.msg_name = alloca(msg.msg_namelen+1);
2824         ret = target_to_host_sockaddr(fd, msg.msg_name,
2825                                       tswapal(msgp->msg_name),
2826                                       msg.msg_namelen);
2827         if (ret == -TARGET_EFAULT) {
2828             /* For connected sockets msg_name and msg_namelen must
2829              * be ignored, so returning EFAULT immediately is wrong.
2830              * Instead, pass a bad msg_name to the host kernel, and
2831              * let it decide whether to return EFAULT or not.
2832              */
2833             msg.msg_name = (void *)-1;
2834         } else if (ret) {
2835             goto out2;
2836         }
2837     } else {
2838         msg.msg_name = NULL;
2839         msg.msg_namelen = 0;
2840     }
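    /* Allocate twice the guest's control buffer size: host cmsg headers and
     * alignment may be larger than the target's, and target_to_host_cmsg()
     * checks this area for overflow.
     */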
2841     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2842     msg.msg_control = alloca(msg.msg_controllen);
2843     memset(msg.msg_control, 0, msg.msg_controllen);
2844 
2845     msg.msg_flags = tswap32(msgp->msg_flags);
2846 
2847     count = tswapal(msgp->msg_iovlen);
2848     target_vec = tswapal(msgp->msg_iov);
2849 
2850     if (count > IOV_MAX) {
2851         /* sendmsg/recvmsg return a different errno for this condition than
2852          * readv/writev, so we must catch it here before lock_iovec() does.
2853          */
2854         ret = -TARGET_EMSGSIZE;
2855         goto out2;
2856     }
2857 
2858     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2859                      target_vec, count, send);
2860     if (vec == NULL) {
2861         ret = -host_to_target_errno(errno);
2862         goto out2;
2863     }
2864     msg.msg_iovlen = count;
2865     msg.msg_iov = vec;
2866 
2867     if (send) {
2868         if (fd_trans_target_to_host_data(fd)) {
2869             void *host_msg;
2870 
2871             host_msg = g_malloc(msg.msg_iov->iov_len);
2872             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2873             ret = fd_trans_target_to_host_data(fd)(host_msg,
2874                                                    msg.msg_iov->iov_len);
2875             if (ret >= 0) {
2876                 msg.msg_iov->iov_base = host_msg;
2877                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2878             }
2879             g_free(host_msg);
2880         } else {
2881             ret = target_to_host_cmsg(&msg, msgp);
2882             if (ret == 0) {
2883                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2884             }
2885         }
2886     } else {
2887         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2888         if (!is_error(ret)) {
2889             len = ret;
2890             if (fd_trans_host_to_target_data(fd)) {
2891                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2892                                                MIN(msg.msg_iov->iov_len, len));
2893             } else {
2894                 ret = host_to_target_cmsg(msgp, &msg);
2895             }
2896             if (!is_error(ret)) {
2897                 msgp->msg_namelen = tswap32(msg.msg_namelen);
2898                 msgp->msg_flags = tswap32(msg.msg_flags);
2899                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2900                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2901                                     msg.msg_name, msg.msg_namelen);
2902                     if (ret) {
2903                         goto out;
2904                     }
2905                 }
2906 
2907                 ret = len;
2908             }
2909         }
2910     }
2911 
2912 out:
2913     unlock_iovec(vec, target_vec, count, !send);
2914 out2:
2915     return ret;
2916 }
2917 
2918 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2919                                int flags, int send)
2920 {
2921     abi_long ret;
2922     struct target_msghdr *msgp;
2923 
2924     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2925                           msgp,
2926                           target_msg,
2927                           send ? 1 : 0)) {
2928         return -TARGET_EFAULT;
2929     }
2930     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2931     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2932     return ret;
2933 }
2934 
2935 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2936  * so it might not have this *mmsg-specific flag either.
2937  */
2938 #ifndef MSG_WAITFORONE
2939 #define MSG_WAITFORONE 0x10000
2940 #endif
2941 
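/* sendmmsg()/recvmmsg() emulation: process up to UIO_MAXIOV message headers
 * one at a time through do_sendrecvmsg_locked(), recording each result in
 * msg_len. With MSG_WAITFORONE the remaining iterations become non-blocking
 * after the first packet.
 */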
2942 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2943                                 unsigned int vlen, unsigned int flags,
2944                                 int send)
2945 {
2946     struct target_mmsghdr *mmsgp;
2947     abi_long ret = 0;
2948     int i;
2949 
2950     if (vlen > UIO_MAXIOV) {
2951         vlen = UIO_MAXIOV;
2952     }
2953 
2954     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2955     if (!mmsgp) {
2956         return -TARGET_EFAULT;
2957     }
2958 
2959     for (i = 0; i < vlen; i++) {
2960         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2961         if (is_error(ret)) {
2962             break;
2963         }
2964         mmsgp[i].msg_len = tswap32(ret);
2965         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2966         if (flags & MSG_WAITFORONE) {
2967             flags |= MSG_DONTWAIT;
2968         }
2969     }
2970 
2971     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2972 
2973     /* Return the number of datagrams sent or received if any were
2974      * processed at all; otherwise return the error.
2975      */
2976     if (i) {
2977         return i;
2978     }
2979     return ret;
2980 }
2981 
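/* accept4() emulation: the accept flags are translated through
 * fcntl_flags_tbl, and the peer address, if requested, is converted back to
 * guest format together with the (possibly shortened) address length.
 */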
2982 /* do_accept4() Must return target values and target errnos. */
2983 static abi_long do_accept4(int fd, abi_ulong target_addr,
2984                            abi_ulong target_addrlen_addr, int flags)
2985 {
2986     socklen_t addrlen, ret_addrlen;
2987     void *addr;
2988     abi_long ret;
2989     int host_flags;
2990 
2991     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2992 
2993     if (target_addr == 0) {
2994         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2995     }
2996 
2997     /* Linux returns EINVAL if the addrlen pointer is invalid */
2998     if (get_user_u32(addrlen, target_addrlen_addr))
2999         return -TARGET_EINVAL;
3000 
3001     if ((int)addrlen < 0) {
3002         return -TARGET_EINVAL;
3003     }
3004 
3005     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3006         return -TARGET_EINVAL;
3007 
3008     addr = alloca(addrlen);
3009 
3010     ret_addrlen = addrlen;
3011     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3012     if (!is_error(ret)) {
3013         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3014         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3015             ret = -TARGET_EFAULT;
3016         }
3017     }
3018     return ret;
3019 }
3020 
3021 /* do_getpeername() Must return target values and target errnos. */
3022 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3023                                abi_ulong target_addrlen_addr)
3024 {
3025     socklen_t addrlen, ret_addrlen;
3026     void *addr;
3027     abi_long ret;
3028 
3029     if (get_user_u32(addrlen, target_addrlen_addr))
3030         return -TARGET_EFAULT;
3031 
3032     if ((int)addrlen < 0) {
3033         return -TARGET_EINVAL;
3034     }
3035 
3036     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3037         return -TARGET_EFAULT;
3038 
3039     addr = alloca(addrlen);
3040 
3041     ret_addrlen = addrlen;
3042     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3043     if (!is_error(ret)) {
3044         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3045         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3046             ret = -TARGET_EFAULT;
3047         }
3048     }
3049     return ret;
3050 }
3051 
3052 /* do_getsockname() Must return target values and target errnos. */
3053 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3054                                abi_ulong target_addrlen_addr)
3055 {
3056     socklen_t addrlen, ret_addrlen;
3057     void *addr;
3058     abi_long ret;
3059 
3060     if (get_user_u32(addrlen, target_addrlen_addr))
3061         return -TARGET_EFAULT;
3062 
3063     if ((int)addrlen < 0) {
3064         return -TARGET_EINVAL;
3065     }
3066 
3067     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3068         return -TARGET_EFAULT;
3069 
3070     addr = alloca(addrlen);
3071 
3072     ret_addrlen = addrlen;
3073     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3074     if (!is_error(ret)) {
3075         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3076         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3077             ret = -TARGET_EFAULT;
3078         }
3079     }
3080     return ret;
3081 }
3082 
3083 /* do_socketpair() Must return target values and target errnos. */
3084 static abi_long do_socketpair(int domain, int type, int protocol,
3085                               abi_ulong target_tab_addr)
3086 {
3087     int tab[2];
3088     abi_long ret;
3089 
3090     target_to_host_sock_type(&type);
3091 
3092     ret = get_errno(socketpair(domain, type, protocol, tab));
3093     if (!is_error(ret)) {
3094         if (put_user_s32(tab[0], target_tab_addr)
3095             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3096             ret = -TARGET_EFAULT;
3097     }
3098     return ret;
3099 }
3100 
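/* sendto() emulation: the guest buffer is locked in place and, when a
 * per-fd translation hook is registered (fd_trans_target_to_host_data), the
 * data is first copied and converted; the destination address, if any, is
 * converted to host format before calling the host sendto().
 */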
3101 /* do_sendto() Must return target values and target errnos. */
3102 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3103                           abi_ulong target_addr, socklen_t addrlen)
3104 {
3105     void *addr;
3106     void *host_msg;
3107     void *copy_msg = NULL;
3108     abi_long ret;
3109 
3110     if ((int)addrlen < 0) {
3111         return -TARGET_EINVAL;
3112     }
3113 
3114     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3115     if (!host_msg)
3116         return -TARGET_EFAULT;
3117     if (fd_trans_target_to_host_data(fd)) {
3118         copy_msg = host_msg;
3119         host_msg = g_malloc(len);
3120         memcpy(host_msg, copy_msg, len);
3121         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3122         if (ret < 0) {
3123             goto fail;
3124         }
3125     }
3126     if (target_addr) {
3127         addr = alloca(addrlen+1);
3128         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3129         if (ret) {
3130             goto fail;
3131         }
3132         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3133     } else {
3134         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3135     }
3136 fail:
3137     if (copy_msg) {
3138         g_free(host_msg);
3139         host_msg = copy_msg;
3140     }
3141     unlock_user(host_msg, msg, 0);
3142     return ret;
3143 }
3144 
3145 /* do_recvfrom() must return target values and target errnos. */
3146 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3147                             abi_ulong target_addr,
3148                             abi_ulong target_addrlen)
3149 {
3150     socklen_t addrlen, ret_addrlen;
3151     void *addr;
3152     void *host_msg;
3153     abi_long ret;
3154 
3155     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3156     if (!host_msg)
3157         return -TARGET_EFAULT;
3158     if (target_addr) {
3159         if (get_user_u32(addrlen, target_addrlen)) {
3160             ret = -TARGET_EFAULT;
3161             goto fail;
3162         }
3163         if ((int)addrlen < 0) {
3164             ret = -TARGET_EINVAL;
3165             goto fail;
3166         }
3167         addr = alloca(addrlen);
3168         ret_addrlen = addrlen;
3169         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3170                                       addr, &ret_addrlen));
3171     } else {
3172         addr = NULL; /* To keep compiler quiet.  */
3173         addrlen = 0; /* To keep compiler quiet.  */
3174         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3175     }
3176     if (!is_error(ret)) {
3177         if (fd_trans_host_to_target_data(fd)) {
3178             abi_long trans;
3179             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3180             if (is_error(trans)) {
3181                 ret = trans;
3182                 goto fail;
3183             }
3184         }
3185         if (target_addr) {
3186             host_to_target_sockaddr(target_addr, addr,
3187                                     MIN(addrlen, ret_addrlen));
3188             if (put_user_u32(ret_addrlen, target_addrlen)) {
3189                 ret = -TARGET_EFAULT;
3190                 goto fail;
3191             }
3192         }
3193         unlock_user(host_msg, msg, len);
3194     } else {
3195 fail:
3196         unlock_user(host_msg, msg, 0);
3197     }
3198     return ret;
3199 }
3200 
3201 #ifdef TARGET_NR_socketcall
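/*
 * On targets that use socketcall(2), the guest packs the real socket call
 * arguments into an array of abi_longs and passes its address as vptr;
 * nargs[] below gives the number of elements to fetch for each operation.
 */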
3202 /* do_socketcall() must return target values and target errnos. */
3203 static abi_long do_socketcall(int num, abi_ulong vptr)
3204 {
3205     static const unsigned nargs[] = { /* number of arguments per operation */
3206         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3207         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3208         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3209         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3210         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3211         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3212         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3213         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3214         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3215         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3216         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3217         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3218         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3219         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3220         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3221         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3222         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3223         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3224         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3225         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3226     };
3227     abi_long a[6]; /* max 6 args */
3228     unsigned i;
3229 
3230     /* check the range of the first argument num */
3231     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3232     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3233         return -TARGET_EINVAL;
3234     }
3235     /* ensure we have space for args */
3236     if (nargs[num] > ARRAY_SIZE(a)) {
3237         return -TARGET_EINVAL;
3238     }
3239     /* collect the arguments in a[] according to nargs[] */
3240     for (i = 0; i < nargs[num]; ++i) {
3241         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3242             return -TARGET_EFAULT;
3243         }
3244     }
3245     /* now when we have the args, invoke the appropriate underlying function */
3246     switch (num) {
3247     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3248         return do_socket(a[0], a[1], a[2]);
3249     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3250         return do_bind(a[0], a[1], a[2]);
3251     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3252         return do_connect(a[0], a[1], a[2]);
3253     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3254         return get_errno(listen(a[0], a[1]));
3255     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3256         return do_accept4(a[0], a[1], a[2], 0);
3257     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3258         return do_getsockname(a[0], a[1], a[2]);
3259     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3260         return do_getpeername(a[0], a[1], a[2]);
3261     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3262         return do_socketpair(a[0], a[1], a[2], a[3]);
3263     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3264         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3265     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3266         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3267     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3268         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3269     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3270         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3271     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3272         return get_errno(shutdown(a[0], a[1]));
3273     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3274         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3275     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3276         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3277     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3278         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3279     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3280         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3281     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3282         return do_accept4(a[0], a[1], a[2], a[3]);
3283     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3284         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3285     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3286         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3287     default:
3288         gemu_log("Unsupported socketcall: %d\n", num);
3289         return -TARGET_EINVAL;
3290     }
3291 }
3292 #endif
3293 
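/*
 * Guest shmat() attachments are recorded here so that do_shmdt() can later
 * recover the segment size and clear the guest page flags again.
 */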
3294 #define N_SHM_REGIONS	32
3295 
3296 static struct shm_region {
3297     abi_ulong start;
3298     abi_ulong size;
3299     bool in_use;
3300 } shm_regions[N_SHM_REGIONS];
3301 
3302 #ifndef TARGET_SEMID64_DS
3303 /* asm-generic version of this struct */
3304 struct target_semid64_ds
3305 {
3306   struct target_ipc_perm sem_perm;
3307   abi_ulong sem_otime;
3308 #if TARGET_ABI_BITS == 32
3309   abi_ulong __unused1;
3310 #endif
3311   abi_ulong sem_ctime;
3312 #if TARGET_ABI_BITS == 32
3313   abi_ulong __unused2;
3314 #endif
3315   abi_ulong sem_nsems;
3316   abi_ulong __unused3;
3317   abi_ulong __unused4;
3318 };
3319 #endif
3320 
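/*
 * Convert a target struct ipc_perm to the host layout.  The permission
 * structure sits at the start of the target's semid/msqid/shmid
 * descriptors, so target_addr is simply reinterpreted as a
 * target_semid64_ds here; the reverse conversion follows below.
 */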
3321 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3322                                                abi_ulong target_addr)
3323 {
3324     struct target_ipc_perm *target_ip;
3325     struct target_semid64_ds *target_sd;
3326 
3327     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3328         return -TARGET_EFAULT;
3329     target_ip = &(target_sd->sem_perm);
3330     host_ip->__key = tswap32(target_ip->__key);
3331     host_ip->uid = tswap32(target_ip->uid);
3332     host_ip->gid = tswap32(target_ip->gid);
3333     host_ip->cuid = tswap32(target_ip->cuid);
3334     host_ip->cgid = tswap32(target_ip->cgid);
3335 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3336     host_ip->mode = tswap32(target_ip->mode);
3337 #else
3338     host_ip->mode = tswap16(target_ip->mode);
3339 #endif
3340 #if defined(TARGET_PPC)
3341     host_ip->__seq = tswap32(target_ip->__seq);
3342 #else
3343     host_ip->__seq = tswap16(target_ip->__seq);
3344 #endif
3345     unlock_user_struct(target_sd, target_addr, 0);
3346     return 0;
3347 }
3348 
3349 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3350                                                struct ipc_perm *host_ip)
3351 {
3352     struct target_ipc_perm *target_ip;
3353     struct target_semid64_ds *target_sd;
3354 
3355     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3356         return -TARGET_EFAULT;
3357     target_ip = &(target_sd->sem_perm);
3358     target_ip->__key = tswap32(host_ip->__key);
3359     target_ip->uid = tswap32(host_ip->uid);
3360     target_ip->gid = tswap32(host_ip->gid);
3361     target_ip->cuid = tswap32(host_ip->cuid);
3362     target_ip->cgid = tswap32(host_ip->cgid);
3363 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3364     target_ip->mode = tswap32(host_ip->mode);
3365 #else
3366     target_ip->mode = tswap16(host_ip->mode);
3367 #endif
3368 #if defined(TARGET_PPC)
3369     target_ip->__seq = tswap32(host_ip->__seq);
3370 #else
3371     target_ip->__seq = tswap16(host_ip->__seq);
3372 #endif
3373     unlock_user_struct(target_sd, target_addr, 1);
3374     return 0;
3375 }
3376 
3377 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3378                                                abi_ulong target_addr)
3379 {
3380     struct target_semid64_ds *target_sd;
3381 
3382     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3383         return -TARGET_EFAULT;
3384     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3385         return -TARGET_EFAULT;
3386     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3387     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3388     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3389     unlock_user_struct(target_sd, target_addr, 0);
3390     return 0;
3391 }
3392 
3393 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3394                                                struct semid_ds *host_sd)
3395 {
3396     struct target_semid64_ds *target_sd;
3397 
3398     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3399         return -TARGET_EFAULT;
3400     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3401         return -TARGET_EFAULT;
3402     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3403     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3404     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3405     unlock_user_struct(target_sd, target_addr, 1);
3406     return 0;
3407 }
3408 
3409 struct target_seminfo {
3410     int semmap;
3411     int semmni;
3412     int semmns;
3413     int semmnu;
3414     int semmsl;
3415     int semopm;
3416     int semume;
3417     int semusz;
3418     int semvmx;
3419     int semaem;
3420 };
3421 
3422 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3423                                               struct seminfo *host_seminfo)
3424 {
3425     struct target_seminfo *target_seminfo;
3426     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3427         return -TARGET_EFAULT;
3428     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3429     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3430     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3431     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3432     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3433     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3434     __put_user(host_seminfo->semume, &target_seminfo->semume);
3435     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3436     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3437     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3438     unlock_user_struct(target_seminfo, target_addr, 1);
3439     return 0;
3440 }
3441 
3442 union semun {
3443     int val;
3444     struct semid_ds *buf;
3445     unsigned short *array;
3446     struct seminfo *__buf;
3447 };
3448 
3449 union target_semun {
3450     int val;
3451     abi_ulong buf;
3452     abi_ulong array;
3453     abi_ulong __buf;
3454 };
3455 
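/*
 * For GETALL/SETALL: query the semaphore count with IPC_STAT, allocate a
 * host array of that size and fill it from the guest's 16-bit values.
 */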
3456 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3457                                                abi_ulong target_addr)
3458 {
3459     int nsems;
3460     unsigned short *array;
3461     union semun semun;
3462     struct semid_ds semid_ds;
3463     int i, ret;
3464 
3465     semun.buf = &semid_ds;
3466 
3467     ret = semctl(semid, 0, IPC_STAT, semun);
3468     if (ret == -1)
3469         return get_errno(ret);
3470 
3471     nsems = semid_ds.sem_nsems;
3472 
3473     *host_array = g_try_new(unsigned short, nsems);
3474     if (!*host_array) {
3475         return -TARGET_ENOMEM;
3476     }
3477     array = lock_user(VERIFY_READ, target_addr,
3478                       nsems*sizeof(unsigned short), 1);
3479     if (!array) {
3480         g_free(*host_array);
3481         return -TARGET_EFAULT;
3482     }
3483 
3484     for (i = 0; i < nsems; i++) {
3485         __get_user((*host_array)[i], &array[i]);
3486     }
3487     unlock_user(array, target_addr, 0);
3488 
3489     return 0;
3490 }
3491 
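/*
 * Counterpart of target_to_host_semarray(): copy the values back to guest
 * memory and release the host array allocated above.
 */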
3492 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3493                                                unsigned short **host_array)
3494 {
3495     int nsems;
3496     unsigned short *array;
3497     union semun semun;
3498     struct semid_ds semid_ds;
3499     int i, ret;
3500 
3501     semun.buf = &semid_ds;
3502 
3503     ret = semctl(semid, 0, IPC_STAT, semun);
3504     if (ret == -1)
3505         return get_errno(ret);
3506 
3507     nsems = semid_ds.sem_nsems;
3508 
3509     array = lock_user(VERIFY_WRITE, target_addr,
3510                       nsems*sizeof(unsigned short), 0);
3511     if (!array)
3512         return -TARGET_EFAULT;
3513 
3514     for (i = 0; i < nsems; i++) {
3515         __put_user((*host_array)[i], &array[i]);
3516     }
3517     g_free(*host_array);
3518     unlock_user(array, target_addr, 1);
3519 
3520     return 0;
3521 }
3522 
3523 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3524                                  abi_ulong target_arg)
3525 {
3526     union target_semun target_su = { .buf = target_arg };
3527     union semun arg;
3528     struct semid_ds dsarg;
3529     unsigned short *array = NULL;
3530     struct seminfo seminfo;
3531     abi_long ret = -TARGET_EINVAL;
3532     abi_long err;
3533     cmd &= 0xff;
3534 
3535     switch (cmd) {
3536     case GETVAL:
3537     case SETVAL:
3538         /* In 64 bit cross-endian situations, we will erroneously pick up
3539          * the wrong half of the union for the "val" element.  To rectify
3540          * this, the entire 8-byte structure is byteswapped, followed by
3541          * a swap of the 4 byte val field. In other cases, the data is
3542          * already in proper host byte order. */
3543         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3544             target_su.buf = tswapal(target_su.buf);
3545             arg.val = tswap32(target_su.val);
3546         } else {
3547             arg.val = target_su.val;
3548         }
3549             ret = get_errno(semctl(semid, semnum, cmd, arg));
3550             break;
3551     case GETALL:
3552     case SETALL:
3553             err = target_to_host_semarray(semid, &array, target_su.array);
3554             if (err)
3555                 return err;
3556             arg.array = array;
3557             ret = get_errno(semctl(semid, semnum, cmd, arg));
3558             err = host_to_target_semarray(semid, target_su.array, &array);
3559             if (err)
3560                 return err;
3561             break;
3562     case IPC_STAT:
3563     case IPC_SET:
3564     case SEM_STAT:
3565             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3566             if (err)
3567                 return err;
3568             arg.buf = &dsarg;
3569             ret = get_errno(semctl(semid, semnum, cmd, arg));
3570             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3571             if (err)
3572                 return err;
3573             break;
3574     case IPC_INFO:
3575     case SEM_INFO:
3576             arg.__buf = &seminfo;
3577             ret = get_errno(semctl(semid, semnum, cmd, arg));
3578             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3579             if (err)
3580                 return err;
3581             break;
3582     case IPC_RMID:
3583     case GETPID:
3584     case GETNCNT:
3585     case GETZCNT:
3586             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3587             break;
3588     }
3589 
3590     return ret;
3591 }
3592 
3593 struct target_sembuf {
3594     unsigned short sem_num;
3595     short sem_op;
3596     short sem_flg;
3597 };
3598 
3599 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3600                                              abi_ulong target_addr,
3601                                              unsigned nsops)
3602 {
3603     struct target_sembuf *target_sembuf;
3604     int i;
3605 
3606     target_sembuf = lock_user(VERIFY_READ, target_addr,
3607                               nsops*sizeof(struct target_sembuf), 1);
3608     if (!target_sembuf)
3609         return -TARGET_EFAULT;
3610 
3611     for (i = 0; i < nsops; i++) {
3612         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3613         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3614         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3615     }
3616 
3617     unlock_user(target_sembuf, target_addr, 0);
3618 
3619     return 0;
3620 }
3621 
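/*
 * semop(2) for the guest: convert the sembuf array, then try the host
 * semtimedop syscall and fall back to the multiplexed ipc(2) entry point
 * if that is not available.
 */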
3622 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3623 {
3624     struct sembuf sops[nsops];
3625     abi_long ret;
3626 
3627     if (target_to_host_sembuf(sops, ptr, nsops))
3628         return -TARGET_EFAULT;
3629 
3630     ret = -TARGET_ENOSYS;
3631 #ifdef __NR_semtimedop
3632     ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3633 #endif
3634 #ifdef __NR_ipc
3635     if (ret == -TARGET_ENOSYS) {
3636         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3637     }
3638 #endif
3639     return ret;
3640 }
3641 
3642 struct target_msqid_ds
3643 {
3644     struct target_ipc_perm msg_perm;
3645     abi_ulong msg_stime;
3646 #if TARGET_ABI_BITS == 32
3647     abi_ulong __unused1;
3648 #endif
3649     abi_ulong msg_rtime;
3650 #if TARGET_ABI_BITS == 32
3651     abi_ulong __unused2;
3652 #endif
3653     abi_ulong msg_ctime;
3654 #if TARGET_ABI_BITS == 32
3655     abi_ulong __unused3;
3656 #endif
3657     abi_ulong __msg_cbytes;
3658     abi_ulong msg_qnum;
3659     abi_ulong msg_qbytes;
3660     abi_ulong msg_lspid;
3661     abi_ulong msg_lrpid;
3662     abi_ulong __unused4;
3663     abi_ulong __unused5;
3664 };
3665 
3666 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3667                                                abi_ulong target_addr)
3668 {
3669     struct target_msqid_ds *target_md;
3670 
3671     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3672         return -TARGET_EFAULT;
3673     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3674         return -TARGET_EFAULT;
3675     host_md->msg_stime = tswapal(target_md->msg_stime);
3676     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3677     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3678     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3679     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3680     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3681     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3682     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3683     unlock_user_struct(target_md, target_addr, 0);
3684     return 0;
3685 }
3686 
3687 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3688                                                struct msqid_ds *host_md)
3689 {
3690     struct target_msqid_ds *target_md;
3691 
3692     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3693         return -TARGET_EFAULT;
3694     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3695         return -TARGET_EFAULT;
3696     target_md->msg_stime = tswapal(host_md->msg_stime);
3697     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3698     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3699     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3700     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3701     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3702     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3703     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3704     unlock_user_struct(target_md, target_addr, 1);
3705     return 0;
3706 }
3707 
3708 struct target_msginfo {
3709     int msgpool;
3710     int msgmap;
3711     int msgmax;
3712     int msgmnb;
3713     int msgmni;
3714     int msgssz;
3715     int msgtql;
3716     unsigned short int msgseg;
3717 };
3718 
3719 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3720                                               struct msginfo *host_msginfo)
3721 {
3722     struct target_msginfo *target_msginfo;
3723     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3724         return -TARGET_EFAULT;
3725     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3726     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3727     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3728     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3729     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3730     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3731     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3732     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3733     unlock_user_struct(target_msginfo, target_addr, 1);
3734     return 0;
3735 }
3736 
3737 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3738 {
3739     struct msqid_ds dsarg;
3740     struct msginfo msginfo;
3741     abi_long ret = -TARGET_EINVAL;
3742 
3743     cmd &= 0xff;
3744 
3745     switch (cmd) {
3746     case IPC_STAT:
3747     case IPC_SET:
3748     case MSG_STAT:
3749         if (target_to_host_msqid_ds(&dsarg,ptr))
3750             return -TARGET_EFAULT;
3751         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3752         if (host_to_target_msqid_ds(ptr,&dsarg))
3753             return -TARGET_EFAULT;
3754         break;
3755     case IPC_RMID:
3756         ret = get_errno(msgctl(msgid, cmd, NULL));
3757         break;
3758     case IPC_INFO:
3759     case MSG_INFO:
3760         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3761         if (host_to_target_msginfo(ptr, &msginfo))
3762             return -TARGET_EFAULT;
3763         break;
3764     }
3765 
3766     return ret;
3767 }
3768 
3769 struct target_msgbuf {
3770     abi_long mtype;
3771     char mtext[1];
3772 };
3773 
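/*
 * msgsnd(2) for the guest: copy mtype and mtext into a host msgbuf, then
 * try the direct msgsnd syscall and fall back to ipc(2), as in do_semop().
 */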
3774 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3775                                  ssize_t msgsz, int msgflg)
3776 {
3777     struct target_msgbuf *target_mb;
3778     struct msgbuf *host_mb;
3779     abi_long ret = 0;
3780 
3781     if (msgsz < 0) {
3782         return -TARGET_EINVAL;
3783     }
3784 
3785     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3786         return -TARGET_EFAULT;
3787     host_mb = g_try_malloc(msgsz + sizeof(long));
3788     if (!host_mb) {
3789         unlock_user_struct(target_mb, msgp, 0);
3790         return -TARGET_ENOMEM;
3791     }
3792     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3793     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3794     ret = -TARGET_ENOSYS;
3795 #ifdef __NR_msgsnd
3796     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3797 #endif
3798 #ifdef __NR_ipc
3799     if (ret == -TARGET_ENOSYS) {
3800         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
3801                                  host_mb, 0));
3802     }
3803 #endif
3804     g_free(host_mb);
3805     unlock_user_struct(target_mb, msgp, 0);
3806 
3807     return ret;
3808 }
3809 
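/*
 * msgrcv(2) for the guest: receive into a host msgbuf, then copy mtype and
 * however many mtext bytes were actually received back to the guest.
 */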
3810 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3811                                  ssize_t msgsz, abi_long msgtyp,
3812                                  int msgflg)
3813 {
3814     struct target_msgbuf *target_mb;
3815     char *target_mtext;
3816     struct msgbuf *host_mb;
3817     abi_long ret = 0;
3818 
3819     if (msgsz < 0) {
3820         return -TARGET_EINVAL;
3821     }
3822 
3823     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3824         return -TARGET_EFAULT;
3825 
3826     host_mb = g_try_malloc(msgsz + sizeof(long));
3827     if (!host_mb) {
3828         ret = -TARGET_ENOMEM;
3829         goto end;
3830     }
3831     ret = -TARGET_ENOSYS;
3832 #ifdef __NR_msgrcv
3833     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3834 #endif
3835 #ifdef __NR_ipc
3836     if (ret == -TARGET_ENOSYS) {
3837         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
3838                         msgflg, host_mb, msgtyp));
3839     }
3840 #endif
3841 
3842     if (ret > 0) {
3843         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3844         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3845         if (!target_mtext) {
3846             ret = -TARGET_EFAULT;
3847             goto end;
3848         }
3849         memcpy(target_mb->mtext, host_mb->mtext, ret);
3850         unlock_user(target_mtext, target_mtext_addr, ret);
3851     }
3852 
3853     target_mb->mtype = tswapal(host_mb->mtype);
3854 
3855 end:
3856     if (target_mb)
3857         unlock_user_struct(target_mb, msgp, 1);
3858     g_free(host_mb);
3859     return ret;
3860 }
3861 
3862 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3863                                                abi_ulong target_addr)
3864 {
3865     struct target_shmid_ds *target_sd;
3866 
3867     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3868         return -TARGET_EFAULT;
3869     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3870         return -TARGET_EFAULT;
3871     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3872     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3873     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3874     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3875     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3876     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3877     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3878     unlock_user_struct(target_sd, target_addr, 0);
3879     return 0;
3880 }
3881 
3882 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3883                                                struct shmid_ds *host_sd)
3884 {
3885     struct target_shmid_ds *target_sd;
3886 
3887     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3888         return -TARGET_EFAULT;
3889     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3890         return -TARGET_EFAULT;
3891     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3892     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3893     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3894     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3895     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3896     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3897     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3898     unlock_user_struct(target_sd, target_addr, 1);
3899     return 0;
3900 }
3901 
3902 struct  target_shminfo {
3903     abi_ulong shmmax;
3904     abi_ulong shmmin;
3905     abi_ulong shmmni;
3906     abi_ulong shmseg;
3907     abi_ulong shmall;
3908 };
3909 
3910 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3911                                               struct shminfo *host_shminfo)
3912 {
3913     struct target_shminfo *target_shminfo;
3914     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3915         return -TARGET_EFAULT;
3916     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3917     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3918     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3919     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3920     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3921     unlock_user_struct(target_shminfo, target_addr, 1);
3922     return 0;
3923 }
3924 
3925 struct target_shm_info {
3926     int used_ids;
3927     abi_ulong shm_tot;
3928     abi_ulong shm_rss;
3929     abi_ulong shm_swp;
3930     abi_ulong swap_attempts;
3931     abi_ulong swap_successes;
3932 };
3933 
3934 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3935                                                struct shm_info *host_shm_info)
3936 {
3937     struct target_shm_info *target_shm_info;
3938     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3939         return -TARGET_EFAULT;
3940     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3941     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3942     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3943     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3944     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3945     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3946     unlock_user_struct(target_shm_info, target_addr, 1);
3947     return 0;
3948 }
3949 
3950 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3951 {
3952     struct shmid_ds dsarg;
3953     struct shminfo shminfo;
3954     struct shm_info shm_info;
3955     abi_long ret = -TARGET_EINVAL;
3956 
3957     cmd &= 0xff;
3958 
3959     switch (cmd) {
3960     case IPC_STAT:
3961     case IPC_SET:
3962     case SHM_STAT:
3963         if (target_to_host_shmid_ds(&dsarg, buf))
3964             return -TARGET_EFAULT;
3965         ret = get_errno(shmctl(shmid, cmd, &dsarg));
3966         if (host_to_target_shmid_ds(buf, &dsarg))
3967             return -TARGET_EFAULT;
3968         break;
3969     case IPC_INFO:
3970         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3971         if (host_to_target_shminfo(buf, &shminfo))
3972             return -TARGET_EFAULT;
3973         break;
3974     case SHM_INFO:
3975         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3976         if (host_to_target_shm_info(buf, &shm_info))
3977             return -TARGET_EFAULT;
3978         break;
3979     case IPC_RMID:
3980     case SHM_LOCK:
3981     case SHM_UNLOCK:
3982         ret = get_errno(shmctl(shmid, cmd, NULL));
3983         break;
3984     }
3985 
3986     return ret;
3987 }
3988 
3989 #ifndef TARGET_FORCE_SHMLBA
3990 /* For most architectures, SHMLBA is the same as the page size;
3991  * some architectures have larger values, in which case they should
3992  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3993  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3994  * and defining its own value for SHMLBA.
3995  *
3996  * The kernel also permits SHMLBA to be set by the architecture to a
3997  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3998  * this means that addresses are rounded to the large size if
3999  * SHM_RND is set but addresses not aligned to that size are not rejected
4000  * as long as they are at least page-aligned. Since the only architecture
4001  * which uses this is ia64 this code doesn't provide for that oddity.
4002  */
4003 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4004 {
4005     return TARGET_PAGE_SIZE;
4006 }
4007 #endif
4008 
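/*
 * Attach a SysV shared memory segment into the guest address space.  The
 * segment size is read back with IPC_STAT so that the guest page flags can
 * be set and the mapping recorded in shm_regions[] for a later shmdt().
 */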
4009 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4010                                  int shmid, abi_ulong shmaddr, int shmflg)
4011 {
4012     abi_long raddr;
4013     void *host_raddr;
4014     struct shmid_ds shm_info;
4015     int i, ret;
4016     abi_ulong shmlba;
4017 
4018     /* find out the length of the shared memory segment */
4019     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4020     if (is_error(ret)) {
4021         /* can't get length, bail out */
4022         return ret;
4023     }
4024 
4025     shmlba = target_shmlba(cpu_env);
4026 
4027     if (shmaddr & (shmlba - 1)) {
4028         if (shmflg & SHM_RND) {
4029             shmaddr &= ~(shmlba - 1);
4030         } else {
4031             return -TARGET_EINVAL;
4032         }
4033     }
4034     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4035         return -TARGET_EINVAL;
4036     }
4037 
4038     mmap_lock();
4039 
4040     if (shmaddr) {
4041         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4042     } else {
4043         abi_ulong mmap_start;
4044 
4045         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4046         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4047 
4048         if (mmap_start == -1) {
4049             errno = ENOMEM;
4050             host_raddr = (void *)-1;
4051         } else
4052             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4053     }
4054 
4055     if (host_raddr == (void *)-1) {
4056         mmap_unlock();
4057         return get_errno((long)host_raddr);
4058     }
4059     raddr = h2g((unsigned long)host_raddr);
4060 
4061     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4062                    PAGE_VALID | PAGE_READ |
4063                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4064 
4065     for (i = 0; i < N_SHM_REGIONS; i++) {
4066         if (!shm_regions[i].in_use) {
4067             shm_regions[i].in_use = true;
4068             shm_regions[i].start = raddr;
4069             shm_regions[i].size = shm_info.shm_segsz;
4070             break;
4071         }
4072     }
4073 
4074     mmap_unlock();
4075     return raddr;
4076 
4077 }
4078 
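/*
 * Detach a segment previously attached with do_shmat(): clear the guest
 * page flags for the recorded region, then call the host shmdt().
 */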
4079 static inline abi_long do_shmdt(abi_ulong shmaddr)
4080 {
4081     int i;
4082     abi_long rv;
4083 
4084     mmap_lock();
4085 
4086     for (i = 0; i < N_SHM_REGIONS; ++i) {
4087         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4088             shm_regions[i].in_use = false;
4089             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4090             break;
4091         }
4092     }
4093     rv = get_errno(shmdt(g2h(shmaddr)));
4094 
4095     mmap_unlock();
4096 
4097     return rv;
4098 }
4099 
4100 #ifdef TARGET_NR_ipc
4101 /* ??? This only works with linear mappings.  */
4102 /* do_ipc() must return target values and target errnos. */
4103 static abi_long do_ipc(CPUArchState *cpu_env,
4104                        unsigned int call, abi_long first,
4105                        abi_long second, abi_long third,
4106                        abi_long ptr, abi_long fifth)
4107 {
4108     int version;
4109     abi_long ret = 0;
4110 
4111     version = call >> 16;
4112     call &= 0xffff;
4113 
4114     switch (call) {
4115     case IPCOP_semop:
4116         ret = do_semop(first, ptr, second);
4117         break;
4118 
4119     case IPCOP_semget:
4120         ret = get_errno(semget(first, second, third));
4121         break;
4122 
4123     case IPCOP_semctl: {
4124         /* The semun argument to semctl is passed by value, so dereference the
4125          * ptr argument. */
4126         abi_ulong atptr;
4127         get_user_ual(atptr, ptr);
4128         ret = do_semctl(first, second, third, atptr);
4129         break;
4130     }
4131 
4132     case IPCOP_msgget:
4133         ret = get_errno(msgget(first, second));
4134         break;
4135 
4136     case IPCOP_msgsnd:
4137         ret = do_msgsnd(first, ptr, second, third);
4138         break;
4139 
4140     case IPCOP_msgctl:
4141         ret = do_msgctl(first, second, ptr);
4142         break;
4143 
4144     case IPCOP_msgrcv:
4145         switch (version) {
4146         case 0:
4147             {
4148                 struct target_ipc_kludge {
4149                     abi_long msgp;
4150                     abi_long msgtyp;
4151                 } *tmp;
4152 
4153                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4154                     ret = -TARGET_EFAULT;
4155                     break;
4156                 }
4157 
4158                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4159 
4160                 unlock_user_struct(tmp, ptr, 0);
4161                 break;
4162             }
4163         default:
4164             ret = do_msgrcv(first, ptr, second, fifth, third);
4165         }
4166         break;
4167 
4168     case IPCOP_shmat:
4169         switch (version) {
4170         default:
4171         {
4172             abi_ulong raddr;
4173             raddr = do_shmat(cpu_env, first, ptr, second);
4174             if (is_error(raddr))
4175                 return get_errno(raddr);
4176             if (put_user_ual(raddr, third))
4177                 return -TARGET_EFAULT;
4178             break;
4179         }
4180         case 1:
4181             ret = -TARGET_EINVAL;
4182             break;
4183         }
4184         break;
4185     case IPCOP_shmdt:
4186         ret = do_shmdt(ptr);
4187         break;
4188 
4189     case IPCOP_shmget:
4190         /* IPC_* flag values are the same on all linux platforms */
4191         ret = get_errno(shmget(first, second, third));
4192         break;
4193 
4194     /* IPC_* and SHM_* command values are the same on all linux platforms */
4195     case IPCOP_shmctl:
4196         ret = do_shmctl(first, second, ptr);
4197         break;
4198     default:
4199         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4200         ret = -TARGET_ENOSYS;
4201         break;
4202     }
4203     return ret;
4204 }
4205 #endif
4206 
4207 /* kernel structure types definitions */
4208 
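/*
 * syscall_types.h is included twice: first to build an enum of STRUCT_*
 * identifiers, then to emit for each structure the argtype array
 * (struct_<name>_def) that the thunk conversion code walks at run time.
 */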
4209 #define STRUCT(name, ...) STRUCT_ ## name,
4210 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4211 enum {
4212 #include "syscall_types.h"
4213 STRUCT_MAX
4214 };
4215 #undef STRUCT
4216 #undef STRUCT_SPECIAL
4217 
4218 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4219 #define STRUCT_SPECIAL(name)
4220 #include "syscall_types.h"
4221 #undef STRUCT
4222 #undef STRUCT_SPECIAL
4223 
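/*
 * One entry of the ioctl translation table: the target and host request
 * numbers, an IOC_R/IOC_W access mask, an optional custom handler and a
 * thunk description of the argument type.
 */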
4224 typedef struct IOCTLEntry IOCTLEntry;
4225 
4226 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4227                              int fd, int cmd, abi_long arg);
4228 
4229 struct IOCTLEntry {
4230     int target_cmd;
4231     unsigned int host_cmd;
4232     const char *name;
4233     int access;
4234     do_ioctl_fn *do_ioctl;
4235     const argtype arg_type[5];
4236 };
4237 
4238 #define IOC_R 0x0001
4239 #define IOC_W 0x0002
4240 #define IOC_RW (IOC_R | IOC_W)
4241 
4242 #define MAX_STRUCT_SIZE 4096
4243 
4244 #ifdef CONFIG_FIEMAP
4245 /* So fiemap access checks don't overflow on 32 bit systems.
4246  * This is very slightly smaller than the limit imposed by
4247  * the underlying kernel.
4248  */
4249 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4250                             / sizeof(struct fiemap_extent))
4251 
4252 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4253                                        int fd, int cmd, abi_long arg)
4254 {
4255     /* The parameter for this ioctl is a struct fiemap followed
4256      * by an array of struct fiemap_extent whose size is set
4257      * in fiemap->fm_extent_count. The array is filled in by the
4258      * ioctl.
4259      */
4260     int target_size_in, target_size_out;
4261     struct fiemap *fm;
4262     const argtype *arg_type = ie->arg_type;
4263     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4264     void *argptr, *p;
4265     abi_long ret;
4266     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4267     uint32_t outbufsz;
4268     int free_fm = 0;
4269 
4270     assert(arg_type[0] == TYPE_PTR);
4271     assert(ie->access == IOC_RW);
4272     arg_type++;
4273     target_size_in = thunk_type_size(arg_type, 0);
4274     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4275     if (!argptr) {
4276         return -TARGET_EFAULT;
4277     }
4278     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4279     unlock_user(argptr, arg, 0);
4280     fm = (struct fiemap *)buf_temp;
4281     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4282         return -TARGET_EINVAL;
4283     }
4284 
4285     outbufsz = sizeof (*fm) +
4286         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4287 
4288     if (outbufsz > MAX_STRUCT_SIZE) {
4289         /* We can't fit all the extents into the fixed size buffer.
4290          * Allocate one that is large enough and use it instead.
4291          */
4292         fm = g_try_malloc(outbufsz);
4293         if (!fm) {
4294             return -TARGET_ENOMEM;
4295         }
4296         memcpy(fm, buf_temp, sizeof(struct fiemap));
4297         free_fm = 1;
4298     }
4299     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4300     if (!is_error(ret)) {
4301         target_size_out = target_size_in;
4302         /* An extent_count of 0 means we were only counting the extents
4303          * so there are no structs to copy
4304          */
4305         if (fm->fm_extent_count != 0) {
4306             target_size_out += fm->fm_mapped_extents * extent_size;
4307         }
4308         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4309         if (!argptr) {
4310             ret = -TARGET_EFAULT;
4311         } else {
4312             /* Convert the struct fiemap */
4313             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4314             if (fm->fm_extent_count != 0) {
4315                 p = argptr + target_size_in;
4316                 /* ...and then all the struct fiemap_extents */
4317                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4318                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4319                                   THUNK_TARGET);
4320                     p += extent_size;
4321                 }
4322             }
4323             unlock_user(argptr, arg, target_size_out);
4324         }
4325     }
4326     if (free_fm) {
4327         g_free(fm);
4328     }
4329     return ret;
4330 }
4331 #endif
4332 
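/*
 * SIOCGIFCONF: struct ifconf carries a pointer to a caller-supplied array
 * of struct ifreq, so both the ifconf header and every array element must
 * be converted between target and host layouts, whose sizes may differ.
 */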
4333 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4334                                 int fd, int cmd, abi_long arg)
4335 {
4336     const argtype *arg_type = ie->arg_type;
4337     int target_size;
4338     void *argptr;
4339     int ret;
4340     struct ifconf *host_ifconf;
4341     uint32_t outbufsz;
4342     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4343     int target_ifreq_size;
4344     int nb_ifreq;
4345     int free_buf = 0;
4346     int i;
4347     int target_ifc_len;
4348     abi_long target_ifc_buf;
4349     int host_ifc_len;
4350     char *host_ifc_buf;
4351 
4352     assert(arg_type[0] == TYPE_PTR);
4353     assert(ie->access == IOC_RW);
4354 
4355     arg_type++;
4356     target_size = thunk_type_size(arg_type, 0);
4357 
4358     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4359     if (!argptr)
4360         return -TARGET_EFAULT;
4361     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4362     unlock_user(argptr, arg, 0);
4363 
4364     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4365     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4366     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4367 
4368     if (target_ifc_buf != 0) {
4369         target_ifc_len = host_ifconf->ifc_len;
4370         nb_ifreq = target_ifc_len / target_ifreq_size;
4371         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4372 
4373         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4374         if (outbufsz > MAX_STRUCT_SIZE) {
4375             /*
4376              * We can't fit all the ifreq entries into the fixed size buffer.
4377              * Allocate one that is large enough and use it instead.
4378              */
4379             host_ifconf = malloc(outbufsz);
4380             if (!host_ifconf) {
4381                 return -TARGET_ENOMEM;
4382             }
4383             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4384             free_buf = 1;
4385         }
4386         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4387 
4388         host_ifconf->ifc_len = host_ifc_len;
4389     } else {
4390         host_ifc_buf = NULL;
4391     }
4392     host_ifconf->ifc_buf = host_ifc_buf;
4393 
4394     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4395     if (!is_error(ret)) {
4396         /* convert host ifc_len to target ifc_len */
4397 
4398         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4399         target_ifc_len = nb_ifreq * target_ifreq_size;
4400         host_ifconf->ifc_len = target_ifc_len;
4401 
4402         /* restore target ifc_buf */
4403 
4404         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4405 
4406         /* copy struct ifconf to target user */
4407 
4408         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4409         if (!argptr)
4410             return -TARGET_EFAULT;
4411         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4412         unlock_user(argptr, arg, target_size);
4413 
4414         if (target_ifc_buf != 0) {
4415             /* copy ifreq[] to target user */
4416             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
            if (!argptr) {
                if (free_buf) {
                    free(host_ifconf);
                }
                return -TARGET_EFAULT;
            }
4417             for (i = 0; i < nb_ifreq ; i++) {
4418                 thunk_convert(argptr + i * target_ifreq_size,
4419                               host_ifc_buf + i * sizeof(struct ifreq),
4420                               ifreq_arg_type, THUNK_TARGET);
4421             }
4422             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4423         }
4424     }
4425 
4426     if (free_buf) {
4427         free(host_ifconf);
4428     }
4429 
4430     return ret;
4431 }
4432 
4433 #if defined(CONFIG_USBFS)
4434 #if HOST_LONG_BITS > 64
4435 #error USBDEVFS thunks do not support >64 bit hosts yet.
4436 #endif
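/*
 * Book-keeping for an asynchronous USBDEVFS_SUBMITURB: the urb handed to
 * the host kernel plus the guest addresses needed to write the results
 * back when it is reaped, or to find it again for DISCARDURB.
 */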
4437 struct live_urb {
4438     uint64_t target_urb_adr;
4439     uint64_t target_buf_adr;
4440     char *target_buf_ptr;
4441     struct usbdevfs_urb host_urb;
4442 };
4443 
4444 static GHashTable *usbdevfs_urb_hashtable(void)
4445 {
4446     static GHashTable *urb_hashtable;
4447 
4448     if (!urb_hashtable) {
4449         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4450     }
4451     return urb_hashtable;
4452 }
4453 
4454 static void urb_hashtable_insert(struct live_urb *urb)
4455 {
4456     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4457     g_hash_table_insert(urb_hashtable, urb, urb);
4458 }
4459 
4460 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4461 {
4462     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4463     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4464 }
4465 
4466 static void urb_hashtable_remove(struct live_urb *urb)
4467 {
4468     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4469     g_hash_table_remove(urb_hashtable, urb);
4470 }
4471 
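/*
 * USBDEVFS_REAPURB returns a pointer to the host urb; recover the
 * enclosing live_urb with offsetof(), copy the results back into the
 * guest's urb and data buffer, and hand the original guest urb pointer
 * back as the reaped handle.
 */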
4472 static abi_long
4473 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4474                           int fd, int cmd, abi_long arg)
4475 {
4476     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4477     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4478     struct live_urb *lurb;
4479     void *argptr;
4480     uint64_t hurb;
4481     int target_size;
4482     uintptr_t target_urb_adr;
4483     abi_long ret;
4484 
4485     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4486 
4487     memset(buf_temp, 0, sizeof(uint64_t));
4488     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4489     if (is_error(ret)) {
4490         return ret;
4491     }
4492 
4493     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4494     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4495     if (!lurb->target_urb_adr) {
4496         return -TARGET_EFAULT;
4497     }
4498     urb_hashtable_remove(lurb);
4499     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4500         lurb->host_urb.buffer_length);
4501     lurb->target_buf_ptr = NULL;
4502 
4503     /* restore the guest buffer pointer */
4504     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4505 
4506     /* update the guest urb struct */
4507     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4508     if (!argptr) {
4509         g_free(lurb);
4510         return -TARGET_EFAULT;
4511     }
4512     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4513     unlock_user(argptr, lurb->target_urb_adr, target_size);
4514 
4515     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4516     /* write back the urb handle */
4517     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4518     if (!argptr) {
4519         g_free(lurb);
4520         return -TARGET_EFAULT;
4521     }
4522 
4523     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4524     target_urb_adr = lurb->target_urb_adr;
4525     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4526     unlock_user(argptr, arg, target_size);
4527 
4528     g_free(lurb);
4529     return ret;
4530 }
4531 
4532 static abi_long
4533 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4534                              uint8_t *buf_temp __attribute__((unused)),
4535                              int fd, int cmd, abi_long arg)
4536 {
4537     struct live_urb *lurb;
4538 
4539     /* map target address back to host URB with metadata. */
4540     lurb = urb_hashtable_lookup(arg);
4541     if (!lurb) {
4542         return -TARGET_EFAULT;
4543     }
4544     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4545 }
4546 
4547 static abi_long
4548 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4549                             int fd, int cmd, abi_long arg)
4550 {
4551     const argtype *arg_type = ie->arg_type;
4552     int target_size;
4553     abi_long ret;
4554     void *argptr;
4555     int rw_dir;
4556     struct live_urb *lurb;
4557 
4558     /*
4559      * Each submitted URB needs to map to a unique ID for the
4560      * kernel, and that unique ID needs to be a pointer to
4561      * host memory.  Hence, we need to malloc for each URB.
4562      * Isochronous transfers have a variable length struct.
4563      */
4564     arg_type++;
4565     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4566 
4567     /* construct host copy of urb and metadata */
4568     lurb = g_try_malloc0(sizeof(struct live_urb));
4569     if (!lurb) {
4570         return -TARGET_ENOMEM;
4571     }
4572 
4573     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4574     if (!argptr) {
4575         g_free(lurb);
4576         return -TARGET_EFAULT;
4577     }
4578     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4579     unlock_user(argptr, arg, 0);
4580 
4581     lurb->target_urb_adr = arg;
4582     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4583 
4584     /* buffer space used depends on endpoint type so lock the entire buffer */
4585     /* control type urbs should check the buffer contents for true direction */
4586     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4587     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4588         lurb->host_urb.buffer_length, 1);
4589     if (lurb->target_buf_ptr == NULL) {
4590         g_free(lurb);
4591         return -TARGET_EFAULT;
4592     }
4593 
4594     /* update buffer pointer in host copy */
4595     lurb->host_urb.buffer = lurb->target_buf_ptr;
4596 
4597     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4598     if (is_error(ret)) {
4599         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4600         g_free(lurb);
4601     } else {
4602         urb_hashtable_insert(lurb);
4603     }
4604 
4605     return ret;
4606 }
4607 #endif /* CONFIG_USBFS */
4608 
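/*
 * Device-mapper ioctls take a struct dm_ioctl header followed by a
 * variable-sized, command-specific payload at data_start, so the payload
 * must be converted element by element in both directions.
 */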
4609 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4610                             int cmd, abi_long arg)
4611 {
4612     void *argptr;
4613     struct dm_ioctl *host_dm;
4614     abi_long guest_data;
4615     uint32_t guest_data_size;
4616     int target_size;
4617     const argtype *arg_type = ie->arg_type;
4618     abi_long ret;
4619     void *big_buf = NULL;
4620     char *host_data;
4621 
4622     arg_type++;
4623     target_size = thunk_type_size(arg_type, 0);
4624     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4625     if (!argptr) {
4626         ret = -TARGET_EFAULT;
4627         goto out;
4628     }
4629     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4630     unlock_user(argptr, arg, 0);
4631 
4632     /* buf_temp is too small, so fetch things into a bigger buffer */
4633     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4634     memcpy(big_buf, buf_temp, target_size);
4635     buf_temp = big_buf;
4636     host_dm = big_buf;
4637 
4638     guest_data = arg + host_dm->data_start;
4639     if ((guest_data - arg) < 0) {
4640         ret = -TARGET_EINVAL;
4641         goto out;
4642     }
4643     guest_data_size = host_dm->data_size - host_dm->data_start;
4644     host_data = (char*)host_dm + host_dm->data_start;
4645 
4646     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4647     if (!argptr) {
4648         ret = -TARGET_EFAULT;
4649         goto out;
4650     }
4651 
4652     switch (ie->host_cmd) {
4653     case DM_REMOVE_ALL:
4654     case DM_LIST_DEVICES:
4655     case DM_DEV_CREATE:
4656     case DM_DEV_REMOVE:
4657     case DM_DEV_SUSPEND:
4658     case DM_DEV_STATUS:
4659     case DM_DEV_WAIT:
4660     case DM_TABLE_STATUS:
4661     case DM_TABLE_CLEAR:
4662     case DM_TABLE_DEPS:
4663     case DM_LIST_VERSIONS:
4664         /* no input data */
4665         break;
4666     case DM_DEV_RENAME:
4667     case DM_DEV_SET_GEOMETRY:
4668         /* data contains only strings */
4669         memcpy(host_data, argptr, guest_data_size);
4670         break;
4671     case DM_TARGET_MSG:
4672         memcpy(host_data, argptr, guest_data_size);
4673         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4674         break;
4675     case DM_TABLE_LOAD:
4676     {
4677         void *gspec = argptr;
4678         void *cur_data = host_data;
4679         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4680         int spec_size = thunk_type_size(arg_type, 0);
4681         int i;
4682 
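        /*
         * The guest payload is a sequence of dm_target_spec structures,
         * each followed by its parameter string.  Convert each spec and
         * recompute its 'next' offset for the host layout.
         */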
4683         for (i = 0; i < host_dm->target_count; i++) {
4684             struct dm_target_spec *spec = cur_data;
4685             uint32_t next;
4686             int slen;
4687 
4688             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4689             slen = strlen((char*)gspec + spec_size) + 1;
4690             next = spec->next;
4691             spec->next = sizeof(*spec) + slen;
4692             strcpy((char*)&spec[1], gspec + spec_size);
4693             gspec += next;
4694             cur_data += spec->next;
4695         }
4696         break;
4697     }
4698     default:
4699         ret = -TARGET_EINVAL;
4700         unlock_user(argptr, guest_data, 0);
4701         goto out;
4702     }
4703     unlock_user(argptr, guest_data, 0);
4704 
4705     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4706     if (!is_error(ret)) {
4707         guest_data = arg + host_dm->data_start;
4708         guest_data_size = host_dm->data_size - host_dm->data_start;
4709         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4710         switch (ie->host_cmd) {
4711         case DM_REMOVE_ALL:
4712         case DM_DEV_CREATE:
4713         case DM_DEV_REMOVE:
4714         case DM_DEV_RENAME:
4715         case DM_DEV_SUSPEND:
4716         case DM_DEV_STATUS:
4717         case DM_TABLE_LOAD:
4718         case DM_TABLE_CLEAR:
4719         case DM_TARGET_MSG:
4720         case DM_DEV_SET_GEOMETRY:
4721             /* no return data */
4722             break;
4723         case DM_LIST_DEVICES:
4724         {
4725             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4726             uint32_t remaining_data = guest_data_size;
4727             void *cur_data = argptr;
4728             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4729             int nl_size = 12; /* can't use thunk_size due to alignment */
4730 
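            /*
             * Walk the host dm_name_list chain via its 'next' offsets,
             * converting each entry and its name string into the guest
             * buffer with offsets recomputed for the target layout.  If
             * the guest buffer is too small, set DM_BUFFER_FULL_FLAG,
             * mirroring the kernel's behaviour.
             */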
4731             while (1) {
4732                 uint32_t next = nl->next;
4733                 if (next) {
4734                     nl->next = nl_size + (strlen(nl->name) + 1);
4735                 }
4736                 if (remaining_data < nl->next) {
4737                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4738                     break;
4739                 }
4740                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4741                 strcpy(cur_data + nl_size, nl->name);
4742                 cur_data += nl->next;
4743                 remaining_data -= nl->next;
4744                 if (!next) {
4745                     break;
4746                 }
4747                 nl = (void*)nl + next;
4748             }
4749             break;
4750         }
4751         case DM_DEV_WAIT:
4752         case DM_TABLE_STATUS:
4753         {
4754             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4755             void *cur_data = argptr;
4756             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4757             int spec_size = thunk_type_size(arg_type, 0);
4758             int i;
4759 
4760             for (i = 0; i < host_dm->target_count; i++) {
4761                 uint32_t next = spec->next;
4762                 int slen = strlen((char*)&spec[1]) + 1;
4763                 spec->next = (cur_data - argptr) + spec_size + slen;
4764                 if (guest_data_size < spec->next) {
4765                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4766                     break;
4767                 }
4768                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4769                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4770                 cur_data = argptr + spec->next;
4771                 spec = (void*)host_dm + host_dm->data_start + next;
4772             }
4773             break;
4774         }
4775         case DM_TABLE_DEPS:
4776         {
4777             void *hdata = (void*)host_dm + host_dm->data_start;
4778             int count = *(uint32_t*)hdata;
4779             uint64_t *hdev = hdata + 8;
4780             uint64_t *gdev = argptr + 8;
4781             int i;
4782 
4783             *(uint32_t*)argptr = tswap32(count);
4784             for (i = 0; i < count; i++) {
4785                 *gdev = tswap64(*hdev);
4786                 gdev++;
4787                 hdev++;
4788             }
4789             break;
4790         }
4791         case DM_LIST_VERSIONS:
4792         {
4793             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4794             uint32_t remaining_data = guest_data_size;
4795             void *cur_data = argptr;
4796             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4797             int vers_size = thunk_type_size(arg_type, 0);
4798 
4799             while (1) {
4800                 uint32_t next = vers->next;
4801                 if (next) {
4802                     vers->next = vers_size + (strlen(vers->name) + 1);
4803                 }
4804                 if (remaining_data < vers->next) {
4805                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4806                     break;
4807                 }
4808                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4809                 strcpy(cur_data + vers_size, vers->name);
4810                 cur_data += vers->next;
4811                 remaining_data -= vers->next;
4812                 if (!next) {
4813                     break;
4814                 }
4815                 vers = (void*)vers + next;
4816             }
4817             break;
4818         }
4819         default:
4820             unlock_user(argptr, guest_data, 0);
4821             ret = -TARGET_EINVAL;
4822             goto out;
4823         }
4824         unlock_user(argptr, guest_data, guest_data_size);
4825 
4826         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4827         if (!argptr) {
4828             ret = -TARGET_EFAULT;
4829             goto out;
4830         }
4831         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4832         unlock_user(argptr, arg, target_size);
4833     }
4834 out:
4835     g_free(big_buf);
4836     return ret;
4837 }
4838 
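/*
 * BLKPG takes a struct blkpg_ioctl_arg whose 'data' member points to a
 * struct blkpg_partition, so both levels have to be fetched from guest
 * memory and converted; the data pointer is then redirected to the local
 * host copy before issuing the ioctl.
 */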
4839 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4840                                int cmd, abi_long arg)
4841 {
4842     void *argptr;
4843     int target_size;
4844     const argtype *arg_type = ie->arg_type;
4845     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4846     abi_long ret;
4847 
4848     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4849     struct blkpg_partition host_part;
4850 
4851     /* Read and convert blkpg */
4852     arg_type++;
4853     target_size = thunk_type_size(arg_type, 0);
4854     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4855     if (!argptr) {
4856         ret = -TARGET_EFAULT;
4857         goto out;
4858     }
4859     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4860     unlock_user(argptr, arg, 0);
4861 
4862     switch (host_blkpg->op) {
4863     case BLKPG_ADD_PARTITION:
4864     case BLKPG_DEL_PARTITION:
4865         /* payload is struct blkpg_partition */
4866         break;
4867     default:
4868         /* Unknown opcode */
4869         ret = -TARGET_EINVAL;
4870         goto out;
4871     }
4872 
4873     /* Read and convert blkpg->data */
4874     arg = (abi_long)(uintptr_t)host_blkpg->data;
4875     target_size = thunk_type_size(part_arg_type, 0);
4876     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4877     if (!argptr) {
4878         ret = -TARGET_EFAULT;
4879         goto out;
4880     }
4881     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4882     unlock_user(argptr, arg, 0);
4883 
4884     /* Swizzle the data pointer to our local copy and call! */
4885     host_blkpg->data = &host_part;
4886     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4887 
4888 out:
4889     return ret;
4890 }
4891 
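/*
 * Used for the routing-table ioctls (e.g. SIOCADDRT/SIOCDELRT), which pass
 * a struct rtentry whose rt_dev field points to a device name string in
 * guest memory.  The generic thunk cannot follow that pointer, so the
 * struct is converted field by field here and rt_dev is locked separately.
 */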
4892 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4893                                 int fd, int cmd, abi_long arg)
4894 {
4895     const argtype *arg_type = ie->arg_type;
4896     const StructEntry *se;
4897     const argtype *field_types;
4898     const int *dst_offsets, *src_offsets;
4899     int target_size;
4900     void *argptr;
4901     abi_ulong *target_rt_dev_ptr = NULL;
4902     unsigned long *host_rt_dev_ptr = NULL;
4903     abi_long ret;
4904     int i;
4905 
4906     assert(ie->access == IOC_W);
4907     assert(*arg_type == TYPE_PTR);
4908     arg_type++;
4909     assert(*arg_type == TYPE_STRUCT);
4910     target_size = thunk_type_size(arg_type, 0);
4911     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4912     if (!argptr) {
4913         return -TARGET_EFAULT;
4914     }
4915     arg_type++;
4916     assert(*arg_type == (int)STRUCT_rtentry);
4917     se = struct_entries + *arg_type++;
4918     assert(se->convert[0] == NULL);
4919     /* convert the struct here so that we can catch the rt_dev string */
4920     field_types = se->field_types;
4921     dst_offsets = se->field_offsets[THUNK_HOST];
4922     src_offsets = se->field_offsets[THUNK_TARGET];
4923     for (i = 0; i < se->nb_fields; i++) {
4924         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4925             assert(*field_types == TYPE_PTRVOID);
4926             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4927             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4928             if (*target_rt_dev_ptr != 0) {
4929                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4930                                                   tswapal(*target_rt_dev_ptr));
4931                 if (!*host_rt_dev_ptr) {
4932                     unlock_user(argptr, arg, 0);
4933                     return -TARGET_EFAULT;
4934                 }
4935             } else {
4936                 *host_rt_dev_ptr = 0;
4937             }
4938             field_types++;
4939             continue;
4940         }
4941         field_types = thunk_convert(buf_temp + dst_offsets[i],
4942                                     argptr + src_offsets[i],
4943                                     field_types, THUNK_HOST);
4944     }
4945     unlock_user(argptr, arg, 0);
4946 
4947     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4948 
4949     assert(host_rt_dev_ptr != NULL);
4950     assert(target_rt_dev_ptr != NULL);
4951     if (*host_rt_dev_ptr != 0) {
4952         unlock_user((void *)*host_rt_dev_ptr,
4953                     *target_rt_dev_ptr, 0);
4954     }
4955     return ret;
4956 }
4957 
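/*
 * KDSIGACCEPT takes a signal number as its argument, so translate the
 * target signal to the host numbering before issuing the ioctl.
 */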
4958 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4959                                      int fd, int cmd, abi_long arg)
4960 {
4961     int sig = target_to_host_signal(arg);
4962     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4963 }
4964 
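/*
 * SIOCGSTAMP returns the timestamp of the last packet received on the
 * socket.  The host ioctl always fills a struct timeval; depending on
 * whether the guest used the _OLD or the 64-bit variant of the command,
 * the result is copied back in the matching target format.
 */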
4965 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
4966                                     int fd, int cmd, abi_long arg)
4967 {
4968     struct timeval tv;
4969     abi_long ret;
4970 
4971     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
4972     if (is_error(ret)) {
4973         return ret;
4974     }
4975 
4976     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
4977         if (copy_to_user_timeval(arg, &tv)) {
4978             return -TARGET_EFAULT;
4979         }
4980     } else {
4981         if (copy_to_user_timeval64(arg, &tv)) {
4982             return -TARGET_EFAULT;
4983         }
4984     }
4985 
4986     return ret;
4987 }
4988 
4989 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
4990                                       int fd, int cmd, abi_long arg)
4991 {
4992     struct timespec ts;
4993     abi_long ret;
4994 
4995     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
4996     if (is_error(ret)) {
4997         return ret;
4998     }
4999 
5000     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5001         if (host_to_target_timespec(arg, &ts)) {
5002             return -TARGET_EFAULT;
5003         }
5004     } else {
5005         if (host_to_target_timespec64(arg, &ts)) {
5006             return -TARGET_EFAULT;
5007         }
5008     }
5009 
5010     return ret;
5011 }
5012 
5013 #ifdef TIOCGPTPEER
5014 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5015                                      int fd, int cmd, abi_long arg)
5016 {
5017     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5018     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5019 }
5020 #endif
5021 
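/*
 * The ioctl table is generated from ioctls.h.  Plain IOCTL entries are
 * converted generically from their argument type description,
 * IOCTL_SPECIAL entries name a custom handler, and IOCTL_IGNORE entries
 * have a zero host_cmd and are rejected with -TARGET_ENOSYS.  The list is
 * terminated by an entry with a zero target_cmd.
 */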
5022 static IOCTLEntry ioctl_entries[] = {
5023 #define IOCTL(cmd, access, ...) \
5024     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5025 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5026     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5027 #define IOCTL_IGNORE(cmd) \
5028     { TARGET_ ## cmd, 0, #cmd },
5029 #include "ioctls.h"
5030     { 0, 0, },
5031 };
5032 
5033 /* ??? Implement proper locking for ioctls.  */
5034 /* do_ioctl() must return target values and target errnos. */
5035 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5036 {
5037     const IOCTLEntry *ie;
5038     const argtype *arg_type;
5039     abi_long ret;
5040     uint8_t buf_temp[MAX_STRUCT_SIZE];
5041     int target_size;
5042     void *argptr;
5043 
5044     ie = ioctl_entries;
5045     for(;;) {
5046         if (ie->target_cmd == 0) {
5047             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5048             return -TARGET_ENOSYS;
5049         }
5050         if (ie->target_cmd == cmd)
5051             break;
5052         ie++;
5053     }
5054     arg_type = ie->arg_type;
5055     if (ie->do_ioctl) {
5056         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5057     } else if (!ie->host_cmd) {
5058         /* Some architectures define BSD ioctls in their headers
5059            that are not implemented in Linux.  */
5060         return -TARGET_ENOSYS;
5061     }
5062 
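    /*
     * Generic conversion path: for pointer arguments the target struct is
     * converted through buf_temp according to ie->access -- copied back to
     * the guest for IOC_R, copied in from the guest for IOC_W, and both
     * for IOC_RW.
     */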
5063     switch(arg_type[0]) {
5064     case TYPE_NULL:
5065         /* no argument */
5066         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5067         break;
5068     case TYPE_PTRVOID:
5069     case TYPE_INT:
5070         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5071         break;
5072     case TYPE_PTR:
5073         arg_type++;
5074         target_size = thunk_type_size(arg_type, 0);
5075         switch(ie->access) {
5076         case IOC_R:
5077             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5078             if (!is_error(ret)) {
5079                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5080                 if (!argptr)
5081                     return -TARGET_EFAULT;
5082                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5083                 unlock_user(argptr, arg, target_size);
5084             }
5085             break;
5086         case IOC_W:
5087             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5088             if (!argptr)
5089                 return -TARGET_EFAULT;
5090             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5091             unlock_user(argptr, arg, 0);
5092             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5093             break;
5094         default:
5095         case IOC_RW:
5096             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5097             if (!argptr)
5098                 return -TARGET_EFAULT;
5099             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5100             unlock_user(argptr, arg, 0);
5101             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5102             if (!is_error(ret)) {
5103                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5104                 if (!argptr)
5105                     return -TARGET_EFAULT;
5106                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5107                 unlock_user(argptr, arg, target_size);
5108             }
5109             break;
5110         }
5111         break;
5112     default:
5113         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5114                  (long)cmd, arg_type[0]);
5115         ret = -TARGET_ENOSYS;
5116         break;
5117     }
5118     return ret;
5119 }
5120 
5121 static const bitmask_transtbl iflag_tbl[] = {
5122         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5123         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5124         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5125         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5126         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5127         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5128         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5129         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5130         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5131         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5132         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5133         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5134         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5135         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5136         { 0, 0, 0, 0 }
5137 };
5138 
5139 static const bitmask_transtbl oflag_tbl[] = {
5140 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5141 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5142 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5143 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5144 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5145 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5146 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5147 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5148 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5149 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5150 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5151 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5152 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5153 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5154 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5155 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5156 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5157 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5158 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5159 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5160 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5161 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5162 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5163 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5164 	{ 0, 0, 0, 0 }
5165 };
5166 
5167 static const bitmask_transtbl cflag_tbl[] = {
5168 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5169 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5170 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5171 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5172 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5173 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5174 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5175 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5176 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5177 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5178 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5179 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5180 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5181 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5182 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5183 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5184 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5185 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5186 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5187 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5188 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5189 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5190 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5191 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5192 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5193 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5194 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5195 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5196 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5197 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5198 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5199 	{ 0, 0, 0, 0 }
5200 };
5201 
5202 static const bitmask_transtbl lflag_tbl[] = {
5203 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5204 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5205 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5206 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5207 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5208 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5209 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5210 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5211 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5212 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5213 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5214 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5215 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5216 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5217 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5218 	{ 0, 0, 0, 0 }
5219 };
5220 
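/*
 * The iflag/oflag/cflag/lflag tables above translate the individual
 * termios flag bits between target and host values; the c_cc control
 * characters are remapped index by index because the index layout can
 * differ between target and host.
 */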
5221 static void target_to_host_termios (void *dst, const void *src)
5222 {
5223     struct host_termios *host = dst;
5224     const struct target_termios *target = src;
5225 
5226     host->c_iflag =
5227         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5228     host->c_oflag =
5229         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5230     host->c_cflag =
5231         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5232     host->c_lflag =
5233         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5234     host->c_line = target->c_line;
5235 
5236     memset(host->c_cc, 0, sizeof(host->c_cc));
5237     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5238     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5239     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5240     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5241     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5242     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5243     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5244     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5245     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5246     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5247     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5248     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5249     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5250     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5251     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5252     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5253     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5254 }
5255 
5256 static void host_to_target_termios (void *dst, const void *src)
5257 {
5258     struct target_termios *target = dst;
5259     const struct host_termios *host = src;
5260 
5261     target->c_iflag =
5262         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5263     target->c_oflag =
5264         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5265     target->c_cflag =
5266         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5267     target->c_lflag =
5268         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5269     target->c_line = host->c_line;
5270 
5271     memset(target->c_cc, 0, sizeof(target->c_cc));
5272     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5273     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5274     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5275     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5276     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5277     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5278     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5279     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5280     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5281     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5282     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5283     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5284     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5285     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5286     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5287     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5288     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5289 }
5290 
5291 static const StructEntry struct_termios_def = {
5292     .convert = { host_to_target_termios, target_to_host_termios },
5293     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5294     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5295 };
5296 
5297 static bitmask_transtbl mmap_flags_tbl[] = {
5298     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5299     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5300     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5301     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5302       MAP_ANONYMOUS, MAP_ANONYMOUS },
5303     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5304       MAP_GROWSDOWN, MAP_GROWSDOWN },
5305     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5306       MAP_DENYWRITE, MAP_DENYWRITE },
5307     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5308       MAP_EXECUTABLE, MAP_EXECUTABLE },
5309     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5310     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5311       MAP_NORESERVE, MAP_NORESERVE },
5312     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5313     /* MAP_STACK has been ignored by the kernel for quite some time.
5314        Recognize it for the target insofar as we do not want to pass
5315        it through to the host.  */
5316     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5317     { 0, 0, 0, 0 }
5318 };
5319 
5320 #if defined(TARGET_I386)
5321 
5322 /* NOTE: there is really only one LDT shared by all the threads */
5323 static uint8_t *ldt_table;
5324 
5325 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5326 {
5327     int size;
5328     void *p;
5329 
5330     if (!ldt_table)
5331         return 0;
5332     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5333     if (size > bytecount)
5334         size = bytecount;
5335     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5336     if (!p)
5337         return -TARGET_EFAULT;
5338     /* ??? Should this be byteswapped?  */
5339     memcpy(p, ldt_table, size);
5340     unlock_user(p, ptr, size);
5341     return size;
5342 }
5343 
5344 /* XXX: add locking support */
5345 static abi_long write_ldt(CPUX86State *env,
5346                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5347 {
5348     struct target_modify_ldt_ldt_s ldt_info;
5349     struct target_modify_ldt_ldt_s *target_ldt_info;
5350     int seg_32bit, contents, read_exec_only, limit_in_pages;
5351     int seg_not_present, useable, lm;
5352     uint32_t *lp, entry_1, entry_2;
5353 
5354     if (bytecount != sizeof(ldt_info))
5355         return -TARGET_EINVAL;
5356     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5357         return -TARGET_EFAULT;
5358     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5359     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5360     ldt_info.limit = tswap32(target_ldt_info->limit);
5361     ldt_info.flags = tswap32(target_ldt_info->flags);
5362     unlock_user_struct(target_ldt_info, ptr, 0);
5363 
5364     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5365         return -TARGET_EINVAL;
5366     seg_32bit = ldt_info.flags & 1;
5367     contents = (ldt_info.flags >> 1) & 3;
5368     read_exec_only = (ldt_info.flags >> 3) & 1;
5369     limit_in_pages = (ldt_info.flags >> 4) & 1;
5370     seg_not_present = (ldt_info.flags >> 5) & 1;
5371     useable = (ldt_info.flags >> 6) & 1;
5372 #ifdef TARGET_ABI32
5373     lm = 0;
5374 #else
5375     lm = (ldt_info.flags >> 7) & 1;
5376 #endif
5377     if (contents == 3) {
5378         if (oldmode)
5379             return -TARGET_EINVAL;
5380         if (seg_not_present == 0)
5381             return -TARGET_EINVAL;
5382     }
5383     /* allocate the LDT */
5384     if (!ldt_table) {
5385         env->ldt.base = target_mmap(0,
5386                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5387                                     PROT_READ|PROT_WRITE,
5388                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5389         if (env->ldt.base == -1)
5390             return -TARGET_ENOMEM;
5391         memset(g2h(env->ldt.base), 0,
5392                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5393         env->ldt.limit = 0xffff;
5394         ldt_table = g2h(env->ldt.base);
5395     }
5396 
5397     /* NOTE: same code as Linux kernel */
5398     /* Allow LDTs to be cleared by the user. */
5399     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5400         if (oldmode ||
5401             (contents == 0		&&
5402              read_exec_only == 1	&&
5403              seg_32bit == 0		&&
5404              limit_in_pages == 0	&&
5405              seg_not_present == 1	&&
5406              useable == 0 )) {
5407             entry_1 = 0;
5408             entry_2 = 0;
5409             goto install;
5410         }
5411     }
5412 
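    /*
     * Pack the descriptor in the standard x86 format: entry_1 holds
     * base[15:0] in its high half and limit[15:0] in its low half;
     * entry_2 holds base[31:24], the flag bits, limit[19:16] and
     * base[23:16].  The constant 0x7000 sets the S bit and DPL=3
     * (user segment).
     */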
5413     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5414         (ldt_info.limit & 0x0ffff);
5415     entry_2 = (ldt_info.base_addr & 0xff000000) |
5416         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5417         (ldt_info.limit & 0xf0000) |
5418         ((read_exec_only ^ 1) << 9) |
5419         (contents << 10) |
5420         ((seg_not_present ^ 1) << 15) |
5421         (seg_32bit << 22) |
5422         (limit_in_pages << 23) |
5423         (lm << 21) |
5424         0x7000;
5425     if (!oldmode)
5426         entry_2 |= (useable << 20);
5427 
5428     /* Install the new entry ...  */
5429 install:
5430     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5431     lp[0] = tswap32(entry_1);
5432     lp[1] = tswap32(entry_2);
5433     return 0;
5434 }
5435 
5436 /* specific and weird i386 syscalls */
5437 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5438                               unsigned long bytecount)
5439 {
5440     abi_long ret;
5441 
5442     switch (func) {
5443     case 0:
5444         ret = read_ldt(ptr, bytecount);
5445         break;
5446     case 1:
5447         ret = write_ldt(env, ptr, bytecount, 1);
5448         break;
5449     case 0x11:
5450         ret = write_ldt(env, ptr, bytecount, 0);
5451         break;
5452     default:
5453         ret = -TARGET_ENOSYS;
5454         break;
5455     }
5456     return ret;
5457 }
5458 
5459 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5460 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5461 {
5462     uint64_t *gdt_table = g2h(env->gdt.base);
5463     struct target_modify_ldt_ldt_s ldt_info;
5464     struct target_modify_ldt_ldt_s *target_ldt_info;
5465     int seg_32bit, contents, read_exec_only, limit_in_pages;
5466     int seg_not_present, useable, lm;
5467     uint32_t *lp, entry_1, entry_2;
5468     int i;
5469 
5470     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5471     if (!target_ldt_info)
5472         return -TARGET_EFAULT;
5473     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5474     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5475     ldt_info.limit = tswap32(target_ldt_info->limit);
5476     ldt_info.flags = tswap32(target_ldt_info->flags);
5477     if (ldt_info.entry_number == -1) {
5478         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5479             if (gdt_table[i] == 0) {
5480                 ldt_info.entry_number = i;
5481                 target_ldt_info->entry_number = tswap32(i);
5482                 break;
5483             }
5484         }
5485     }
5486     unlock_user_struct(target_ldt_info, ptr, 1);
5487 
5488     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5489         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5490            return -TARGET_EINVAL;
5491     seg_32bit = ldt_info.flags & 1;
5492     contents = (ldt_info.flags >> 1) & 3;
5493     read_exec_only = (ldt_info.flags >> 3) & 1;
5494     limit_in_pages = (ldt_info.flags >> 4) & 1;
5495     seg_not_present = (ldt_info.flags >> 5) & 1;
5496     useable = (ldt_info.flags >> 6) & 1;
5497 #ifdef TARGET_ABI32
5498     lm = 0;
5499 #else
5500     lm = (ldt_info.flags >> 7) & 1;
5501 #endif
5502 
5503     if (contents == 3) {
5504         if (seg_not_present == 0)
5505             return -TARGET_EINVAL;
5506     }
5507 
5508     /* NOTE: same code as Linux kernel */
5509     /* Allow LDTs to be cleared by the user. */
5510     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5511         if ((contents == 0             &&
5512              read_exec_only == 1       &&
5513              seg_32bit == 0            &&
5514              limit_in_pages == 0       &&
5515              seg_not_present == 1      &&
5516              useable == 0 )) {
5517             entry_1 = 0;
5518             entry_2 = 0;
5519             goto install;
5520         }
5521     }
5522 
5523     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5524         (ldt_info.limit & 0x0ffff);
5525     entry_2 = (ldt_info.base_addr & 0xff000000) |
5526         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5527         (ldt_info.limit & 0xf0000) |
5528         ((read_exec_only ^ 1) << 9) |
5529         (contents << 10) |
5530         ((seg_not_present ^ 1) << 15) |
5531         (seg_32bit << 22) |
5532         (limit_in_pages << 23) |
5533         (useable << 20) |
5534         (lm << 21) |
5535         0x7000;
5536 
5537     /* Install the new entry ...  */
5538 install:
5539     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5540     lp[0] = tswap32(entry_1);
5541     lp[1] = tswap32(entry_2);
5542     return 0;
5543 }
5544 
5545 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5546 {
5547     struct target_modify_ldt_ldt_s *target_ldt_info;
5548     uint64_t *gdt_table = g2h(env->gdt.base);
5549     uint32_t base_addr, limit, flags;
5550     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5551     int seg_not_present, useable, lm;
5552     uint32_t *lp, entry_1, entry_2;
5553 
5554     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5555     if (!target_ldt_info)
5556         return -TARGET_EFAULT;
5557     idx = tswap32(target_ldt_info->entry_number);
5558     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5559         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5560         unlock_user_struct(target_ldt_info, ptr, 1);
5561         return -TARGET_EINVAL;
5562     }
5563     lp = (uint32_t *)(gdt_table + idx);
5564     entry_1 = tswap32(lp[0]);
5565     entry_2 = tswap32(lp[1]);
5566 
5567     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5568     contents = (entry_2 >> 10) & 3;
5569     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5570     seg_32bit = (entry_2 >> 22) & 1;
5571     limit_in_pages = (entry_2 >> 23) & 1;
5572     useable = (entry_2 >> 20) & 1;
5573 #ifdef TARGET_ABI32
5574     lm = 0;
5575 #else
5576     lm = (entry_2 >> 21) & 1;
5577 #endif
5578     flags = (seg_32bit << 0) | (contents << 1) |
5579         (read_exec_only << 3) | (limit_in_pages << 4) |
5580         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5581     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5582     base_addr = (entry_1 >> 16) |
5583         (entry_2 & 0xff000000) |
5584         ((entry_2 & 0xff) << 16);
5585     target_ldt_info->base_addr = tswapal(base_addr);
5586     target_ldt_info->limit = tswap32(limit);
5587     target_ldt_info->flags = tswap32(flags);
5588     unlock_user_struct(target_ldt_info, ptr, 1);
5589     return 0;
5590 }
5591 #endif /* TARGET_I386 && TARGET_ABI32 */
5592 
5593 #ifndef TARGET_ABI32
5594 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5595 {
5596     abi_long ret = 0;
5597     abi_ulong val;
5598     int idx;
5599 
5600     switch(code) {
5601     case TARGET_ARCH_SET_GS:
5602     case TARGET_ARCH_SET_FS:
5603         if (code == TARGET_ARCH_SET_GS)
5604             idx = R_GS;
5605         else
5606             idx = R_FS;
5607         cpu_x86_load_seg(env, idx, 0);
5608         env->segs[idx].base = addr;
5609         break;
5610     case TARGET_ARCH_GET_GS:
5611     case TARGET_ARCH_GET_FS:
5612         if (code == TARGET_ARCH_GET_GS)
5613             idx = R_GS;
5614         else
5615             idx = R_FS;
5616         val = env->segs[idx].base;
5617         if (put_user(val, addr, abi_ulong))
5618             ret = -TARGET_EFAULT;
5619         break;
5620     default:
5621         ret = -TARGET_EINVAL;
5622         break;
5623     }
5624     return ret;
5625 }
5626 #endif
5627 
5628 #endif /* defined(TARGET_I386) */
5629 
5630 #define NEW_STACK_SIZE 0x40000
5631 
5632 
5633 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5634 typedef struct {
5635     CPUArchState *env;
5636     pthread_mutex_t mutex;
5637     pthread_cond_t cond;
5638     pthread_t thread;
5639     uint32_t tid;
5640     abi_ulong child_tidptr;
5641     abi_ulong parent_tidptr;
5642     sigset_t sigmask;
5643 } new_thread_info;
5644 
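/*
 * Start routine for threads created by do_fork() with CLONE_VM.  The new
 * thread registers itself with RCU and TCG, publishes its TID, signals the
 * parent through info->cond, and then blocks on clone_lock until the
 * parent has finished the remaining setup before entering cpu_loop().
 */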
5645 static void *clone_func(void *arg)
5646 {
5647     new_thread_info *info = arg;
5648     CPUArchState *env;
5649     CPUState *cpu;
5650     TaskState *ts;
5651 
5652     rcu_register_thread();
5653     tcg_register_thread();
5654     env = info->env;
5655     cpu = env_cpu(env);
5656     thread_cpu = cpu;
5657     ts = (TaskState *)cpu->opaque;
5658     info->tid = sys_gettid();
5659     task_settid(ts);
5660     if (info->child_tidptr)
5661         put_user_u32(info->tid, info->child_tidptr);
5662     if (info->parent_tidptr)
5663         put_user_u32(info->tid, info->parent_tidptr);
5664     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5665     /* Enable signals.  */
5666     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5667     /* Signal to the parent that we're ready.  */
5668     pthread_mutex_lock(&info->mutex);
5669     pthread_cond_broadcast(&info->cond);
5670     pthread_mutex_unlock(&info->mutex);
5671     /* Wait until the parent has finished initializing the TLS state.  */
5672     pthread_mutex_lock(&clone_lock);
5673     pthread_mutex_unlock(&clone_lock);
5674     cpu_loop(env);
5675     /* never exits */
5676     return NULL;
5677 }
5678 
5679 /* do_fork() must return host values and target errnos (unlike most
5680    do_*() functions). */
5681 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5682                    abi_ulong parent_tidptr, target_ulong newtls,
5683                    abi_ulong child_tidptr)
5684 {
5685     CPUState *cpu = env_cpu(env);
5686     int ret;
5687     TaskState *ts;
5688     CPUState *new_cpu;
5689     CPUArchState *new_env;
5690     sigset_t sigmask;
5691 
5692     flags &= ~CLONE_IGNORED_FLAGS;
5693 
5694     /* Emulate vfork() with fork() */
5695     if (flags & CLONE_VFORK)
5696         flags &= ~(CLONE_VFORK | CLONE_VM);
5697 
5698     if (flags & CLONE_VM) {
5699         TaskState *parent_ts = (TaskState *)cpu->opaque;
5700         new_thread_info info;
5701         pthread_attr_t attr;
5702 
5703         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5704             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5705             return -TARGET_EINVAL;
5706         }
5707 
5708         ts = g_new0(TaskState, 1);
5709         init_task_state(ts);
5710 
5711         /* Grab a mutex so that thread setup appears atomic.  */
5712         pthread_mutex_lock(&clone_lock);
5713 
5714         /* we create a new CPU instance. */
5715         new_env = cpu_copy(env);
5716         /* Init regs that differ from the parent.  */
5717         cpu_clone_regs(new_env, newsp);
5718         new_cpu = env_cpu(new_env);
5719         new_cpu->opaque = ts;
5720         ts->bprm = parent_ts->bprm;
5721         ts->info = parent_ts->info;
5722         ts->signal_mask = parent_ts->signal_mask;
5723 
5724         if (flags & CLONE_CHILD_CLEARTID) {
5725             ts->child_tidptr = child_tidptr;
5726         }
5727 
5728         if (flags & CLONE_SETTLS) {
5729             cpu_set_tls (new_env, newtls);
5730         }
5731 
5732         memset(&info, 0, sizeof(info));
5733         pthread_mutex_init(&info.mutex, NULL);
5734         pthread_mutex_lock(&info.mutex);
5735         pthread_cond_init(&info.cond, NULL);
5736         info.env = new_env;
5737         if (flags & CLONE_CHILD_SETTID) {
5738             info.child_tidptr = child_tidptr;
5739         }
5740         if (flags & CLONE_PARENT_SETTID) {
5741             info.parent_tidptr = parent_tidptr;
5742         }
5743 
5744         ret = pthread_attr_init(&attr);
5745         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5746         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5747         /* It is not safe to deliver signals until the child has finished
5748            initializing, so temporarily block all signals.  */
5749         sigfillset(&sigmask);
5750         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5751         cpu->random_seed = qemu_guest_random_seed_thread_part1();
5752 
5753         /* If this is our first additional thread, we need to ensure we
5754          * generate code for parallel execution and flush old translations.
5755          */
5756         if (!parallel_cpus) {
5757             parallel_cpus = true;
5758             tb_flush(cpu);
5759         }
5760 
5761         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5762         /* TODO: Free new CPU state if thread creation failed.  */
5763 
5764         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5765         pthread_attr_destroy(&attr);
5766         if (ret == 0) {
5767             /* Wait for the child to initialize.  */
5768             pthread_cond_wait(&info.cond, &info.mutex);
5769             ret = info.tid;
5770         } else {
5771             ret = -1;
5772         }
5773         pthread_mutex_unlock(&info.mutex);
5774         pthread_cond_destroy(&info.cond);
5775         pthread_mutex_destroy(&info.mutex);
5776         pthread_mutex_unlock(&clone_lock);
5777     } else {
5778         /* if CLONE_VM is not set, we consider it a fork */
5779         if (flags & CLONE_INVALID_FORK_FLAGS) {
5780             return -TARGET_EINVAL;
5781         }
5782 
5783         /* We can't support custom termination signals */
5784         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5785             return -TARGET_EINVAL;
5786         }
5787 
5788         if (block_signals()) {
5789             return -TARGET_ERESTARTSYS;
5790         }
5791 
5792         fork_start();
5793         ret = fork();
5794         if (ret == 0) {
5795             /* Child Process.  */
5796             cpu_clone_regs(env, newsp);
5797             fork_end(1);
5798             /* There is a race condition here.  The parent process could
5799                theoretically read the TID in the child process before the child
5800                tid is set.  This would require using either ptrace
5801                (not implemented) or having *_tidptr point at a shared memory
5802                mapping.  We can't repeat the spinlock hack used above because
5803                the child process gets its own copy of the lock.  */
5804             if (flags & CLONE_CHILD_SETTID)
5805                 put_user_u32(sys_gettid(), child_tidptr);
5806             if (flags & CLONE_PARENT_SETTID)
5807                 put_user_u32(sys_gettid(), parent_tidptr);
5808             ts = (TaskState *)cpu->opaque;
5809             if (flags & CLONE_SETTLS)
5810                 cpu_set_tls (env, newtls);
5811             if (flags & CLONE_CHILD_CLEARTID)
5812                 ts->child_tidptr = child_tidptr;
5813         } else {
5814             fork_end(0);
5815         }
5816     }
5817     return ret;
5818 }
5819 
5820 /* warning: doesn't handle Linux-specific flags... */
5821 static int target_to_host_fcntl_cmd(int cmd)
5822 {
5823     int ret;
5824 
5825     switch(cmd) {
5826     case TARGET_F_DUPFD:
5827     case TARGET_F_GETFD:
5828     case TARGET_F_SETFD:
5829     case TARGET_F_GETFL:
5830     case TARGET_F_SETFL:
5831         ret = cmd;
5832         break;
5833     case TARGET_F_GETLK:
5834         ret = F_GETLK64;
5835         break;
5836     case TARGET_F_SETLK:
5837         ret = F_SETLK64;
5838         break;
5839     case TARGET_F_SETLKW:
5840         ret = F_SETLKW64;
5841         break;
5842     case TARGET_F_GETOWN:
5843         ret = F_GETOWN;
5844         break;
5845     case TARGET_F_SETOWN:
5846         ret = F_SETOWN;
5847         break;
5848     case TARGET_F_GETSIG:
5849         ret = F_GETSIG;
5850         break;
5851     case TARGET_F_SETSIG:
5852         ret = F_SETSIG;
5853         break;
5854 #if TARGET_ABI_BITS == 32
5855     case TARGET_F_GETLK64:
5856         ret = F_GETLK64;
5857         break;
5858     case TARGET_F_SETLK64:
5859         ret = F_SETLK64;
5860         break;
5861     case TARGET_F_SETLKW64:
5862         ret = F_SETLKW64;
5863         break;
5864 #endif
5865     case TARGET_F_SETLEASE:
5866         ret = F_SETLEASE;
5867         break;
5868     case TARGET_F_GETLEASE:
5869         ret = F_GETLEASE;
5870         break;
5871 #ifdef F_DUPFD_CLOEXEC
5872     case TARGET_F_DUPFD_CLOEXEC:
5873         ret = F_DUPFD_CLOEXEC;
5874         break;
5875 #endif
5876     case TARGET_F_NOTIFY:
5877         ret = F_NOTIFY;
5878         break;
5879 #ifdef F_GETOWN_EX
5880     case TARGET_F_GETOWN_EX:
5881         ret = F_GETOWN_EX;
5882         break;
5883 #endif
5884 #ifdef F_SETOWN_EX
5885     case TARGET_F_SETOWN_EX:
5886         ret = F_SETOWN_EX;
5887         break;
5888 #endif
5889 #ifdef F_SETPIPE_SZ
5890     case TARGET_F_SETPIPE_SZ:
5891         ret = F_SETPIPE_SZ;
5892         break;
5893     case TARGET_F_GETPIPE_SZ:
5894         ret = F_GETPIPE_SZ;
5895         break;
5896 #endif
5897     default:
5898         ret = -TARGET_EINVAL;
5899         break;
5900     }
5901 
5902 #if defined(__powerpc64__)
5903     /* On PPC64, the glibc headers have the F_*LK* commands defined as 12, 13
5904      * and 14, which are not supported by the kernel. The glibc fcntl call
5905      * actually adjusts them to 5, 6 and 7 before making the syscall(). Since
5906      * we make the syscall directly, adjust to what is supported by the kernel.
5907      */
5908     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5909         ret -= F_GETLK64 - 5;
5910     }
5911 #endif
5912 
5913     return ret;
5914 }
5915 
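/*
 * FLOCK_TRANSTBL is expanded twice with different definitions of
 * TRANSTBL_CONVERT (an X-macro) to generate both directions of the
 * flock type translation below.
 */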
5916 #define FLOCK_TRANSTBL \
5917     switch (type) { \
5918     TRANSTBL_CONVERT(F_RDLCK); \
5919     TRANSTBL_CONVERT(F_WRLCK); \
5920     TRANSTBL_CONVERT(F_UNLCK); \
5921     TRANSTBL_CONVERT(F_EXLCK); \
5922     TRANSTBL_CONVERT(F_SHLCK); \
5923     }
5924 
5925 static int target_to_host_flock(int type)
5926 {
5927 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5928     FLOCK_TRANSTBL
5929 #undef  TRANSTBL_CONVERT
5930     return -TARGET_EINVAL;
5931 }
5932 
5933 static int host_to_target_flock(int type)
5934 {
5935 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5936     FLOCK_TRANSTBL
5937 #undef  TRANSTBL_CONVERT
5938     /* if we don't know how to convert the value coming
5939      * from the host, we copy it to the target field as-is
5940      */
5941     return type;
5942 }
5943 
5944 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5945                                             abi_ulong target_flock_addr)
5946 {
5947     struct target_flock *target_fl;
5948     int l_type;
5949 
5950     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5951         return -TARGET_EFAULT;
5952     }
5953 
5954     __get_user(l_type, &target_fl->l_type);
5955     l_type = target_to_host_flock(l_type);
5956     if (l_type < 0) {
5957         return l_type;
5958     }
5959     fl->l_type = l_type;
5960     __get_user(fl->l_whence, &target_fl->l_whence);
5961     __get_user(fl->l_start, &target_fl->l_start);
5962     __get_user(fl->l_len, &target_fl->l_len);
5963     __get_user(fl->l_pid, &target_fl->l_pid);
5964     unlock_user_struct(target_fl, target_flock_addr, 0);
5965     return 0;
5966 }
5967 
5968 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5969                                           const struct flock64 *fl)
5970 {
5971     struct target_flock *target_fl;
5972     short l_type;
5973 
5974     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5975         return -TARGET_EFAULT;
5976     }
5977 
5978     l_type = host_to_target_flock(fl->l_type);
5979     __put_user(l_type, &target_fl->l_type);
5980     __put_user(fl->l_whence, &target_fl->l_whence);
5981     __put_user(fl->l_start, &target_fl->l_start);
5982     __put_user(fl->l_len, &target_fl->l_len);
5983     __put_user(fl->l_pid, &target_fl->l_pid);
5984     unlock_user_struct(target_fl, target_flock_addr, 1);
5985     return 0;
5986 }
5987 
5988 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5989 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5990 
5991 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5992 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5993                                                    abi_ulong target_flock_addr)
5994 {
5995     struct target_oabi_flock64 *target_fl;
5996     int l_type;
5997 
5998     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5999         return -TARGET_EFAULT;
6000     }
6001 
6002     __get_user(l_type, &target_fl->l_type);
6003     l_type = target_to_host_flock(l_type);
6004     if (l_type < 0) {
6005         return l_type;
6006     }
6007     fl->l_type = l_type;
6008     __get_user(fl->l_whence, &target_fl->l_whence);
6009     __get_user(fl->l_start, &target_fl->l_start);
6010     __get_user(fl->l_len, &target_fl->l_len);
6011     __get_user(fl->l_pid, &target_fl->l_pid);
6012     unlock_user_struct(target_fl, target_flock_addr, 0);
6013     return 0;
6014 }
6015 
6016 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6017                                                  const struct flock64 *fl)
6018 {
6019     struct target_oabi_flock64 *target_fl;
6020     short l_type;
6021 
6022     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6023         return -TARGET_EFAULT;
6024     }
6025 
6026     l_type = host_to_target_flock(fl->l_type);
6027     __put_user(l_type, &target_fl->l_type);
6028     __put_user(fl->l_whence, &target_fl->l_whence);
6029     __put_user(fl->l_start, &target_fl->l_start);
6030     __put_user(fl->l_len, &target_fl->l_len);
6031     __put_user(fl->l_pid, &target_fl->l_pid);
6032     unlock_user_struct(target_fl, target_flock_addr, 1);
6033     return 0;
6034 }
6035 #endif
6036 
6037 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6038                                               abi_ulong target_flock_addr)
6039 {
6040     struct target_flock64 *target_fl;
6041     int l_type;
6042 
6043     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6044         return -TARGET_EFAULT;
6045     }
6046 
6047     __get_user(l_type, &target_fl->l_type);
6048     l_type = target_to_host_flock(l_type);
6049     if (l_type < 0) {
6050         return l_type;
6051     }
6052     fl->l_type = l_type;
6053     __get_user(fl->l_whence, &target_fl->l_whence);
6054     __get_user(fl->l_start, &target_fl->l_start);
6055     __get_user(fl->l_len, &target_fl->l_len);
6056     __get_user(fl->l_pid, &target_fl->l_pid);
6057     unlock_user_struct(target_fl, target_flock_addr, 0);
6058     return 0;
6059 }
6060 
6061 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6062                                             const struct flock64 *fl)
6063 {
6064     struct target_flock64 *target_fl;
6065     short l_type;
6066 
6067     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6068         return -TARGET_EFAULT;
6069     }
6070 
6071     l_type = host_to_target_flock(fl->l_type);
6072     __put_user(l_type, &target_fl->l_type);
6073     __put_user(fl->l_whence, &target_fl->l_whence);
6074     __put_user(fl->l_start, &target_fl->l_start);
6075     __put_user(fl->l_len, &target_fl->l_len);
6076     __put_user(fl->l_pid, &target_fl->l_pid);
6077     unlock_user_struct(target_fl, target_flock_addr, 1);
6078     return 0;
6079 }
6080 
6081 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6082 {
6083     struct flock64 fl64;
6084 #ifdef F_GETOWN_EX
6085     struct f_owner_ex fox;
6086     struct target_f_owner_ex *target_fox;
6087 #endif
6088     abi_long ret;
6089     int host_cmd = target_to_host_fcntl_cmd(cmd);
6090 
6091     if (host_cmd == -TARGET_EINVAL)
6092         return host_cmd;
6093 
6094     switch(cmd) {
6095     case TARGET_F_GETLK:
6096         ret = copy_from_user_flock(&fl64, arg);
6097         if (ret) {
6098             return ret;
6099         }
6100         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6101         if (ret == 0) {
6102             ret = copy_to_user_flock(arg, &fl64);
6103         }
6104         break;
6105 
6106     case TARGET_F_SETLK:
6107     case TARGET_F_SETLKW:
6108         ret = copy_from_user_flock(&fl64, arg);
6109         if (ret) {
6110             return ret;
6111         }
6112         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6113         break;
6114 
6115     case TARGET_F_GETLK64:
6116         ret = copy_from_user_flock64(&fl64, arg);
6117         if (ret) {
6118             return ret;
6119         }
6120         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6121         if (ret == 0) {
6122             ret = copy_to_user_flock64(arg, &fl64);
6123         }
6124         break;
6125     case TARGET_F_SETLK64:
6126     case TARGET_F_SETLKW64:
6127         ret = copy_from_user_flock64(&fl64, arg);
6128         if (ret) {
6129             return ret;
6130         }
6131         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6132         break;
6133 
6134     case TARGET_F_GETFL:
6135         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6136         if (ret >= 0) {
6137             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6138         }
6139         break;
6140 
6141     case TARGET_F_SETFL:
6142         ret = get_errno(safe_fcntl(fd, host_cmd,
6143                                    target_to_host_bitmask(arg,
6144                                                           fcntl_flags_tbl)));
6145         break;
6146 
6147 #ifdef F_GETOWN_EX
6148     case TARGET_F_GETOWN_EX:
6149         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6150         if (ret >= 0) {
6151             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6152                 return -TARGET_EFAULT;
6153             target_fox->type = tswap32(fox.type);
6154             target_fox->pid = tswap32(fox.pid);
6155             unlock_user_struct(target_fox, arg, 1);
6156         }
6157         break;
6158 #endif
6159 
6160 #ifdef F_SETOWN_EX
6161     case TARGET_F_SETOWN_EX:
6162         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6163             return -TARGET_EFAULT;
6164         fox.type = tswap32(target_fox->type);
6165         fox.pid = tswap32(target_fox->pid);
6166         unlock_user_struct(target_fox, arg, 0);
6167         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6168         break;
6169 #endif
6170 
6171     case TARGET_F_SETOWN:
6172     case TARGET_F_GETOWN:
6173     case TARGET_F_SETSIG:
6174     case TARGET_F_GETSIG:
6175     case TARGET_F_SETLEASE:
6176     case TARGET_F_GETLEASE:
6177     case TARGET_F_SETPIPE_SZ:
6178     case TARGET_F_GETPIPE_SZ:
6179         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6180         break;
6181 
6182     default:
6183         ret = get_errno(safe_fcntl(fd, cmd, arg));
6184         break;
6185     }
6186     return ret;
6187 }
6188 
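/* Helpers for targets whose original syscall ABI only has 16-bit uid_t/gid_t:
 * host IDs above 65535 are reported as the overflow ID 65534, while -1 is
 * preserved so that "leave unchanged" arguments keep their meaning.
 */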
6189 #ifdef USE_UID16
6190 
6191 static inline int high2lowuid(int uid)
6192 {
6193     if (uid > 65535)
6194         return 65534;
6195     else
6196         return uid;
6197 }
6198 
6199 static inline int high2lowgid(int gid)
6200 {
6201     if (gid > 65535)
6202         return 65534;
6203     else
6204         return gid;
6205 }
6206 
6207 static inline int low2highuid(int uid)
6208 {
6209     if ((int16_t)uid == -1)
6210         return -1;
6211     else
6212         return uid;
6213 }
6214 
6215 static inline int low2highgid(int gid)
6216 {
6217     if ((int16_t)gid == -1)
6218         return -1;
6219     else
6220         return gid;
6221 }
6222 static inline int tswapid(int id)
6223 {
6224     return tswap16(id);
6225 }
6226 
6227 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6228 
6229 #else /* !USE_UID16 */
6230 static inline int high2lowuid(int uid)
6231 {
6232     return uid;
6233 }
6234 static inline int high2lowgid(int gid)
6235 {
6236     return gid;
6237 }
6238 static inline int low2highuid(int uid)
6239 {
6240     return uid;
6241 }
6242 static inline int low2highgid(int gid)
6243 {
6244     return gid;
6245 }
6246 static inline int tswapid(int id)
6247 {
6248     return tswap32(id);
6249 }
6250 
6251 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6252 
6253 #endif /* USE_UID16 */
6254 
6255 /* We must do direct syscalls for setting UID/GID, because we want to
6256  * implement the Linux system call semantics of "change only for this thread",
6257  * not the libc/POSIX semantics of "change for all threads in process".
6258  * (See http://ewontfix.com/17/ for more details.)
6259  * We use the 32-bit version of the syscalls if present; if it is not
6260  * then either the host architecture supports 32-bit UIDs natively with
6261  * the standard syscall, or the 16-bit UID is the best we can do.
6262  */
6263 #ifdef __NR_setuid32
6264 #define __NR_sys_setuid __NR_setuid32
6265 #else
6266 #define __NR_sys_setuid __NR_setuid
6267 #endif
6268 #ifdef __NR_setgid32
6269 #define __NR_sys_setgid __NR_setgid32
6270 #else
6271 #define __NR_sys_setgid __NR_setgid
6272 #endif
6273 #ifdef __NR_setresuid32
6274 #define __NR_sys_setresuid __NR_setresuid32
6275 #else
6276 #define __NR_sys_setresuid __NR_setresuid
6277 #endif
6278 #ifdef __NR_setresgid32
6279 #define __NR_sys_setresgid __NR_setresgid32
6280 #else
6281 #define __NR_sys_setresgid __NR_setresgid
6282 #endif
6283 
6284 _syscall1(int, sys_setuid, uid_t, uid)
6285 _syscall1(int, sys_setgid, gid_t, gid)
6286 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6287 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6288 
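/* One-time initialisation for the syscall layer: register the structure
 * layouts used by the ioctl thunking code, build target_to_host_errno_table[],
 * and patch the size field of target ioctl numbers that are marked as
 * "compute at runtime" (all size bits set).
 */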
6289 void syscall_init(void)
6290 {
6291     IOCTLEntry *ie;
6292     const argtype *arg_type;
6293     int size;
6294     int i;
6295 
6296     thunk_init(STRUCT_MAX);
6297 
6298 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6299 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6300 #include "syscall_types.h"
6301 #undef STRUCT
6302 #undef STRUCT_SPECIAL
6303 
6304     /* Build target_to_host_errno_table[] from
6305      * host_to_target_errno_table[]. */
6306     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6307         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6308     }
6309 
6310     /* Patch the ioctl size if necessary.  We rely on the fact that
6311        no ioctl has all bits set to '1' in the size field. */
6312     ie = ioctl_entries;
6313     while (ie->target_cmd != 0) {
6314         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6315             TARGET_IOC_SIZEMASK) {
6316             arg_type = ie->arg_type;
6317             if (arg_type[0] != TYPE_PTR) {
6318                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6319                         ie->target_cmd);
6320                 exit(1);
6321             }
6322             arg_type++;
6323             size = thunk_type_size(arg_type, 0);
6324             ie->target_cmd = (ie->target_cmd &
6325                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6326                 (size << TARGET_IOC_SIZESHIFT);
6327         }
6328 
6329         /* automatic consistency check if same arch */
6330 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6331     (defined(__x86_64__) && defined(TARGET_X86_64))
6332         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6333             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6334                     ie->name, ie->target_cmd, ie->host_cmd);
6335         }
6336 #endif
6337         ie++;
6338     }
6339 }
6340 
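/* Reassemble a 64-bit file offset from the two guest registers used by 32-bit
 * ABIs, in the order dictated by the target's endianness; 64-bit ABIs pass the
 * offset in a single register, so only the first word is used.
 */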
6341 #if TARGET_ABI_BITS == 32
6342 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6343 {
6344 #ifdef TARGET_WORDS_BIGENDIAN
6345     return ((uint64_t)word0 << 32) | word1;
6346 #else
6347     return ((uint64_t)word1 << 32) | word0;
6348 #endif
6349 }
6350 #else /* TARGET_ABI_BITS != 32 */
6351 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6352 {
6353     return word0;
6354 }
6355 #endif /* TARGET_ABI_BITS == 32 */
6356 
6357 #ifdef TARGET_NR_truncate64
6358 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6359                                          abi_long arg2,
6360                                          abi_long arg3,
6361                                          abi_long arg4)
6362 {
6363     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6364         arg2 = arg3;
6365         arg3 = arg4;
6366     }
6367     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6368 }
6369 #endif
6370 
6371 #ifdef TARGET_NR_ftruncate64
6372 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6373                                           abi_long arg2,
6374                                           abi_long arg3,
6375                                           abi_long arg4)
6376 {
6377     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6378         arg2 = arg3;
6379         arg3 = arg4;
6380     }
6381     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6382 }
6383 #endif
6384 
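/* Conversion helpers between the guest target_itimerspec layout and the host
 * struct itimerspec, byte-swapping each timespec field as required.
 */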
6385 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6386                                                  abi_ulong target_addr)
6387 {
6388     struct target_itimerspec *target_itspec;
6389 
6390     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6391         return -TARGET_EFAULT;
6392     }
6393 
6394     host_itspec->it_interval.tv_sec =
6395                             tswapal(target_itspec->it_interval.tv_sec);
6396     host_itspec->it_interval.tv_nsec =
6397                             tswapal(target_itspec->it_interval.tv_nsec);
6398     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6399     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6400 
6401     unlock_user_struct(target_itspec, target_addr, 1);
6402     return 0;
6403 }
6404 
6405 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6406                                                struct itimerspec *host_its)
6407 {
6408     struct target_itimerspec *target_itspec;
6409 
6410     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6411         return -TARGET_EFAULT;
6412     }
6413 
6414     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6415     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6416 
6417     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6418     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6419 
6420     unlock_user_struct(target_itspec, target_addr, 0);
6421     return 0;
6422 }
6423 
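/* Copy an adjtimex()-style struct timex between guest and host layouts,
 * converting every field individually.
 */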
6424 static inline abi_long target_to_host_timex(struct timex *host_tx,
6425                                             abi_long target_addr)
6426 {
6427     struct target_timex *target_tx;
6428 
6429     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6430         return -TARGET_EFAULT;
6431     }
6432 
6433     __get_user(host_tx->modes, &target_tx->modes);
6434     __get_user(host_tx->offset, &target_tx->offset);
6435     __get_user(host_tx->freq, &target_tx->freq);
6436     __get_user(host_tx->maxerror, &target_tx->maxerror);
6437     __get_user(host_tx->esterror, &target_tx->esterror);
6438     __get_user(host_tx->status, &target_tx->status);
6439     __get_user(host_tx->constant, &target_tx->constant);
6440     __get_user(host_tx->precision, &target_tx->precision);
6441     __get_user(host_tx->tolerance, &target_tx->tolerance);
6442     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6443     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6444     __get_user(host_tx->tick, &target_tx->tick);
6445     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6446     __get_user(host_tx->jitter, &target_tx->jitter);
6447     __get_user(host_tx->shift, &target_tx->shift);
6448     __get_user(host_tx->stabil, &target_tx->stabil);
6449     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6450     __get_user(host_tx->calcnt, &target_tx->calcnt);
6451     __get_user(host_tx->errcnt, &target_tx->errcnt);
6452     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6453     __get_user(host_tx->tai, &target_tx->tai);
6454 
6455     unlock_user_struct(target_tx, target_addr, 0);
6456     return 0;
6457 }
6458 
6459 static inline abi_long host_to_target_timex(abi_long target_addr,
6460                                             struct timex *host_tx)
6461 {
6462     struct target_timex *target_tx;
6463 
6464     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6465         return -TARGET_EFAULT;
6466     }
6467 
6468     __put_user(host_tx->modes, &target_tx->modes);
6469     __put_user(host_tx->offset, &target_tx->offset);
6470     __put_user(host_tx->freq, &target_tx->freq);
6471     __put_user(host_tx->maxerror, &target_tx->maxerror);
6472     __put_user(host_tx->esterror, &target_tx->esterror);
6473     __put_user(host_tx->status, &target_tx->status);
6474     __put_user(host_tx->constant, &target_tx->constant);
6475     __put_user(host_tx->precision, &target_tx->precision);
6476     __put_user(host_tx->tolerance, &target_tx->tolerance);
6477     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6478     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6479     __put_user(host_tx->tick, &target_tx->tick);
6480     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6481     __put_user(host_tx->jitter, &target_tx->jitter);
6482     __put_user(host_tx->shift, &target_tx->shift);
6483     __put_user(host_tx->stabil, &target_tx->stabil);
6484     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6485     __put_user(host_tx->calcnt, &target_tx->calcnt);
6486     __put_user(host_tx->errcnt, &target_tx->errcnt);
6487     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6488     __put_user(host_tx->tai, &target_tx->tai);
6489 
6490     unlock_user_struct(target_tx, target_addr, 1);
6491     return 0;
6492 }
6493 
6494 
6495 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6496                                                abi_ulong target_addr)
6497 {
6498     struct target_sigevent *target_sevp;
6499 
6500     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6501         return -TARGET_EFAULT;
6502     }
6503 
6504     /* This union is awkward on 64 bit systems because it has a 32 bit
6505      * integer and a pointer in it; we follow the conversion approach
6506      * used for handling sigval types in signal.c so the guest should get
6507      * the correct value back even if we did a 64 bit byteswap and it's
6508      * using the 32 bit integer.
6509      */
6510     host_sevp->sigev_value.sival_ptr =
6511         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6512     host_sevp->sigev_signo =
6513         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6514     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6515     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6516 
6517     unlock_user_struct(target_sevp, target_addr, 1);
6518     return 0;
6519 }
6520 
6521 #if defined(TARGET_NR_mlockall)
6522 static inline int target_to_host_mlockall_arg(int arg)
6523 {
6524     int result = 0;
6525 
6526     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6527         result |= MCL_CURRENT;
6528     }
6529     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6530         result |= MCL_FUTURE;
6531     }
6532     return result;
6533 }
6534 #endif
6535 
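/* Copy a host struct stat into the guest's stat64 (or stat) layout, handling
 * the ARM EABI variant of the structure and targets that additionally expose
 * __st_ino.
 */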
6536 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6537      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6538      defined(TARGET_NR_newfstatat))
6539 static inline abi_long host_to_target_stat64(void *cpu_env,
6540                                              abi_ulong target_addr,
6541                                              struct stat *host_st)
6542 {
6543 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6544     if (((CPUARMState *)cpu_env)->eabi) {
6545         struct target_eabi_stat64 *target_st;
6546 
6547         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6548             return -TARGET_EFAULT;
6549         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6550         __put_user(host_st->st_dev, &target_st->st_dev);
6551         __put_user(host_st->st_ino, &target_st->st_ino);
6552 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6553         __put_user(host_st->st_ino, &target_st->__st_ino);
6554 #endif
6555         __put_user(host_st->st_mode, &target_st->st_mode);
6556         __put_user(host_st->st_nlink, &target_st->st_nlink);
6557         __put_user(host_st->st_uid, &target_st->st_uid);
6558         __put_user(host_st->st_gid, &target_st->st_gid);
6559         __put_user(host_st->st_rdev, &target_st->st_rdev);
6560         __put_user(host_st->st_size, &target_st->st_size);
6561         __put_user(host_st->st_blksize, &target_st->st_blksize);
6562         __put_user(host_st->st_blocks, &target_st->st_blocks);
6563         __put_user(host_st->st_atime, &target_st->target_st_atime);
6564         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6565         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6566 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6567         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6568         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6569         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6570 #endif
6571         unlock_user_struct(target_st, target_addr, 1);
6572     } else
6573 #endif
6574     {
6575 #if defined(TARGET_HAS_STRUCT_STAT64)
6576         struct target_stat64 *target_st;
6577 #else
6578         struct target_stat *target_st;
6579 #endif
6580 
6581         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6582             return -TARGET_EFAULT;
6583         memset(target_st, 0, sizeof(*target_st));
6584         __put_user(host_st->st_dev, &target_st->st_dev);
6585         __put_user(host_st->st_ino, &target_st->st_ino);
6586 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6587         __put_user(host_st->st_ino, &target_st->__st_ino);
6588 #endif
6589         __put_user(host_st->st_mode, &target_st->st_mode);
6590         __put_user(host_st->st_nlink, &target_st->st_nlink);
6591         __put_user(host_st->st_uid, &target_st->st_uid);
6592         __put_user(host_st->st_gid, &target_st->st_gid);
6593         __put_user(host_st->st_rdev, &target_st->st_rdev);
6594         /* XXX: better use of kernel struct */
6595         __put_user(host_st->st_size, &target_st->st_size);
6596         __put_user(host_st->st_blksize, &target_st->st_blksize);
6597         __put_user(host_st->st_blocks, &target_st->st_blocks);
6598         __put_user(host_st->st_atime, &target_st->target_st_atime);
6599         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6600         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6601 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6602         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6603         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6604         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6605 #endif
6606         unlock_user_struct(target_st, target_addr, 1);
6607     }
6608 
6609     return 0;
6610 }
6611 #endif
6612 
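/* Copy the statx data in *host_stx into the guest's struct statx at
 * target_addr, converting each field to target byte order.
 */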
6613 #if defined(TARGET_NR_statx) && defined(__NR_statx)
6614 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
6615                                             abi_ulong target_addr)
6616 {
6617     struct target_statx *target_stx;
6618 
6619     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
6620         return -TARGET_EFAULT;
6621     }
6622     memset(target_stx, 0, sizeof(*target_stx));
6623 
6624     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
6625     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
6626     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
6627     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
6628     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
6629     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
6630     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
6631     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
6632     __put_user(host_stx->stx_size, &target_stx->stx_size);
6633     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
6634     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
6635     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
6636     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6637     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_atime.tv_sec);
6638         __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
6639         __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
6640         __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
6641         __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
6642         __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
6643         __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
6644     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
6645     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
6646     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
6647 
6648     unlock_user_struct(target_stx, target_addr, 1);
6649 
6650     return 0;
6651 }
6652 #endif
6653 
6654 
6655 /* ??? Using host futex calls even when target atomic operations
6656    are not really atomic probably breaks things.  However, implementing
6657    futexes locally would make futexes shared between multiple processes
6658    tricky; such cross-process futexes are probably useless anyway, because
6659    guest atomic operations will not work across processes either.  */
6660 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6661                     target_ulong uaddr2, int val3)
6662 {
6663     struct timespec ts, *pts;
6664     int base_op;
6665 
6666     /* ??? We assume FUTEX_* constants are the same on both host
6667        and target.  */
6668 #ifdef FUTEX_CMD_MASK
6669     base_op = op & FUTEX_CMD_MASK;
6670 #else
6671     base_op = op;
6672 #endif
6673     switch (base_op) {
6674     case FUTEX_WAIT:
6675     case FUTEX_WAIT_BITSET:
6676         if (timeout) {
6677             pts = &ts;
6678             target_to_host_timespec(pts, timeout);
6679         } else {
6680             pts = NULL;
6681         }
6682         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6683                          pts, NULL, val3));
6684     case FUTEX_WAKE:
6685         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6686     case FUTEX_FD:
6687         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6688     case FUTEX_REQUEUE:
6689     case FUTEX_CMP_REQUEUE:
6690     case FUTEX_WAKE_OP:
6691         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6692            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6693            But the prototype takes a `struct timespec *'; insert casts
6694            to satisfy the compiler.  We do not need to tswap TIMEOUT
6695            since it's not compared to guest memory.  */
6696         pts = (struct timespec *)(uintptr_t) timeout;
6697         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6698                                     g2h(uaddr2),
6699                                     (base_op == FUTEX_CMP_REQUEUE
6700                                      ? tswap32(val3)
6701                                      : val3)));
6702     default:
6703         return -TARGET_ENOSYS;
6704     }
6705 }
6706 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6707 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6708                                      abi_long handle, abi_long mount_id,
6709                                      abi_long flags)
6710 {
6711     struct file_handle *target_fh;
6712     struct file_handle *fh;
6713     int mid = 0;
6714     abi_long ret;
6715     char *name;
6716     unsigned int size, total_size;
6717 
6718     if (get_user_s32(size, handle)) {
6719         return -TARGET_EFAULT;
6720     }
6721 
6722     name = lock_user_string(pathname);
6723     if (!name) {
6724         return -TARGET_EFAULT;
6725     }
6726 
6727     total_size = sizeof(struct file_handle) + size;
6728     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6729     if (!target_fh) {
6730         unlock_user(name, pathname, 0);
6731         return -TARGET_EFAULT;
6732     }
6733 
6734     fh = g_malloc0(total_size);
6735     fh->handle_bytes = size;
6736 
6737     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6738     unlock_user(name, pathname, 0);
6739 
6740     /* man name_to_handle_at(2):
6741      * Other than the use of the handle_bytes field, the caller should treat
6742      * the file_handle structure as an opaque data type
6743      */
6744 
6745     memcpy(target_fh, fh, total_size);
6746     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6747     target_fh->handle_type = tswap32(fh->handle_type);
6748     g_free(fh);
6749     unlock_user(target_fh, handle, total_size);
6750 
6751     if (put_user_s32(mid, mount_id)) {
6752         return -TARGET_EFAULT;
6753     }
6754 
6755     return ret;
6756 
6757 }
6758 #endif
6759 
6760 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6761 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6762                                      abi_long flags)
6763 {
6764     struct file_handle *target_fh;
6765     struct file_handle *fh;
6766     unsigned int size, total_size;
6767     abi_long ret;
6768 
6769     if (get_user_s32(size, handle)) {
6770         return -TARGET_EFAULT;
6771     }
6772 
6773     total_size = sizeof(struct file_handle) + size;
6774     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6775     if (!target_fh) {
6776         return -TARGET_EFAULT;
6777     }
6778 
6779     fh = g_memdup(target_fh, total_size);
6780     fh->handle_bytes = size;
6781     fh->handle_type = tswap32(target_fh->handle_type);
6782 
6783     ret = get_errno(open_by_handle_at(mount_fd, fh,
6784                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6785 
6786     g_free(fh);
6787 
6788     unlock_user(target_fh, handle, total_size);
6789 
6790     return ret;
6791 }
6792 #endif
6793 
6794 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6795 
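/* Implement signalfd()/signalfd4(): convert the guest signal mask and flag
 * bits to host format, create the host signalfd, and register an fd
 * translator so that siginfo records read from it are converted back for
 * the guest.
 */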
6796 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6797 {
6798     int host_flags;
6799     target_sigset_t *target_mask;
6800     sigset_t host_mask;
6801     abi_long ret;
6802 
6803     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6804         return -TARGET_EINVAL;
6805     }
6806     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6807         return -TARGET_EFAULT;
6808     }
6809 
6810     target_to_host_sigset(&host_mask, target_mask);
6811 
6812     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6813 
6814     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6815     if (ret >= 0) {
6816         fd_trans_register(ret, &target_signalfd_trans);
6817     }
6818 
6819     unlock_user_struct(target_mask, mask, 0);
6820 
6821     return ret;
6822 }
6823 #endif
6824 
6825 /* Map host to target signal numbers for the wait family of syscalls.
6826    Assume all other status bits are the same.  */
6827 int host_to_target_waitstatus(int status)
6828 {
6829     if (WIFSIGNALED(status)) {
6830         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6831     }
6832     if (WIFSTOPPED(status)) {
6833         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6834                | (status & 0xff);
6835     }
6836     return status;
6837 }
6838 
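/* Emulated /proc contents: the open_self_*() and open_*() helpers below are
 * used by do_openat() to write synthesised file contents into a temporary
 * file, so that the guest sees data describing the emulated process and
 * target rather than raw host values.
 */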
6839 static int open_self_cmdline(void *cpu_env, int fd)
6840 {
6841     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6842     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6843     int i;
6844 
6845     for (i = 0; i < bprm->argc; i++) {
6846         size_t len = strlen(bprm->argv[i]) + 1;
6847 
6848         if (write(fd, bprm->argv[i], len) != len) {
6849             return -1;
6850         }
6851     }
6852 
6853     return 0;
6854 }
6855 
6856 static int open_self_maps(void *cpu_env, int fd)
6857 {
6858     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6859     TaskState *ts = cpu->opaque;
6860     FILE *fp;
6861     char *line = NULL;
6862     size_t len = 0;
6863     ssize_t read;
6864 
6865     fp = fopen("/proc/self/maps", "r");
6866     if (fp == NULL) {
6867         return -1;
6868     }
6869 
6870     while ((read = getline(&line, &len, fp)) != -1) {
6871         int fields, dev_maj, dev_min, inode;
6872         uint64_t min, max, offset;
6873         char flag_r, flag_w, flag_x, flag_p;
6874         char path[512] = "";
6875         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6876                         " %511s", &min, &max, &flag_r, &flag_w, &flag_x,
6877                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6878 
6879         if ((fields < 10) || (fields > 11)) {
6880             continue;
6881         }
6882         if (h2g_valid(min)) {
6883             int flags = page_get_flags(h2g(min));
6884             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6885             if (page_check_range(h2g(min), max - min, flags) == -1) {
6886                 continue;
6887             }
6888             if (h2g(min) == ts->info->stack_limit) {
6889                 pstrcpy(path, sizeof(path), "      [stack]");
6890             }
6891             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6892                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6893                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6894                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6895                     path[0] ? "         " : "", path);
6896         }
6897     }
6898 
6899     free(line);
6900     fclose(fp);
6901 
6902     return 0;
6903 }
6904 
6905 static int open_self_stat(void *cpu_env, int fd)
6906 {
6907     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6908     TaskState *ts = cpu->opaque;
6909     abi_ulong start_stack = ts->info->start_stack;
6910     int i;
6911 
6912     for (i = 0; i < 44; i++) {
6913       char buf[128];
6914       int len;
6915       uint64_t val = 0;
6916 
6917       if (i == 0) {
6918         /* pid */
6919         val = getpid();
6920         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6921       } else if (i == 1) {
6922         /* app name */
6923         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6924       } else if (i == 27) {
6925         /* stack bottom */
6926         val = start_stack;
6927         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6928       } else {
6929         /* all other fields are reported as 0 */
6930         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6931       }
6932 
6933       len = strlen(buf);
6934       if (write(fd, buf, len) != len) {
6935           return -1;
6936       }
6937     }
6938 
6939     return 0;
6940 }
6941 
6942 static int open_self_auxv(void *cpu_env, int fd)
6943 {
6944     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6945     TaskState *ts = cpu->opaque;
6946     abi_ulong auxv = ts->info->saved_auxv;
6947     abi_ulong len = ts->info->auxv_len;
6948     char *ptr;
6949 
6950     /*
6951      * The auxiliary vector is stored on the target process's stack.
6952      * Read the whole auxv vector and copy it to the file.
6953      */
6954     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6955     if (ptr != NULL) {
6956         while (len > 0) {
6957             ssize_t r;
6958             r = write(fd, ptr, len);
6959             if (r <= 0) {
6960                 break;
6961             }
6962             len -= r;
6963             ptr += r;
6964         }
6965         lseek(fd, 0, SEEK_SET);
6966         unlock_user(ptr, auxv, len);
6967     }
6968 
6969     return 0;
6970 }
6971 
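/* Return non-zero if FILENAME names ENTRY under /proc for the current
 * process, i.e. "/proc/self/<entry>" or "/proc/<our pid>/<entry>".
 */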
6972 static int is_proc_myself(const char *filename, const char *entry)
6973 {
6974     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6975         filename += strlen("/proc/");
6976         if (!strncmp(filename, "self/", strlen("self/"))) {
6977             filename += strlen("self/");
6978         } else if (*filename >= '1' && *filename <= '9') {
6979             char myself[80];
6980             snprintf(myself, sizeof(myself), "%d/", getpid());
6981             if (!strncmp(filename, myself, strlen(myself))) {
6982                 filename += strlen(myself);
6983             } else {
6984                 return 0;
6985             }
6986         } else {
6987             return 0;
6988         }
6989         if (!strcmp(filename, entry)) {
6990             return 1;
6991         }
6992     }
6993     return 0;
6994 }
6995 
6996 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
6997     defined(TARGET_SPARC) || defined(TARGET_M68K)
6998 static int is_proc(const char *filename, const char *entry)
6999 {
7000     return strcmp(filename, entry) == 0;
7001 }
7002 #endif
7003 
7004 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7005 static int open_net_route(void *cpu_env, int fd)
7006 {
7007     FILE *fp;
7008     char *line = NULL;
7009     size_t len = 0;
7010     ssize_t read;
7011 
7012     fp = fopen("/proc/net/route", "r");
7013     if (fp == NULL) {
7014         return -1;
7015     }
7016 
7017     /* read header */
7018 
7019     read = getline(&line, &len, fp);
7020     dprintf(fd, "%s", line);
7021 
7022     /* read routes */
7023 
7024     while ((read = getline(&line, &len, fp)) != -1) {
7025         char iface[16];
7026         uint32_t dest, gw, mask;
7027         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7028         int fields;
7029 
7030         fields = sscanf(line,
7031                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7032                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7033                         &mask, &mtu, &window, &irtt);
7034         if (fields != 11) {
7035             continue;
7036         }
7037         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7038                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7039                 metric, tswap32(mask), mtu, window, irtt);
7040     }
7041 
7042     free(line);
7043     fclose(fp);
7044 
7045     return 0;
7046 }
7047 #endif
7048 
7049 #if defined(TARGET_SPARC)
7050 static int open_cpuinfo(void *cpu_env, int fd)
7051 {
7052     dprintf(fd, "type\t\t: sun4u\n");
7053     return 0;
7054 }
7055 #endif
7056 
7057 #if defined(TARGET_M68K)
7058 static int open_hardware(void *cpu_env, int fd)
7059 {
7060     dprintf(fd, "Model:\t\tqemu-m68k\n");
7061     return 0;
7062 }
7063 #endif
7064 
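/* openat() with interception of /proc paths that must be emulated:
 * "/proc/self/exe" is redirected to the guest executable, and the entries in
 * fakes[] are served from a synthesised temporary file; everything else is
 * passed through to the host.
 */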
7065 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7066 {
7067     struct fake_open {
7068         const char *filename;
7069         int (*fill)(void *cpu_env, int fd);
7070         int (*cmp)(const char *s1, const char *s2);
7071     };
7072     const struct fake_open *fake_open;
7073     static const struct fake_open fakes[] = {
7074         { "maps", open_self_maps, is_proc_myself },
7075         { "stat", open_self_stat, is_proc_myself },
7076         { "auxv", open_self_auxv, is_proc_myself },
7077         { "cmdline", open_self_cmdline, is_proc_myself },
7078 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7079         { "/proc/net/route", open_net_route, is_proc },
7080 #endif
7081 #if defined(TARGET_SPARC)
7082         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7083 #endif
7084 #if defined(TARGET_M68K)
7085         { "/proc/hardware", open_hardware, is_proc },
7086 #endif
7087         { NULL, NULL, NULL }
7088     };
7089 
7090     if (is_proc_myself(pathname, "exe")) {
7091         int execfd = qemu_getauxval(AT_EXECFD);
7092         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7093     }
7094 
7095     for (fake_open = fakes; fake_open->filename; fake_open++) {
7096         if (fake_open->cmp(pathname, fake_open->filename)) {
7097             break;
7098         }
7099     }
7100 
7101     if (fake_open->filename) {
7102         const char *tmpdir;
7103         char filename[PATH_MAX];
7104         int fd, r;
7105 
7106         /* create temporary file to map stat to */
7107         tmpdir = getenv("TMPDIR");
7108         if (!tmpdir)
7109             tmpdir = "/tmp";
7110         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7111         fd = mkstemp(filename);
7112         if (fd < 0) {
7113             return fd;
7114         }
7115         unlink(filename);
7116 
7117         if ((r = fake_open->fill(cpu_env, fd))) {
7118             int e = errno;
7119             close(fd);
7120             errno = e;
7121             return r;
7122         }
7123         lseek(fd, 0, SEEK_SET);
7124 
7125         return fd;
7126     }
7127 
7128     return safe_openat(dirfd, path(pathname), flags, mode);
7129 }
7130 
7131 #define TIMER_MAGIC 0x0caf0000
7132 #define TIMER_MAGIC_MASK 0xffff0000
7133 
7134 /* Convert QEMU provided timer ID back to internal 16bit index format */
7135 static target_timer_t get_timer_id(abi_long arg)
7136 {
7137     target_timer_t timerid = arg;
7138 
7139     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7140         return -TARGET_EINVAL;
7141     }
7142 
7143     timerid &= 0xffff;
7144 
7145     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7146         return -TARGET_EINVAL;
7147     }
7148 
7149     return timerid;
7150 }
7151 
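/* Convert a CPU affinity bitmap between the guest abi_ulong layout and the
 * host unsigned long layout, copying one bit at a time.
 */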
7152 static int target_to_host_cpu_mask(unsigned long *host_mask,
7153                                    size_t host_size,
7154                                    abi_ulong target_addr,
7155                                    size_t target_size)
7156 {
7157     unsigned target_bits = sizeof(abi_ulong) * 8;
7158     unsigned host_bits = sizeof(*host_mask) * 8;
7159     abi_ulong *target_mask;
7160     unsigned i, j;
7161 
7162     assert(host_size >= target_size);
7163 
7164     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7165     if (!target_mask) {
7166         return -TARGET_EFAULT;
7167     }
7168     memset(host_mask, 0, host_size);
7169 
7170     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7171         unsigned bit = i * target_bits;
7172         abi_ulong val;
7173 
7174         __get_user(val, &target_mask[i]);
7175         for (j = 0; j < target_bits; j++, bit++) {
7176             if (val & (1UL << j)) {
7177                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7178             }
7179         }
7180     }
7181 
7182     unlock_user(target_mask, target_addr, 0);
7183     return 0;
7184 }
7185 
7186 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7187                                    size_t host_size,
7188                                    abi_ulong target_addr,
7189                                    size_t target_size)
7190 {
7191     unsigned target_bits = sizeof(abi_ulong) * 8;
7192     unsigned host_bits = sizeof(*host_mask) * 8;
7193     abi_ulong *target_mask;
7194     unsigned i, j;
7195 
7196     assert(host_size >= target_size);
7197 
7198     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7199     if (!target_mask) {
7200         return -TARGET_EFAULT;
7201     }
7202 
7203     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7204         unsigned bit = i * target_bits;
7205         abi_ulong val = 0;
7206 
7207         for (j = 0; j < target_bits; j++, bit++) {
7208             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7209                 val |= 1UL << j;
7210             }
7211         }
7212         __put_user(val, &target_mask[i]);
7213     }
7214 
7215     unlock_user(target_mask, target_addr, target_size);
7216     return 0;
7217 }
7218 
7219 /* This is an internal helper for do_syscall, giving it a single
7220  * return point so that actions such as logging of syscall results
7221  * can be performed in one place.
7222  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7223  */
7224 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7225                             abi_long arg2, abi_long arg3, abi_long arg4,
7226                             abi_long arg5, abi_long arg6, abi_long arg7,
7227                             abi_long arg8)
7228 {
7229     CPUState *cpu = env_cpu(cpu_env);
7230     abi_long ret;
7231 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7232     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7233     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7234     || defined(TARGET_NR_statx)
7235     struct stat st;
7236 #endif
7237 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7238     || defined(TARGET_NR_fstatfs)
7239     struct statfs stfs;
7240 #endif
7241     void *p;
7242 
7243     switch(num) {
7244     case TARGET_NR_exit:
7245         /* In old applications this may be used to implement _exit(2).
7246            However in threaded applications it is used for thread termination,
7247            and _exit_group is used for application termination.
7248            Do thread termination if we have more than one thread.  */
7249 
7250         if (block_signals()) {
7251             return -TARGET_ERESTARTSYS;
7252         }
7253 
7254         cpu_list_lock();
7255 
7256         if (CPU_NEXT(first_cpu)) {
7257             TaskState *ts;
7258 
7259             /* Remove the CPU from the list.  */
7260             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7261 
7262             cpu_list_unlock();
7263 
7264             ts = cpu->opaque;
7265             if (ts->child_tidptr) {
7266                 put_user_u32(0, ts->child_tidptr);
7267                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7268                           NULL, NULL, 0);
7269             }
7270             thread_cpu = NULL;
7271             object_unref(OBJECT(cpu));
7272             g_free(ts);
7273             rcu_unregister_thread();
7274             pthread_exit(NULL);
7275         }
7276 
7277         cpu_list_unlock();
7278         preexit_cleanup(cpu_env, arg1);
7279         _exit(arg1);
7280         return 0; /* avoid warning */
7281     case TARGET_NR_read:
7282         if (arg2 == 0 && arg3 == 0) {
7283             return get_errno(safe_read(arg1, 0, 0));
7284         } else {
7285             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7286                 return -TARGET_EFAULT;
7287             ret = get_errno(safe_read(arg1, p, arg3));
7288             if (ret >= 0 &&
7289                 fd_trans_host_to_target_data(arg1)) {
7290                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7291             }
7292             unlock_user(p, arg2, ret);
7293         }
7294         return ret;
7295     case TARGET_NR_write:
7296         if (arg2 == 0 && arg3 == 0) {
7297             return get_errno(safe_write(arg1, 0, 0));
7298         }
7299         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7300             return -TARGET_EFAULT;
7301         if (fd_trans_target_to_host_data(arg1)) {
7302             void *copy = g_malloc(arg3);
7303             memcpy(copy, p, arg3);
7304             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7305             if (ret >= 0) {
7306                 ret = get_errno(safe_write(arg1, copy, ret));
7307             }
7308             g_free(copy);
7309         } else {
7310             ret = get_errno(safe_write(arg1, p, arg3));
7311         }
7312         unlock_user(p, arg2, 0);
7313         return ret;
7314 
7315 #ifdef TARGET_NR_open
7316     case TARGET_NR_open:
7317         if (!(p = lock_user_string(arg1)))
7318             return -TARGET_EFAULT;
7319         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7320                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7321                                   arg3));
7322         fd_trans_unregister(ret);
7323         unlock_user(p, arg1, 0);
7324         return ret;
7325 #endif
7326     case TARGET_NR_openat:
7327         if (!(p = lock_user_string(arg2)))
7328             return -TARGET_EFAULT;
7329         ret = get_errno(do_openat(cpu_env, arg1, p,
7330                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7331                                   arg4));
7332         fd_trans_unregister(ret);
7333         unlock_user(p, arg2, 0);
7334         return ret;
7335 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7336     case TARGET_NR_name_to_handle_at:
7337         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7338         return ret;
7339 #endif
7340 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7341     case TARGET_NR_open_by_handle_at:
7342         ret = do_open_by_handle_at(arg1, arg2, arg3);
7343         fd_trans_unregister(ret);
7344         return ret;
7345 #endif
7346     case TARGET_NR_close:
7347         fd_trans_unregister(arg1);
7348         return get_errno(close(arg1));
7349 
7350     case TARGET_NR_brk:
7351         return do_brk(arg1);
7352 #ifdef TARGET_NR_fork
7353     case TARGET_NR_fork:
7354         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7355 #endif
7356 #ifdef TARGET_NR_waitpid
7357     case TARGET_NR_waitpid:
7358         {
7359             int status;
7360             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7361             if (!is_error(ret) && arg2 && ret
7362                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7363                 return -TARGET_EFAULT;
7364         }
7365         return ret;
7366 #endif
7367 #ifdef TARGET_NR_waitid
7368     case TARGET_NR_waitid:
7369         {
7370             siginfo_t info;
7371             info.si_pid = 0;
7372             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7373             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7374                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7375                     return -TARGET_EFAULT;
7376                 host_to_target_siginfo(p, &info);
7377                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7378             }
7379         }
7380         return ret;
7381 #endif
7382 #ifdef TARGET_NR_creat /* not on alpha */
7383     case TARGET_NR_creat:
7384         if (!(p = lock_user_string(arg1)))
7385             return -TARGET_EFAULT;
7386         ret = get_errno(creat(p, arg2));
7387         fd_trans_unregister(ret);
7388         unlock_user(p, arg1, 0);
7389         return ret;
7390 #endif
7391 #ifdef TARGET_NR_link
7392     case TARGET_NR_link:
7393         {
7394             void * p2;
7395             p = lock_user_string(arg1);
7396             p2 = lock_user_string(arg2);
7397             if (!p || !p2)
7398                 ret = -TARGET_EFAULT;
7399             else
7400                 ret = get_errno(link(p, p2));
7401             unlock_user(p2, arg2, 0);
7402             unlock_user(p, arg1, 0);
7403         }
7404         return ret;
7405 #endif
7406 #if defined(TARGET_NR_linkat)
7407     case TARGET_NR_linkat:
7408         {
7409             void * p2 = NULL;
7410             if (!arg2 || !arg4)
7411                 return -TARGET_EFAULT;
7412             p  = lock_user_string(arg2);
7413             p2 = lock_user_string(arg4);
7414             if (!p || !p2)
7415                 ret = -TARGET_EFAULT;
7416             else
7417                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7418             unlock_user(p, arg2, 0);
7419             unlock_user(p2, arg4, 0);
7420         }
7421         return ret;
7422 #endif
7423 #ifdef TARGET_NR_unlink
7424     case TARGET_NR_unlink:
7425         if (!(p = lock_user_string(arg1)))
7426             return -TARGET_EFAULT;
7427         ret = get_errno(unlink(p));
7428         unlock_user(p, arg1, 0);
7429         return ret;
7430 #endif
7431 #if defined(TARGET_NR_unlinkat)
7432     case TARGET_NR_unlinkat:
7433         if (!(p = lock_user_string(arg2)))
7434             return -TARGET_EFAULT;
7435         ret = get_errno(unlinkat(arg1, p, arg3));
7436         unlock_user(p, arg2, 0);
7437         return ret;
7438 #endif
7439     case TARGET_NR_execve:
7440         {
7441             char **argp, **envp;
7442             int argc, envc;
7443             abi_ulong gp;
7444             abi_ulong guest_argp;
7445             abi_ulong guest_envp;
7446             abi_ulong addr;
7447             char **q;
7448             int total_size = 0;
7449 
7450             argc = 0;
7451             guest_argp = arg2;
7452             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7453                 if (get_user_ual(addr, gp))
7454                     return -TARGET_EFAULT;
7455                 if (!addr)
7456                     break;
7457                 argc++;
7458             }
7459             envc = 0;
7460             guest_envp = arg3;
7461             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7462                 if (get_user_ual(addr, gp))
7463                     return -TARGET_EFAULT;
7464                 if (!addr)
7465                     break;
7466                 envc++;
7467             }
7468 
7469             argp = g_new0(char *, argc + 1);
7470             envp = g_new0(char *, envc + 1);
7471 
7472             for (gp = guest_argp, q = argp; gp;
7473                   gp += sizeof(abi_ulong), q++) {
7474                 if (get_user_ual(addr, gp))
7475                     goto execve_efault;
7476                 if (!addr)
7477                     break;
7478                 if (!(*q = lock_user_string(addr)))
7479                     goto execve_efault;
7480                 total_size += strlen(*q) + 1;
7481             }
7482             *q = NULL;
7483 
7484             for (gp = guest_envp, q = envp; gp;
7485                   gp += sizeof(abi_ulong), q++) {
7486                 if (get_user_ual(addr, gp))
7487                     goto execve_efault;
7488                 if (!addr)
7489                     break;
7490                 if (!(*q = lock_user_string(addr)))
7491                     goto execve_efault;
7492                 total_size += strlen(*q) + 1;
7493             }
7494             *q = NULL;
7495 
7496             if (!(p = lock_user_string(arg1)))
7497                 goto execve_efault;
7498             /* Although execve() is not an interruptible syscall it is
7499              * a special case where we must use the safe_syscall wrapper:
7500              * if we allow a signal to happen before we make the host
7501              * syscall then we will 'lose' it, because at the point of
7502              * execve the process leaves QEMU's control. So we use the
7503              * safe syscall wrapper to ensure that we either take the
7504              * signal as a guest signal, or else it does not happen
7505              * before the execve completes and makes it the other
7506              * program's problem.
7507              */
7508             ret = get_errno(safe_execve(p, argp, envp));
7509             unlock_user(p, arg1, 0);
7510 
7511             goto execve_end;
7512 
7513         execve_efault:
7514             ret = -TARGET_EFAULT;
7515 
7516         execve_end:
7517             for (gp = guest_argp, q = argp; *q;
7518                   gp += sizeof(abi_ulong), q++) {
7519                 if (get_user_ual(addr, gp)
7520                     || !addr)
7521                     break;
7522                 unlock_user(*q, addr, 0);
7523             }
7524             for (gp = guest_envp, q = envp; *q;
7525                   gp += sizeof(abi_ulong), q++) {
7526                 if (get_user_ual(addr, gp)
7527                     || !addr)
7528                     break;
7529                 unlock_user(*q, addr, 0);
7530             }
7531 
7532             g_free(argp);
7533             g_free(envp);
7534         }
7535         return ret;
7536     case TARGET_NR_chdir:
7537         if (!(p = lock_user_string(arg1)))
7538             return -TARGET_EFAULT;
7539         ret = get_errno(chdir(p));
7540         unlock_user(p, arg1, 0);
7541         return ret;
7542 #ifdef TARGET_NR_time
7543     case TARGET_NR_time:
7544         {
7545             time_t host_time;
7546             ret = get_errno(time(&host_time));
7547             if (!is_error(ret)
7548                 && arg1
7549                 && put_user_sal(host_time, arg1))
7550                 return -TARGET_EFAULT;
7551         }
7552         return ret;
7553 #endif
7554 #ifdef TARGET_NR_mknod
7555     case TARGET_NR_mknod:
7556         if (!(p = lock_user_string(arg1)))
7557             return -TARGET_EFAULT;
7558         ret = get_errno(mknod(p, arg2, arg3));
7559         unlock_user(p, arg1, 0);
7560         return ret;
7561 #endif
7562 #if defined(TARGET_NR_mknodat)
7563     case TARGET_NR_mknodat:
7564         if (!(p = lock_user_string(arg2)))
7565             return -TARGET_EFAULT;
7566         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7567         unlock_user(p, arg2, 0);
7568         return ret;
7569 #endif
7570 #ifdef TARGET_NR_chmod
7571     case TARGET_NR_chmod:
7572         if (!(p = lock_user_string(arg1)))
7573             return -TARGET_EFAULT;
7574         ret = get_errno(chmod(p, arg2));
7575         unlock_user(p, arg1, 0);
7576         return ret;
7577 #endif
7578 #ifdef TARGET_NR_lseek
7579     case TARGET_NR_lseek:
7580         return get_errno(lseek(arg1, arg2, arg3));
7581 #endif
7582 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7583     /* Alpha specific */
7584     case TARGET_NR_getxpid:
7585         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7586         return get_errno(getpid());
7587 #endif
7588 #ifdef TARGET_NR_getpid
7589     case TARGET_NR_getpid:
7590         return get_errno(getpid());
7591 #endif
7592     case TARGET_NR_mount:
7593         {
7594             /* need to look at the data field */
7595             void *p2, *p3;
7596 
7597             if (arg1) {
7598                 p = lock_user_string(arg1);
7599                 if (!p) {
7600                     return -TARGET_EFAULT;
7601                 }
7602             } else {
7603                 p = NULL;
7604             }
7605 
7606             p2 = lock_user_string(arg2);
7607             if (!p2) {
7608                 if (arg1) {
7609                     unlock_user(p, arg1, 0);
7610                 }
7611                 return -TARGET_EFAULT;
7612             }
7613 
7614             if (arg3) {
7615                 p3 = lock_user_string(arg3);
7616                 if (!p3) {
7617                     if (arg1) {
7618                         unlock_user(p, arg1, 0);
7619                     }
7620                     unlock_user(p2, arg2, 0);
7621                     return -TARGET_EFAULT;
7622                 }
7623             } else {
7624                 p3 = NULL;
7625             }
7626 
7627             /* FIXME - arg5 should be locked, but it isn't clear how to
7628              * do that since it's not guaranteed to be a NULL-terminated
7629              * string.
7630              */
7631             if (!arg5) {
7632                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7633             } else {
7634                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7635             }
7636             ret = get_errno(ret);
7637 
7638             if (arg1) {
7639                 unlock_user(p, arg1, 0);
7640             }
7641             unlock_user(p2, arg2, 0);
7642             if (arg3) {
7643                 unlock_user(p3, arg3, 0);
7644             }
7645         }
7646         return ret;
7647 #ifdef TARGET_NR_umount
7648     case TARGET_NR_umount:
7649         if (!(p = lock_user_string(arg1)))
7650             return -TARGET_EFAULT;
7651         ret = get_errno(umount(p));
7652         unlock_user(p, arg1, 0);
7653         return ret;
7654 #endif
7655 #ifdef TARGET_NR_stime /* not on alpha */
7656     case TARGET_NR_stime:
7657         {
7658             time_t host_time;
7659             if (get_user_sal(host_time, arg1))
7660                 return -TARGET_EFAULT;
7661             return get_errno(stime(&host_time));
7662         }
7663 #endif
7664 #ifdef TARGET_NR_alarm /* not on alpha */
7665     case TARGET_NR_alarm:
7666         return alarm(arg1);
7667 #endif
7668 #ifdef TARGET_NR_pause /* not on alpha */
7669     case TARGET_NR_pause:
7670         if (!block_signals()) {
7671             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7672         }
7673         return -TARGET_EINTR;
7674 #endif
7675 #ifdef TARGET_NR_utime
7676     case TARGET_NR_utime:
7677         {
7678             struct utimbuf tbuf, *host_tbuf;
7679             struct target_utimbuf *target_tbuf;
7680             if (arg2) {
7681                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7682                     return -TARGET_EFAULT;
7683                 tbuf.actime = tswapal(target_tbuf->actime);
7684                 tbuf.modtime = tswapal(target_tbuf->modtime);
7685                 unlock_user_struct(target_tbuf, arg2, 0);
7686                 host_tbuf = &tbuf;
7687             } else {
7688                 host_tbuf = NULL;
7689             }
7690             if (!(p = lock_user_string(arg1)))
7691                 return -TARGET_EFAULT;
7692             ret = get_errno(utime(p, host_tbuf));
7693             unlock_user(p, arg1, 0);
7694         }
7695         return ret;
7696 #endif
7697 #ifdef TARGET_NR_utimes
7698     case TARGET_NR_utimes:
7699         {
7700             struct timeval *tvp, tv[2];
7701             if (arg2) {
7702                 if (copy_from_user_timeval(&tv[0], arg2)
7703                     || copy_from_user_timeval(&tv[1],
7704                                               arg2 + sizeof(struct target_timeval)))
7705                     return -TARGET_EFAULT;
7706                 tvp = tv;
7707             } else {
7708                 tvp = NULL;
7709             }
7710             if (!(p = lock_user_string(arg1)))
7711                 return -TARGET_EFAULT;
7712             ret = get_errno(utimes(p, tvp));
7713             unlock_user(p, arg1, 0);
7714         }
7715         return ret;
7716 #endif
7717 #if defined(TARGET_NR_futimesat)
7718     case TARGET_NR_futimesat:
7719         {
7720             struct timeval *tvp, tv[2];
7721             if (arg3) {
7722                 if (copy_from_user_timeval(&tv[0], arg3)
7723                     || copy_from_user_timeval(&tv[1],
7724                                               arg3 + sizeof(struct target_timeval)))
7725                     return -TARGET_EFAULT;
7726                 tvp = tv;
7727             } else {
7728                 tvp = NULL;
7729             }
7730             if (!(p = lock_user_string(arg2))) {
7731                 return -TARGET_EFAULT;
7732             }
7733             ret = get_errno(futimesat(arg1, path(p), tvp));
7734             unlock_user(p, arg2, 0);
7735         }
7736         return ret;
7737 #endif
7738 #ifdef TARGET_NR_access
7739     case TARGET_NR_access:
7740         if (!(p = lock_user_string(arg1))) {
7741             return -TARGET_EFAULT;
7742         }
7743         ret = get_errno(access(path(p), arg2));
7744         unlock_user(p, arg1, 0);
7745         return ret;
7746 #endif
7747 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7748     case TARGET_NR_faccessat:
7749         if (!(p = lock_user_string(arg2))) {
7750             return -TARGET_EFAULT;
7751         }
7752         ret = get_errno(faccessat(arg1, p, arg3, 0));
7753         unlock_user(p, arg2, 0);
7754         return ret;
7755 #endif
7756 #ifdef TARGET_NR_nice /* not on alpha */
7757     case TARGET_NR_nice:
7758         return get_errno(nice(arg1));
7759 #endif
7760     case TARGET_NR_sync:
7761         sync();
7762         return 0;
7763 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7764     case TARGET_NR_syncfs:
7765         return get_errno(syncfs(arg1));
7766 #endif
7767     case TARGET_NR_kill:
7768         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7769 #ifdef TARGET_NR_rename
7770     case TARGET_NR_rename:
7771         {
7772             void *p2;
7773             p = lock_user_string(arg1);
7774             p2 = lock_user_string(arg2);
7775             if (!p || !p2)
7776                 ret = -TARGET_EFAULT;
7777             else
7778                 ret = get_errno(rename(p, p2));
7779             unlock_user(p2, arg2, 0);
7780             unlock_user(p, arg1, 0);
7781         }
7782         return ret;
7783 #endif
7784 #if defined(TARGET_NR_renameat)
7785     case TARGET_NR_renameat:
7786         {
7787             void *p2;
7788             p  = lock_user_string(arg2);
7789             p2 = lock_user_string(arg4);
7790             if (!p || !p2)
7791                 ret = -TARGET_EFAULT;
7792             else
7793                 ret = get_errno(renameat(arg1, p, arg3, p2));
7794             unlock_user(p2, arg4, 0);
7795             unlock_user(p, arg2, 0);
7796         }
7797         return ret;
7798 #endif
7799 #if defined(TARGET_NR_renameat2)
7800     case TARGET_NR_renameat2:
7801         {
7802             void *p2;
7803             p  = lock_user_string(arg2);
7804             p2 = lock_user_string(arg4);
7805             if (!p || !p2) {
7806                 ret = -TARGET_EFAULT;
7807             } else {
7808                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7809             }
7810             unlock_user(p2, arg4, 0);
7811             unlock_user(p, arg2, 0);
7812         }
7813         return ret;
7814 #endif
7815 #ifdef TARGET_NR_mkdir
7816     case TARGET_NR_mkdir:
7817         if (!(p = lock_user_string(arg1)))
7818             return -TARGET_EFAULT;
7819         ret = get_errno(mkdir(p, arg2));
7820         unlock_user(p, arg1, 0);
7821         return ret;
7822 #endif
7823 #if defined(TARGET_NR_mkdirat)
7824     case TARGET_NR_mkdirat:
7825         if (!(p = lock_user_string(arg2)))
7826             return -TARGET_EFAULT;
7827         ret = get_errno(mkdirat(arg1, p, arg3));
7828         unlock_user(p, arg2, 0);
7829         return ret;
7830 #endif
7831 #ifdef TARGET_NR_rmdir
7832     case TARGET_NR_rmdir:
7833         if (!(p = lock_user_string(arg1)))
7834             return -TARGET_EFAULT;
7835         ret = get_errno(rmdir(p));
7836         unlock_user(p, arg1, 0);
7837         return ret;
7838 #endif
7839     case TARGET_NR_dup:
7840         ret = get_errno(dup(arg1));
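             /* Propagate any fd translator registered for the old fd to the
              * duplicate (see fd-trans.h).
              */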
7841         if (ret >= 0) {
7842             fd_trans_dup(arg1, ret);
7843         }
7844         return ret;
7845 #ifdef TARGET_NR_pipe
7846     case TARGET_NR_pipe:
7847         return do_pipe(cpu_env, arg1, 0, 0);
7848 #endif
7849 #ifdef TARGET_NR_pipe2
7850     case TARGET_NR_pipe2:
7851         return do_pipe(cpu_env, arg1,
7852                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7853 #endif
7854     case TARGET_NR_times:
7855         {
7856             struct target_tms *tmsp;
7857             struct tms tms;
7858             ret = get_errno(times(&tms));
7859             if (arg1) {
7860                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7861                 if (!tmsp)
7862                     return -TARGET_EFAULT;
7863                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7864                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7865                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7866                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
                     unlock_user(tmsp, arg1, sizeof(struct target_tms));
7867             }
7868             if (!is_error(ret))
7869                 ret = host_to_target_clock_t(ret);
7870         }
7871         return ret;
7872     case TARGET_NR_acct:
7873         if (arg1 == 0) {
7874             ret = get_errno(acct(NULL));
7875         } else {
7876             if (!(p = lock_user_string(arg1))) {
7877                 return -TARGET_EFAULT;
7878             }
7879             ret = get_errno(acct(path(p)));
7880             unlock_user(p, arg1, 0);
7881         }
7882         return ret;
7883 #ifdef TARGET_NR_umount2
7884     case TARGET_NR_umount2:
7885         if (!(p = lock_user_string(arg1)))
7886             return -TARGET_EFAULT;
7887         ret = get_errno(umount2(p, arg2));
7888         unlock_user(p, arg1, 0);
7889         return ret;
7890 #endif
7891     case TARGET_NR_ioctl:
7892         return do_ioctl(arg1, arg2, arg3);
7893 #ifdef TARGET_NR_fcntl
7894     case TARGET_NR_fcntl:
7895         return do_fcntl(arg1, arg2, arg3);
7896 #endif
7897     case TARGET_NR_setpgid:
7898         return get_errno(setpgid(arg1, arg2));
7899     case TARGET_NR_umask:
7900         return get_errno(umask(arg1));
7901     case TARGET_NR_chroot:
7902         if (!(p = lock_user_string(arg1)))
7903             return -TARGET_EFAULT;
7904         ret = get_errno(chroot(p));
7905         unlock_user(p, arg1, 0);
7906         return ret;
7907 #ifdef TARGET_NR_dup2
7908     case TARGET_NR_dup2:
7909         ret = get_errno(dup2(arg1, arg2));
7910         if (ret >= 0) {
7911             fd_trans_dup(arg1, arg2);
7912         }
7913         return ret;
7914 #endif
7915 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7916     case TARGET_NR_dup3:
7917     {
7918         int host_flags;
7919 
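             /* O_CLOEXEC is the only flag dup3() accepts; reject anything
              * else, as the kernel does.
              */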
7920         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7921             return -TARGET_EINVAL;
7922         }
7923         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7924         ret = get_errno(dup3(arg1, arg2, host_flags));
7925         if (ret >= 0) {
7926             fd_trans_dup(arg1, arg2);
7927         }
7928         return ret;
7929     }
7930 #endif
7931 #ifdef TARGET_NR_getppid /* not on alpha */
7932     case TARGET_NR_getppid:
7933         return get_errno(getppid());
7934 #endif
7935 #ifdef TARGET_NR_getpgrp
7936     case TARGET_NR_getpgrp:
7937         return get_errno(getpgrp());
7938 #endif
7939     case TARGET_NR_setsid:
7940         return get_errno(setsid());
7941 #ifdef TARGET_NR_sigaction
7942     case TARGET_NR_sigaction:
7943         {
7944 #if defined(TARGET_ALPHA)
7945             struct target_sigaction act, oact, *pact = 0;
7946             struct target_old_sigaction *old_act;
7947             if (arg2) {
7948                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7949                     return -TARGET_EFAULT;
7950                 act._sa_handler = old_act->_sa_handler;
7951                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7952                 act.sa_flags = old_act->sa_flags;
7953                 act.sa_restorer = 0;
7954                 unlock_user_struct(old_act, arg2, 0);
7955                 pact = &act;
7956             }
7957             ret = get_errno(do_sigaction(arg1, pact, &oact));
7958             if (!is_error(ret) && arg3) {
7959                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7960                     return -TARGET_EFAULT;
7961                 old_act->_sa_handler = oact._sa_handler;
7962                 old_act->sa_mask = oact.sa_mask.sig[0];
7963                 old_act->sa_flags = oact.sa_flags;
7964                 unlock_user_struct(old_act, arg3, 1);
7965             }
7966 #elif defined(TARGET_MIPS)
7967             struct target_sigaction act, oact, *pact, *old_act;
7968 
7969             if (arg2) {
7970                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7971                     return -TARGET_EFAULT;
7972                 act._sa_handler = old_act->_sa_handler;
7973                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7974                 act.sa_flags = old_act->sa_flags;
7975                 unlock_user_struct(old_act, arg2, 0);
7976                 pact = &act;
7977             } else {
7978                 pact = NULL;
7979             }
7980 
7981             ret = get_errno(do_sigaction(arg1, pact, &oact));
7982 
7983             if (!is_error(ret) && arg3) {
7984                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7985                     return -TARGET_EFAULT;
7986                 old_act->_sa_handler = oact._sa_handler;
7987                 old_act->sa_flags = oact.sa_flags;
7988                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7989                 old_act->sa_mask.sig[1] = 0;
7990                 old_act->sa_mask.sig[2] = 0;
7991                 old_act->sa_mask.sig[3] = 0;
7992                 unlock_user_struct(old_act, arg3, 1);
7993             }
7994 #else
7995             struct target_old_sigaction *old_act;
7996             struct target_sigaction act, oact, *pact;
7997             if (arg2) {
7998                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7999                     return -TARGET_EFAULT;
8000                 act._sa_handler = old_act->_sa_handler;
8001                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8002                 act.sa_flags = old_act->sa_flags;
8003                 act.sa_restorer = old_act->sa_restorer;
8004 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8005                 act.ka_restorer = 0;
8006 #endif
8007                 unlock_user_struct(old_act, arg2, 0);
8008                 pact = &act;
8009             } else {
8010                 pact = NULL;
8011             }
8012             ret = get_errno(do_sigaction(arg1, pact, &oact));
8013             if (!is_error(ret) && arg3) {
8014                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8015                     return -TARGET_EFAULT;
8016                 old_act->_sa_handler = oact._sa_handler;
8017                 old_act->sa_mask = oact.sa_mask.sig[0];
8018                 old_act->sa_flags = oact.sa_flags;
8019                 old_act->sa_restorer = oact.sa_restorer;
8020                 unlock_user_struct(old_act, arg3, 1);
8021             }
8022 #endif
8023         }
8024         return ret;
8025 #endif
8026     case TARGET_NR_rt_sigaction:
8027         {
8028 #if defined(TARGET_ALPHA)
8029             /* For Alpha and SPARC this is a 5 argument syscall, with
8030              * a 'restorer' parameter which must be copied into the
8031              * sa_restorer field of the sigaction struct.
8032              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8033              * and arg5 is the sigsetsize.
8034              * Alpha also has a separate rt_sigaction struct that it uses
8035              * here; SPARC uses the usual sigaction struct.
8036              */
8037             struct target_rt_sigaction *rt_act;
8038             struct target_sigaction act, oact, *pact = 0;
8039 
8040             if (arg4 != sizeof(target_sigset_t)) {
8041                 return -TARGET_EINVAL;
8042             }
8043             if (arg2) {
8044                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8045                     return -TARGET_EFAULT;
8046                 act._sa_handler = rt_act->_sa_handler;
8047                 act.sa_mask = rt_act->sa_mask;
8048                 act.sa_flags = rt_act->sa_flags;
8049                 act.sa_restorer = arg5;
8050                 unlock_user_struct(rt_act, arg2, 0);
8051                 pact = &act;
8052             }
8053             ret = get_errno(do_sigaction(arg1, pact, &oact));
8054             if (!is_error(ret) && arg3) {
8055                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8056                     return -TARGET_EFAULT;
8057                 rt_act->_sa_handler = oact._sa_handler;
8058                 rt_act->sa_mask = oact.sa_mask;
8059                 rt_act->sa_flags = oact.sa_flags;
8060                 unlock_user_struct(rt_act, arg3, 1);
8061             }
8062 #else
8063 #ifdef TARGET_SPARC
8064             target_ulong restorer = arg4;
8065             target_ulong sigsetsize = arg5;
8066 #else
8067             target_ulong sigsetsize = arg4;
8068 #endif
8069             struct target_sigaction *act;
8070             struct target_sigaction *oact;
8071 
8072             if (sigsetsize != sizeof(target_sigset_t)) {
8073                 return -TARGET_EINVAL;
8074             }
8075             if (arg2) {
8076                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8077                     return -TARGET_EFAULT;
8078                 }
8079 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8080                 act->ka_restorer = restorer;
8081 #endif
8082             } else {
8083                 act = NULL;
8084             }
8085             if (arg3) {
8086                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8087                     ret = -TARGET_EFAULT;
8088                     goto rt_sigaction_fail;
8089                 }
8090             } else
8091                 oact = NULL;
8092             ret = get_errno(do_sigaction(arg1, act, oact));
8093     rt_sigaction_fail:
8094             if (act)
8095                 unlock_user_struct(act, arg2, 0);
8096             if (oact)
8097                 unlock_user_struct(oact, arg3, 1);
8098 #endif
8099         }
8100         return ret;
8101 #ifdef TARGET_NR_sgetmask /* not on alpha */
8102     case TARGET_NR_sgetmask:
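             /* Legacy call: returns the signal mask in the old single-word
              * sigset format.
              */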
8103         {
8104             sigset_t cur_set;
8105             abi_ulong target_set;
8106             ret = do_sigprocmask(0, NULL, &cur_set);
8107             if (!ret) {
8108                 host_to_target_old_sigset(&target_set, &cur_set);
8109                 ret = target_set;
8110             }
8111         }
8112         return ret;
8113 #endif
8114 #ifdef TARGET_NR_ssetmask /* not on alpha */
8115     case TARGET_NR_ssetmask:
8116         {
8117             sigset_t set, oset;
8118             abi_ulong target_set = arg1;
8119             target_to_host_old_sigset(&set, &target_set);
8120             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8121             if (!ret) {
8122                 host_to_target_old_sigset(&target_set, &oset);
8123                 ret = target_set;
8124             }
8125         }
8126         return ret;
8127 #endif
8128 #ifdef TARGET_NR_sigprocmask
8129     case TARGET_NR_sigprocmask:
8130         {
8131 #if defined(TARGET_ALPHA)
8132             sigset_t set, oldset;
8133             abi_ulong mask;
8134             int how;
8135 
8136             switch (arg1) {
8137             case TARGET_SIG_BLOCK:
8138                 how = SIG_BLOCK;
8139                 break;
8140             case TARGET_SIG_UNBLOCK:
8141                 how = SIG_UNBLOCK;
8142                 break;
8143             case TARGET_SIG_SETMASK:
8144                 how = SIG_SETMASK;
8145                 break;
8146             default:
8147                 return -TARGET_EINVAL;
8148             }
8149             mask = arg2;
8150             target_to_host_old_sigset(&set, &mask);
8151 
8152             ret = do_sigprocmask(how, &set, &oldset);
8153             if (!is_error(ret)) {
8154                 host_to_target_old_sigset(&mask, &oldset);
8155                 ret = mask;
8156                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8157             }
8158 #else
8159             sigset_t set, oldset, *set_ptr;
8160             int how;
8161 
8162             if (arg2) {
8163                 switch (arg1) {
8164                 case TARGET_SIG_BLOCK:
8165                     how = SIG_BLOCK;
8166                     break;
8167                 case TARGET_SIG_UNBLOCK:
8168                     how = SIG_UNBLOCK;
8169                     break;
8170                 case TARGET_SIG_SETMASK:
8171                     how = SIG_SETMASK;
8172                     break;
8173                 default:
8174                     return -TARGET_EINVAL;
8175                 }
8176                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8177                     return -TARGET_EFAULT;
8178                 target_to_host_old_sigset(&set, p);
8179                 unlock_user(p, arg2, 0);
8180                 set_ptr = &set;
8181             } else {
8182                 how = 0;
8183                 set_ptr = NULL;
8184             }
8185             ret = do_sigprocmask(how, set_ptr, &oldset);
8186             if (!is_error(ret) && arg3) {
8187                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8188                     return -TARGET_EFAULT;
8189                 host_to_target_old_sigset(p, &oldset);
8190                 unlock_user(p, arg3, sizeof(target_sigset_t));
8191             }
8192 #endif
8193         }
8194         return ret;
8195 #endif
8196     case TARGET_NR_rt_sigprocmask:
8197         {
8198             int how = arg1;
8199             sigset_t set, oldset, *set_ptr;
8200 
8201             if (arg4 != sizeof(target_sigset_t)) {
8202                 return -TARGET_EINVAL;
8203             }
8204 
8205             if (arg2) {
8206                 switch(how) {
8207                 case TARGET_SIG_BLOCK:
8208                     how = SIG_BLOCK;
8209                     break;
8210                 case TARGET_SIG_UNBLOCK:
8211                     how = SIG_UNBLOCK;
8212                     break;
8213                 case TARGET_SIG_SETMASK:
8214                     how = SIG_SETMASK;
8215                     break;
8216                 default:
8217                     return -TARGET_EINVAL;
8218                 }
8219                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8220                     return -TARGET_EFAULT;
8221                 target_to_host_sigset(&set, p);
8222                 unlock_user(p, arg2, 0);
8223                 set_ptr = &set;
8224             } else {
8225                 how = 0;
8226                 set_ptr = NULL;
8227             }
8228             ret = do_sigprocmask(how, set_ptr, &oldset);
8229             if (!is_error(ret) && arg3) {
8230                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8231                     return -TARGET_EFAULT;
8232                 host_to_target_sigset(p, &oldset);
8233                 unlock_user(p, arg3, sizeof(target_sigset_t));
8234             }
8235         }
8236         return ret;
8237 #ifdef TARGET_NR_sigpending
8238     case TARGET_NR_sigpending:
8239         {
8240             sigset_t set;
8241             ret = get_errno(sigpending(&set));
8242             if (!is_error(ret)) {
8243                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8244                     return -TARGET_EFAULT;
8245                 host_to_target_old_sigset(p, &set);
8246                 unlock_user(p, arg1, sizeof(target_sigset_t));
8247             }
8248         }
8249         return ret;
8250 #endif
8251     case TARGET_NR_rt_sigpending:
8252         {
8253             sigset_t set;
8254 
8255             /* Yes, this check is >, not != like most. We follow the kernel's
8256              * logic here: the kernel implements NR_sigpending through the
8257              * same code path, and in that case the old_sigset_t is smaller
8258              * in size.
8259              */
8260             if (arg2 > sizeof(target_sigset_t)) {
8261                 return -TARGET_EINVAL;
8262             }
8263 
8264             ret = get_errno(sigpending(&set));
8265             if (!is_error(ret)) {
8266                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8267                     return -TARGET_EFAULT;
8268                 host_to_target_sigset(p, &set);
8269                 unlock_user(p, arg1, sizeof(target_sigset_t));
8270             }
8271         }
8272         return ret;
8273 #ifdef TARGET_NR_sigsuspend
8274     case TARGET_NR_sigsuspend:
8275         {
8276             TaskState *ts = cpu->opaque;
8277 #if defined(TARGET_ALPHA)
8278             abi_ulong mask = arg1;
8279             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8280 #else
8281             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8282                 return -TARGET_EFAULT;
8283             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8284             unlock_user(p, arg1, 0);
8285 #endif
8286             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8287                                                SIGSET_T_SIZE));
8288             if (ret != -TARGET_ERESTARTSYS) {
8289                 ts->in_sigsuspend = 1;
8290             }
8291         }
8292         return ret;
8293 #endif
8294     case TARGET_NR_rt_sigsuspend:
8295         {
8296             TaskState *ts = cpu->opaque;
8297 
8298             if (arg2 != sizeof(target_sigset_t)) {
8299                 return -TARGET_EINVAL;
8300             }
8301             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8302                 return -TARGET_EFAULT;
8303             target_to_host_sigset(&ts->sigsuspend_mask, p);
8304             unlock_user(p, arg1, 0);
8305             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8306                                                SIGSET_T_SIZE));
8307             if (ret != -TARGET_ERESTARTSYS) {
8308                 ts->in_sigsuspend = 1;
8309             }
8310         }
8311         return ret;
8312     case TARGET_NR_rt_sigtimedwait:
8313         {
8314             sigset_t set;
8315             struct timespec uts, *puts;
8316             siginfo_t uinfo;
8317 
8318             if (arg4 != sizeof(target_sigset_t)) {
8319                 return -TARGET_EINVAL;
8320             }
8321 
8322             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8323                 return -TARGET_EFAULT;
8324             target_to_host_sigset(&set, p);
8325             unlock_user(p, arg1, 0);
8326             if (arg3) {
8327                 puts = &uts;
8328                 if (target_to_host_timespec(puts, arg3)) {
                         return -TARGET_EFAULT;
                     }
8329             } else {
8330                 puts = NULL;
8331             }
8332             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8333                                                  SIGSET_T_SIZE));
8334             if (!is_error(ret)) {
8335                 if (arg2) {
8336                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8337                                   0);
8338                     if (!p) {
8339                         return -TARGET_EFAULT;
8340                     }
8341                     host_to_target_siginfo(p, &uinfo);
8342                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8343                 }
8344                 ret = host_to_target_signal(ret);
8345             }
8346         }
8347         return ret;
8348     case TARGET_NR_rt_sigqueueinfo:
8349         {
8350             siginfo_t uinfo;
8351 
8352             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8353             if (!p) {
8354                 return -TARGET_EFAULT;
8355             }
8356             target_to_host_siginfo(&uinfo, p);
8357             unlock_user(p, arg3, 0);
8358             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8359         }
8360         return ret;
8361     case TARGET_NR_rt_tgsigqueueinfo:
8362         {
8363             siginfo_t uinfo;
8364 
8365             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8366             if (!p) {
8367                 return -TARGET_EFAULT;
8368             }
8369             target_to_host_siginfo(&uinfo, p);
8370             unlock_user(p, arg4, 0);
8371             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8372         }
8373         return ret;
8374 #ifdef TARGET_NR_sigreturn
8375     case TARGET_NR_sigreturn:
8376         if (block_signals()) {
8377             return -TARGET_ERESTARTSYS;
8378         }
8379         return do_sigreturn(cpu_env);
8380 #endif
8381     case TARGET_NR_rt_sigreturn:
8382         if (block_signals()) {
8383             return -TARGET_ERESTARTSYS;
8384         }
8385         return do_rt_sigreturn(cpu_env);
8386     case TARGET_NR_sethostname:
8387         if (!(p = lock_user_string(arg1)))
8388             return -TARGET_EFAULT;
8389         ret = get_errno(sethostname(p, arg2));
8390         unlock_user(p, arg1, 0);
8391         return ret;
8392 #ifdef TARGET_NR_setrlimit
8393     case TARGET_NR_setrlimit:
8394         {
8395             int resource = target_to_host_resource(arg1);
8396             struct target_rlimit *target_rlim;
8397             struct rlimit rlim;
8398             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8399                 return -TARGET_EFAULT;
8400             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8401             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8402             unlock_user_struct(target_rlim, arg2, 0);
8403             /*
8404              * If we just passed through resource limit settings for memory then
8405              * they would also apply to QEMU's own allocations, and QEMU will
8406              * crash or hang or die if its allocations fail. Ideally we would
8407              * track the guest allocations in QEMU and apply the limits ourselves.
8408              * For now, just tell the guest the call succeeded but don't actually
8409              * limit anything.
8410              */
8411             if (resource != RLIMIT_AS &&
8412                 resource != RLIMIT_DATA &&
8413                 resource != RLIMIT_STACK) {
8414                 return get_errno(setrlimit(resource, &rlim));
8415             } else {
8416                 return 0;
8417             }
8418         }
8419 #endif
8420 #ifdef TARGET_NR_getrlimit
8421     case TARGET_NR_getrlimit:
8422         {
8423             int resource = target_to_host_resource(arg1);
8424             struct target_rlimit *target_rlim;
8425             struct rlimit rlim;
8426 
8427             ret = get_errno(getrlimit(resource, &rlim));
8428             if (!is_error(ret)) {
8429                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8430                     return -TARGET_EFAULT;
8431                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8432                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8433                 unlock_user_struct(target_rlim, arg2, 1);
8434             }
8435         }
8436         return ret;
8437 #endif
8438     case TARGET_NR_getrusage:
8439         {
8440             struct rusage rusage;
8441             ret = get_errno(getrusage(arg1, &rusage));
8442             if (!is_error(ret)) {
8443                 ret = host_to_target_rusage(arg2, &rusage);
8444             }
8445         }
8446         return ret;
8447     case TARGET_NR_gettimeofday:
8448         {
8449             struct timeval tv;
8450             ret = get_errno(gettimeofday(&tv, NULL));
8451             if (!is_error(ret)) {
8452                 if (copy_to_user_timeval(arg1, &tv))
8453                     return -TARGET_EFAULT;
8454             }
8455         }
8456         return ret;
8457     case TARGET_NR_settimeofday:
8458         {
8459             struct timeval tv, *ptv = NULL;
8460             struct timezone tz, *ptz = NULL;
8461 
8462             if (arg1) {
8463                 if (copy_from_user_timeval(&tv, arg1)) {
8464                     return -TARGET_EFAULT;
8465                 }
8466                 ptv = &tv;
8467             }
8468 
8469             if (arg2) {
8470                 if (copy_from_user_timezone(&tz, arg2)) {
8471                     return -TARGET_EFAULT;
8472                 }
8473                 ptz = &tz;
8474             }
8475 
8476             return get_errno(settimeofday(ptv, ptz));
8477         }
8478 #if defined(TARGET_NR_select)
8479     case TARGET_NR_select:
8480 #if defined(TARGET_WANT_NI_OLD_SELECT)
8481         /* Some architectures used to implement old_select here,
8482          * but they now return ENOSYS for it.
8483          */
8484         ret = -TARGET_ENOSYS;
8485 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8486         ret = do_old_select(arg1);
8487 #else
8488         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8489 #endif
8490         return ret;
8491 #endif
8492 #ifdef TARGET_NR_pselect6
8493     case TARGET_NR_pselect6:
8494         {
8495             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8496             fd_set rfds, wfds, efds;
8497             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8498             struct timespec ts, *ts_ptr;
8499 
8500             /*
8501              * The 6th arg is actually two args smashed together,
8502              * so we cannot use the C library.
8503              */
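                 /*
                  * In guest memory arg6 points to a pair of abi_ulongs:
                  * the guest address of the sigset (which may be 0) followed
                  * by the size of that sigset.
                  */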
8504             sigset_t set;
8505             struct {
8506                 sigset_t *set;
8507                 size_t size;
8508             } sig, *sig_ptr;
8509 
8510             abi_ulong arg_sigset, arg_sigsize, *arg7;
8511             target_sigset_t *target_sigset;
8512 
8513             n = arg1;
8514             rfd_addr = arg2;
8515             wfd_addr = arg3;
8516             efd_addr = arg4;
8517             ts_addr = arg5;
8518 
8519             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8520             if (ret) {
8521                 return ret;
8522             }
8523             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8524             if (ret) {
8525                 return ret;
8526             }
8527             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8528             if (ret) {
8529                 return ret;
8530             }
8531 
8532             /*
8533              * This takes a timespec, and not a timeval, so we cannot
8534              * use the do_select() helper ...
8535              */
8536             if (ts_addr) {
8537                 if (target_to_host_timespec(&ts, ts_addr)) {
8538                     return -TARGET_EFAULT;
8539                 }
8540                 ts_ptr = &ts;
8541             } else {
8542                 ts_ptr = NULL;
8543             }
8544 
8545             /* Extract the two packed args for the sigset */
8546             if (arg6) {
8547                 sig_ptr = &sig;
8548                 sig.size = SIGSET_T_SIZE;
8549 
8550                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8551                 if (!arg7) {
8552                     return -TARGET_EFAULT;
8553                 }
8554                 arg_sigset = tswapal(arg7[0]);
8555                 arg_sigsize = tswapal(arg7[1]);
8556                 unlock_user(arg7, arg6, 0);
8557 
8558                 if (arg_sigset) {
8559                     sig.set = &set;
8560                     if (arg_sigsize != sizeof(*target_sigset)) {
8561                         /* Like the kernel, we enforce correct size sigsets */
8562                         return -TARGET_EINVAL;
8563                     }
8564                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8565                                               sizeof(*target_sigset), 1);
8566                     if (!target_sigset) {
8567                         return -TARGET_EFAULT;
8568                     }
8569                     target_to_host_sigset(&set, target_sigset);
8570                     unlock_user(target_sigset, arg_sigset, 0);
8571                 } else {
8572                     sig.set = NULL;
8573                 }
8574             } else {
8575                 sig_ptr = NULL;
8576             }
8577 
8578             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8579                                           ts_ptr, sig_ptr));
8580 
8581             if (!is_error(ret)) {
8582                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8583                     return -TARGET_EFAULT;
8584                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8585                     return -TARGET_EFAULT;
8586                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8587                     return -TARGET_EFAULT;
8588 
8589                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8590                     return -TARGET_EFAULT;
8591             }
8592         }
8593         return ret;
8594 #endif
8595 #ifdef TARGET_NR_symlink
8596     case TARGET_NR_symlink:
8597         {
8598             void *p2;
8599             p = lock_user_string(arg1);
8600             p2 = lock_user_string(arg2);
8601             if (!p || !p2)
8602                 ret = -TARGET_EFAULT;
8603             else
8604                 ret = get_errno(symlink(p, p2));
8605             unlock_user(p2, arg2, 0);
8606             unlock_user(p, arg1, 0);
8607         }
8608         return ret;
8609 #endif
8610 #if defined(TARGET_NR_symlinkat)
8611     case TARGET_NR_symlinkat:
8612         {
8613             void *p2;
8614             p  = lock_user_string(arg1);
8615             p2 = lock_user_string(arg3);
8616             if (!p || !p2)
8617                 ret = -TARGET_EFAULT;
8618             else
8619                 ret = get_errno(symlinkat(p, arg2, p2));
8620             unlock_user(p2, arg3, 0);
8621             unlock_user(p, arg1, 0);
8622         }
8623         return ret;
8624 #endif
8625 #ifdef TARGET_NR_readlink
8626     case TARGET_NR_readlink:
8627         {
8628             void *p2;
8629             p = lock_user_string(arg1);
8630             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8631             if (!p || !p2) {
8632                 ret = -TARGET_EFAULT;
8633             } else if (!arg3) {
8634                 /* Short circuit this for the magic exe check. */
8635                 ret = -TARGET_EINVAL;
8636             } else if (is_proc_myself((const char *)p, "exe")) {
8637                 char real[PATH_MAX], *temp;
8638                 temp = realpath(exec_path, real);
8639                 /* Return value is # of bytes that we wrote to the buffer. */
8640                 if (temp == NULL) {
8641                     ret = get_errno(-1);
8642                 } else {
8643                     /* Don't worry about sign mismatch as earlier mapping
8644                      * logic would have thrown a bad address error. */
8645                     ret = MIN(strlen(real), arg3);
8646                     /* We cannot NUL terminate the string. */
8647                     memcpy(p2, real, ret);
8648                 }
8649             } else {
8650                 ret = get_errno(readlink(path(p), p2, arg3));
8651             }
8652             unlock_user(p2, arg2, ret);
8653             unlock_user(p, arg1, 0);
8654         }
8655         return ret;
8656 #endif
8657 #if defined(TARGET_NR_readlinkat)
8658     case TARGET_NR_readlinkat:
8659         {
8660             void *p2;
8661             p  = lock_user_string(arg2);
8662             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8663             if (!p || !p2) {
8664                 ret = -TARGET_EFAULT;
8665             } else if (is_proc_myself((const char *)p, "exe")) {
8666                 char real[PATH_MAX], *temp;
8667                 temp = realpath(exec_path, real);
8668                 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
8669                 if (temp != NULL) {
                         snprintf((char *)p2, arg4, "%s", real);
                     }
8670             } else {
8671                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8672             }
8673             unlock_user(p2, arg3, ret);
8674             unlock_user(p, arg2, 0);
8675         }
8676         return ret;
8677 #endif
8678 #ifdef TARGET_NR_swapon
8679     case TARGET_NR_swapon:
8680         if (!(p = lock_user_string(arg1)))
8681             return -TARGET_EFAULT;
8682         ret = get_errno(swapon(p, arg2));
8683         unlock_user(p, arg1, 0);
8684         return ret;
8685 #endif
8686     case TARGET_NR_reboot:
8687         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8688            /* arg4 is only used with RESTART2 and must be ignored otherwise */
8689            p = lock_user_string(arg4);
8690            if (!p) {
8691                return -TARGET_EFAULT;
8692            }
8693            ret = get_errno(reboot(arg1, arg2, arg3, p));
8694            unlock_user(p, arg4, 0);
8695         } else {
8696            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8697         }
8698         return ret;
8699 #ifdef TARGET_NR_mmap
8700     case TARGET_NR_mmap:
8701 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8702     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8703     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8704     || defined(TARGET_S390X)
8705         {
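                 /* On these targets the old mmap() takes a single guest
                  * pointer to a block of six arguments.
                  */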
8706             abi_ulong *v;
8707             abi_ulong v1, v2, v3, v4, v5, v6;
8708             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8709                 return -TARGET_EFAULT;
8710             v1 = tswapal(v[0]);
8711             v2 = tswapal(v[1]);
8712             v3 = tswapal(v[2]);
8713             v4 = tswapal(v[3]);
8714             v5 = tswapal(v[4]);
8715             v6 = tswapal(v[5]);
8716             unlock_user(v, arg1, 0);
8717             ret = get_errno(target_mmap(v1, v2, v3,
8718                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8719                                         v5, v6));
8720         }
8721 #else
8722         ret = get_errno(target_mmap(arg1, arg2, arg3,
8723                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8724                                     arg5,
8725                                     arg6));
8726 #endif
8727         return ret;
8728 #endif
8729 #ifdef TARGET_NR_mmap2
8730     case TARGET_NR_mmap2:
8731 #ifndef MMAP_SHIFT
8732 #define MMAP_SHIFT 12
8733 #endif
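             /* mmap2 passes the file offset in units of (1 << MMAP_SHIFT)
              * bytes; scale it up to a byte offset for target_mmap().
              */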
8734         ret = target_mmap(arg1, arg2, arg3,
8735                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8736                           arg5, arg6 << MMAP_SHIFT);
8737         return get_errno(ret);
8738 #endif
8739     case TARGET_NR_munmap:
8740         return get_errno(target_munmap(arg1, arg2));
8741     case TARGET_NR_mprotect:
8742         {
8743             TaskState *ts = cpu->opaque;
8744             /* Special hack to detect libc making the stack executable.  */
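                 /*
                  * target_mprotect() does not handle PROT_GROWSDOWN, so drop
                  * the flag and extend the affected range down to the stack
                  * limit, as the kernel would.
                  */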
8745             if ((arg3 & PROT_GROWSDOWN)
8746                 && arg1 >= ts->info->stack_limit
8747                 && arg1 <= ts->info->start_stack) {
8748                 arg3 &= ~PROT_GROWSDOWN;
8749                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8750                 arg1 = ts->info->stack_limit;
8751             }
8752         }
8753         return get_errno(target_mprotect(arg1, arg2, arg3));
8754 #ifdef TARGET_NR_mremap
8755     case TARGET_NR_mremap:
8756         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8757 #endif
8758         /* ??? msync/mlock/munlock are broken for softmmu.  */
8759 #ifdef TARGET_NR_msync
8760     case TARGET_NR_msync:
8761         return get_errno(msync(g2h(arg1), arg2, arg3));
8762 #endif
8763 #ifdef TARGET_NR_mlock
8764     case TARGET_NR_mlock:
8765         return get_errno(mlock(g2h(arg1), arg2));
8766 #endif
8767 #ifdef TARGET_NR_munlock
8768     case TARGET_NR_munlock:
8769         return get_errno(munlock(g2h(arg1), arg2));
8770 #endif
8771 #ifdef TARGET_NR_mlockall
8772     case TARGET_NR_mlockall:
8773         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8774 #endif
8775 #ifdef TARGET_NR_munlockall
8776     case TARGET_NR_munlockall:
8777         return get_errno(munlockall());
8778 #endif
8779 #ifdef TARGET_NR_truncate
8780     case TARGET_NR_truncate:
8781         if (!(p = lock_user_string(arg1)))
8782             return -TARGET_EFAULT;
8783         ret = get_errno(truncate(p, arg2));
8784         unlock_user(p, arg1, 0);
8785         return ret;
8786 #endif
8787 #ifdef TARGET_NR_ftruncate
8788     case TARGET_NR_ftruncate:
8789         return get_errno(ftruncate(arg1, arg2));
8790 #endif
8791     case TARGET_NR_fchmod:
8792         return get_errno(fchmod(arg1, arg2));
8793 #if defined(TARGET_NR_fchmodat)
8794     case TARGET_NR_fchmodat:
8795         if (!(p = lock_user_string(arg2)))
8796             return -TARGET_EFAULT;
8797         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8798         unlock_user(p, arg2, 0);
8799         return ret;
8800 #endif
8801     case TARGET_NR_getpriority:
8802         /* Note that negative values are valid for getpriority, so we must
8803            differentiate based on errno settings.  */
8804         errno = 0;
8805         ret = getpriority(arg1, arg2);
8806         if (ret == -1 && errno != 0) {
8807             return -host_to_target_errno(errno);
8808         }
8809 #ifdef TARGET_ALPHA
8810         /* Return value is the unbiased priority.  Signal no error.  */
8811         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8812 #else
8813         /* Return value is a biased priority to avoid negative numbers.  */
8814         ret = 20 - ret;
8815 #endif
8816         return ret;
8817     case TARGET_NR_setpriority:
8818         return get_errno(setpriority(arg1, arg2, arg3));
8819 #ifdef TARGET_NR_statfs
8820     case TARGET_NR_statfs:
8821         if (!(p = lock_user_string(arg1))) {
8822             return -TARGET_EFAULT;
8823         }
8824         ret = get_errno(statfs(path(p), &stfs));
8825         unlock_user(p, arg1, 0);
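         /* fstatfs() jumps here to share the statfs -> target conversion. */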
8826     convert_statfs:
8827         if (!is_error(ret)) {
8828             struct target_statfs *target_stfs;
8829 
8830             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8831                 return -TARGET_EFAULT;
8832             __put_user(stfs.f_type, &target_stfs->f_type);
8833             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8834             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8835             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8836             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8837             __put_user(stfs.f_files, &target_stfs->f_files);
8838             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8839             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8840             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8841             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8842             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8843 #ifdef _STATFS_F_FLAGS
8844             __put_user(stfs.f_flags, &target_stfs->f_flags);
8845 #else
8846             __put_user(0, &target_stfs->f_flags);
8847 #endif
8848             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8849             unlock_user_struct(target_stfs, arg2, 1);
8850         }
8851         return ret;
8852 #endif
8853 #ifdef TARGET_NR_fstatfs
8854     case TARGET_NR_fstatfs:
8855         ret = get_errno(fstatfs(arg1, &stfs));
8856         goto convert_statfs;
8857 #endif
8858 #ifdef TARGET_NR_statfs64
8859     case TARGET_NR_statfs64:
8860         if (!(p = lock_user_string(arg1))) {
8861             return -TARGET_EFAULT;
8862         }
8863         ret = get_errno(statfs(path(p), &stfs));
8864         unlock_user(p, arg1, 0);
8865     convert_statfs64:
8866         if (!is_error(ret)) {
8867             struct target_statfs64 *target_stfs;
8868 
8869             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8870                 return -TARGET_EFAULT;
8871             __put_user(stfs.f_type, &target_stfs->f_type);
8872             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8873             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8874             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8875             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8876             __put_user(stfs.f_files, &target_stfs->f_files);
8877             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8878             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8879             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8880             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8881             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8882             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8883             unlock_user_struct(target_stfs, arg3, 1);
8884         }
8885         return ret;
8886     case TARGET_NR_fstatfs64:
8887         ret = get_errno(fstatfs(arg1, &stfs));
8888         goto convert_statfs64;
8889 #endif
8890 #ifdef TARGET_NR_socketcall
8891     case TARGET_NR_socketcall:
8892         return do_socketcall(arg1, arg2);
8893 #endif
8894 #ifdef TARGET_NR_accept
8895     case TARGET_NR_accept:
8896         return do_accept4(arg1, arg2, arg3, 0);
8897 #endif
8898 #ifdef TARGET_NR_accept4
8899     case TARGET_NR_accept4:
8900         return do_accept4(arg1, arg2, arg3, arg4);
8901 #endif
8902 #ifdef TARGET_NR_bind
8903     case TARGET_NR_bind:
8904         return do_bind(arg1, arg2, arg3);
8905 #endif
8906 #ifdef TARGET_NR_connect
8907     case TARGET_NR_connect:
8908         return do_connect(arg1, arg2, arg3);
8909 #endif
8910 #ifdef TARGET_NR_getpeername
8911     case TARGET_NR_getpeername:
8912         return do_getpeername(arg1, arg2, arg3);
8913 #endif
8914 #ifdef TARGET_NR_getsockname
8915     case TARGET_NR_getsockname:
8916         return do_getsockname(arg1, arg2, arg3);
8917 #endif
8918 #ifdef TARGET_NR_getsockopt
8919     case TARGET_NR_getsockopt:
8920         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8921 #endif
8922 #ifdef TARGET_NR_listen
8923     case TARGET_NR_listen:
8924         return get_errno(listen(arg1, arg2));
8925 #endif
8926 #ifdef TARGET_NR_recv
8927     case TARGET_NR_recv:
8928         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8929 #endif
8930 #ifdef TARGET_NR_recvfrom
8931     case TARGET_NR_recvfrom:
8932         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8933 #endif
8934 #ifdef TARGET_NR_recvmsg
8935     case TARGET_NR_recvmsg:
8936         return do_sendrecvmsg(arg1, arg2, arg3, 0);
8937 #endif
8938 #ifdef TARGET_NR_send
8939     case TARGET_NR_send:
8940         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8941 #endif
8942 #ifdef TARGET_NR_sendmsg
8943     case TARGET_NR_sendmsg:
8944         return do_sendrecvmsg(arg1, arg2, arg3, 1);
8945 #endif
8946 #ifdef TARGET_NR_sendmmsg
8947     case TARGET_NR_sendmmsg:
8948         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8949     case TARGET_NR_recvmmsg:
8950         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8951 #endif
8952 #ifdef TARGET_NR_sendto
8953     case TARGET_NR_sendto:
8954         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8955 #endif
8956 #ifdef TARGET_NR_shutdown
8957     case TARGET_NR_shutdown:
8958         return get_errno(shutdown(arg1, arg2));
8959 #endif
8960 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8961     case TARGET_NR_getrandom:
8962         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8963         if (!p) {
8964             return -TARGET_EFAULT;
8965         }
8966         ret = get_errno(getrandom(p, arg2, arg3));
8967         unlock_user(p, arg1, ret);
8968         return ret;
8969 #endif
8970 #ifdef TARGET_NR_socket
8971     case TARGET_NR_socket:
8972         return do_socket(arg1, arg2, arg3);
8973 #endif
8974 #ifdef TARGET_NR_socketpair
8975     case TARGET_NR_socketpair:
8976         return do_socketpair(arg1, arg2, arg3, arg4);
8977 #endif
8978 #ifdef TARGET_NR_setsockopt
8979     case TARGET_NR_setsockopt:
8980         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8981 #endif
8982 #if defined(TARGET_NR_syslog)
8983     case TARGET_NR_syslog:
8984         {
8985             int len = arg3;
8986 
8987             switch (arg1) {
8988             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
8989             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
8990             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
8991             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
8992             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
8993             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8994             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
8995             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
8996                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8997             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
8998             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
8999             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9000                 {
9001                     if (len < 0) {
9002                         return -TARGET_EINVAL;
9003                     }
9004                     if (len == 0) {
9005                         return 0;
9006                     }
9007                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9008                     if (!p) {
9009                         return -TARGET_EFAULT;
9010                     }
9011                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9012                     unlock_user(p, arg2, arg3);
9013                 }
9014                 return ret;
9015             default:
9016                 return -TARGET_EINVAL;
9017             }
9018         }
9019         break;
9020 #endif
9021     case TARGET_NR_setitimer:
9022         {
9023             struct itimerval value, ovalue, *pvalue;
9024 
9025             if (arg2) {
9026                 pvalue = &value;
9027                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9028                     || copy_from_user_timeval(&pvalue->it_value,
9029                                               arg2 + sizeof(struct target_timeval)))
9030                     return -TARGET_EFAULT;
9031             } else {
9032                 pvalue = NULL;
9033             }
9034             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9035             if (!is_error(ret) && arg3) {
9036                 if (copy_to_user_timeval(arg3,
9037                                          &ovalue.it_interval)
9038                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9039                                             &ovalue.it_value))
9040                     return -TARGET_EFAULT;
9041             }
9042         }
9043         return ret;
9044     case TARGET_NR_getitimer:
9045         {
9046             struct itimerval value;
9047 
9048             ret = get_errno(getitimer(arg1, &value));
9049             if (!is_error(ret) && arg2) {
9050                 if (copy_to_user_timeval(arg2,
9051                                          &value.it_interval)
9052                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9053                                             &value.it_value))
9054                     return -TARGET_EFAULT;
9055             }
9056         }
9057         return ret;
9058 #ifdef TARGET_NR_stat
9059     case TARGET_NR_stat:
9060         if (!(p = lock_user_string(arg1))) {
9061             return -TARGET_EFAULT;
9062         }
9063         ret = get_errno(stat(path(p), &st));
9064         unlock_user(p, arg1, 0);
9065         goto do_stat;
9066 #endif
9067 #ifdef TARGET_NR_lstat
9068     case TARGET_NR_lstat:
9069         if (!(p = lock_user_string(arg1))) {
9070             return -TARGET_EFAULT;
9071         }
9072         ret = get_errno(lstat(path(p), &st));
9073         unlock_user(p, arg1, 0);
9074         goto do_stat;
9075 #endif
9076 #ifdef TARGET_NR_fstat
9077     case TARGET_NR_fstat:
9078         {
9079             ret = get_errno(fstat(arg1, &st));
9080 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9081         do_stat:
9082 #endif
9083             if (!is_error(ret)) {
9084                 struct target_stat *target_st;
9085 
9086                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9087                     return -TARGET_EFAULT;
9088                 memset(target_st, 0, sizeof(*target_st));
9089                 __put_user(st.st_dev, &target_st->st_dev);
9090                 __put_user(st.st_ino, &target_st->st_ino);
9091                 __put_user(st.st_mode, &target_st->st_mode);
9092                 __put_user(st.st_uid, &target_st->st_uid);
9093                 __put_user(st.st_gid, &target_st->st_gid);
9094                 __put_user(st.st_nlink, &target_st->st_nlink);
9095                 __put_user(st.st_rdev, &target_st->st_rdev);
9096                 __put_user(st.st_size, &target_st->st_size);
9097                 __put_user(st.st_blksize, &target_st->st_blksize);
9098                 __put_user(st.st_blocks, &target_st->st_blocks);
9099                 __put_user(st.st_atime, &target_st->target_st_atime);
9100                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9101                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9102 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9103     defined(TARGET_STAT_HAVE_NSEC)
9104                 __put_user(st.st_atim.tv_nsec,
9105                            &target_st->target_st_atime_nsec);
9106                 __put_user(st.st_mtim.tv_nsec,
9107                            &target_st->target_st_mtime_nsec);
9108                 __put_user(st.st_ctim.tv_nsec,
9109                            &target_st->target_st_ctime_nsec);
9110 #endif
9111                 unlock_user_struct(target_st, arg2, 1);
9112             }
9113         }
9114         return ret;
9115 #endif
9116     case TARGET_NR_vhangup:
9117         return get_errno(vhangup());
9118 #ifdef TARGET_NR_syscall
9119     case TARGET_NR_syscall:
9120         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9121                           arg6, arg7, arg8, 0);
9122 #endif
9123     case TARGET_NR_wait4:
9124         {
9125             int status;
9126             abi_long status_ptr = arg2;
9127             struct rusage rusage, *rusage_ptr;
9128             abi_ulong target_rusage = arg4;
9129             abi_long rusage_err;
9130             if (target_rusage)
9131                 rusage_ptr = &rusage;
9132             else
9133                 rusage_ptr = NULL;
9134             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9135             if (!is_error(ret)) {
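                     /* Only write back a status if a child was actually
                      * reaped; with WNOHANG, ret is 0 when no child has
                      * changed state.
                      */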
9136                 if (status_ptr && ret) {
9137                     status = host_to_target_waitstatus(status);
9138                     if (put_user_s32(status, status_ptr))
9139                         return -TARGET_EFAULT;
9140                 }
9141                 if (target_rusage) {
9142                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9143                     if (rusage_err) {
9144                         ret = rusage_err;
9145                     }
9146                 }
9147             }
9148         }
9149         return ret;
9150 #ifdef TARGET_NR_swapoff
9151     case TARGET_NR_swapoff:
9152         if (!(p = lock_user_string(arg1)))
9153             return -TARGET_EFAULT;
9154         ret = get_errno(swapoff(p));
9155         unlock_user(p, arg1, 0);
9156         return ret;
9157 #endif
9158     case TARGET_NR_sysinfo:
9159         {
9160             struct target_sysinfo *target_value;
9161             struct sysinfo value;
9162             ret = get_errno(sysinfo(&value));
9163             if (!is_error(ret) && arg1)
9164             {
9165                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9166                     return -TARGET_EFAULT;
9167                 __put_user(value.uptime, &target_value->uptime);
9168                 __put_user(value.loads[0], &target_value->loads[0]);
9169                 __put_user(value.loads[1], &target_value->loads[1]);
9170                 __put_user(value.loads[2], &target_value->loads[2]);
9171                 __put_user(value.totalram, &target_value->totalram);
9172                 __put_user(value.freeram, &target_value->freeram);
9173                 __put_user(value.sharedram, &target_value->sharedram);
9174                 __put_user(value.bufferram, &target_value->bufferram);
9175                 __put_user(value.totalswap, &target_value->totalswap);
9176                 __put_user(value.freeswap, &target_value->freeswap);
9177                 __put_user(value.procs, &target_value->procs);
9178                 __put_user(value.totalhigh, &target_value->totalhigh);
9179                 __put_user(value.freehigh, &target_value->freehigh);
9180                 __put_user(value.mem_unit, &target_value->mem_unit);
9181                 unlock_user_struct(target_value, arg1, 1);
9182             }
9183         }
9184         return ret;
9185 #ifdef TARGET_NR_ipc
9186     case TARGET_NR_ipc:
9187         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9188 #endif
9189 #ifdef TARGET_NR_semget
9190     case TARGET_NR_semget:
9191         return get_errno(semget(arg1, arg2, arg3));
9192 #endif
9193 #ifdef TARGET_NR_semop
9194     case TARGET_NR_semop:
9195         return do_semop(arg1, arg2, arg3);
9196 #endif
9197 #ifdef TARGET_NR_semctl
9198     case TARGET_NR_semctl:
9199         return do_semctl(arg1, arg2, arg3, arg4);
9200 #endif
9201 #ifdef TARGET_NR_msgctl
9202     case TARGET_NR_msgctl:
9203         return do_msgctl(arg1, arg2, arg3);
9204 #endif
9205 #ifdef TARGET_NR_msgget
9206     case TARGET_NR_msgget:
9207         return get_errno(msgget(arg1, arg2));
9208 #endif
9209 #ifdef TARGET_NR_msgrcv
9210     case TARGET_NR_msgrcv:
9211         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9212 #endif
9213 #ifdef TARGET_NR_msgsnd
9214     case TARGET_NR_msgsnd:
9215         return do_msgsnd(arg1, arg2, arg3, arg4);
9216 #endif
9217 #ifdef TARGET_NR_shmget
9218     case TARGET_NR_shmget:
9219         return get_errno(shmget(arg1, arg2, arg3));
9220 #endif
9221 #ifdef TARGET_NR_shmctl
9222     case TARGET_NR_shmctl:
9223         return do_shmctl(arg1, arg2, arg3);
9224 #endif
9225 #ifdef TARGET_NR_shmat
9226     case TARGET_NR_shmat:
9227         return do_shmat(cpu_env, arg1, arg2, arg3);
9228 #endif
9229 #ifdef TARGET_NR_shmdt
9230     case TARGET_NR_shmdt:
9231         return do_shmdt(arg1);
9232 #endif
9233     case TARGET_NR_fsync:
9234         return get_errno(fsync(arg1));
9235     case TARGET_NR_clone:
9236         /* Linux manages to have three different orderings for its
9237          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9238          * match the kernel's CONFIG_CLONE_* settings.
9239          * Microblaze is further special in that it uses a sixth
9240          * implicit argument to clone for the TLS pointer.
9241          */
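        /*
         * A sketch of the guest-side argument orders implied by the calls
         * below, assuming do_fork() takes (env, flags, newsp, parent_tidptr,
         * newtls, child_tidptr):
         *   default:     clone(flags, newsp, parent_tidptr, child_tidptr, tls)
         *   BACKWARDS:   clone(flags, newsp, parent_tidptr, tls, child_tidptr)
         *   BACKWARDS2:  clone(newsp, flags, parent_tidptr, child_tidptr, tls)
         *   Microblaze:  clone(flags, newsp, <skipped>, parent_tidptr,
         *                      child_tidptr, tls)
         */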
9242 #if defined(TARGET_MICROBLAZE)
9243         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9244 #elif defined(TARGET_CLONE_BACKWARDS)
9245         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9246 #elif defined(TARGET_CLONE_BACKWARDS2)
9247         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9248 #else
9249         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9250 #endif
9251         return ret;
9252 #ifdef __NR_exit_group
9253         /* new thread calls */
9254     case TARGET_NR_exit_group:
9255         preexit_cleanup(cpu_env, arg1);
9256         return get_errno(exit_group(arg1));
9257 #endif
9258     case TARGET_NR_setdomainname:
9259         if (!(p = lock_user_string(arg1)))
9260             return -TARGET_EFAULT;
9261         ret = get_errno(setdomainname(p, arg2));
9262         unlock_user(p, arg1, 0);
9263         return ret;
9264     case TARGET_NR_uname:
9265         /* no need to transcode because we use the linux syscall */
9266         {
9267             struct new_utsname * buf;
9268 
9269             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9270                 return -TARGET_EFAULT;
9271             ret = get_errno(sys_uname(buf));
9272             if (!is_error(ret)) {
9273                 /* Overwrite the native machine name with whatever is being
9274                    emulated. */
9275                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9276                           sizeof(buf->machine));
9277                 /* Allow the user to override the reported release.  */
9278                 if (qemu_uname_release && *qemu_uname_release) {
9279                     g_strlcpy(buf->release, qemu_uname_release,
9280                               sizeof(buf->release));
9281                 }
9282             }
9283             unlock_user_struct(buf, arg1, 1);
9284         }
9285         return ret;
9286 #ifdef TARGET_I386
9287     case TARGET_NR_modify_ldt:
9288         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9289 #if !defined(TARGET_X86_64)
9290     case TARGET_NR_vm86:
9291         return do_vm86(cpu_env, arg1, arg2);
9292 #endif
9293 #endif
9294     case TARGET_NR_adjtimex:
9295         {
9296             struct timex host_buf;
9297 
9298             if (target_to_host_timex(&host_buf, arg1) != 0) {
9299                 return -TARGET_EFAULT;
9300             }
9301             ret = get_errno(adjtimex(&host_buf));
9302             if (!is_error(ret)) {
9303                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9304                     return -TARGET_EFAULT;
9305                 }
9306             }
9307         }
9308         return ret;
9309 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9310     case TARGET_NR_clock_adjtime:
9311         {
9312             struct timex htx, *phtx = &htx;
9313 
9314             if (target_to_host_timex(phtx, arg2) != 0) {
9315                 return -TARGET_EFAULT;
9316             }
9317             ret = get_errno(clock_adjtime(arg1, phtx));
9318             if (!is_error(ret) && phtx) {
9319                 if (host_to_target_timex(arg2, phtx) != 0) {
9320                     return -TARGET_EFAULT;
9321                 }
9322             }
9323         }
9324         return ret;
9325 #endif
9326     case TARGET_NR_getpgid:
9327         return get_errno(getpgid(arg1));
9328     case TARGET_NR_fchdir:
9329         return get_errno(fchdir(arg1));
9330     case TARGET_NR_personality:
9331         return get_errno(personality(arg1));
9332 #ifdef TARGET_NR__llseek /* Not on alpha */
9333     case TARGET_NR__llseek:
9334         {
9335             int64_t res;
9336 #if !defined(__NR_llseek)
9337             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9338             if (res == -1) {
9339                 ret = get_errno(res);
9340             } else {
9341                 ret = 0;
9342             }
9343 #else
9344             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9345 #endif
9346             if ((ret == 0) && put_user_s64(res, arg4)) {
9347                 return -TARGET_EFAULT;
9348             }
9349         }
9350         return ret;
9351 #endif
9352 #ifdef TARGET_NR_getdents
9353     case TARGET_NR_getdents:
9354 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9355 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9356         {
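            /*
             * 32-bit guest on a 64-bit host: the host linux_dirent layout
             * (long-sized d_ino/d_off) does not match the target one, so
             * read into a bounce buffer and repack each record by hand.
             */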
9357             struct target_dirent *target_dirp;
9358             struct linux_dirent *dirp;
9359             abi_long count = arg3;
9360 
9361             dirp = g_try_malloc(count);
9362             if (!dirp) {
9363                 return -TARGET_ENOMEM;
9364             }
9365 
9366             ret = get_errno(sys_getdents(arg1, dirp, count));
9367             if (!is_error(ret)) {
9368                 struct linux_dirent *de;
9369                 struct target_dirent *tde;
9370                 int len = ret;
9371                 int reclen, treclen;
9372                 int count1, tnamelen;
9373 
9374                 count1 = 0;
9375                 de = dirp;
9376                 target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9377                 if (!target_dirp) {
                    g_free(dirp);
                    return -TARGET_EFAULT;
                }
9378                 tde = target_dirp;
9379                 while (len > 0) {
9380                     reclen = de->d_reclen;
9381                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9382                     assert(tnamelen >= 0);
9383                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9384                     assert(count1 + treclen <= count);
9385                     tde->d_reclen = tswap16(treclen);
9386                     tde->d_ino = tswapal(de->d_ino);
9387                     tde->d_off = tswapal(de->d_off);
9388                     memcpy(tde->d_name, de->d_name, tnamelen);
9389                     de = (struct linux_dirent *)((char *)de + reclen);
9390                     len -= reclen;
9391                     tde = (struct target_dirent *)((char *)tde + treclen);
9392                     count1 += treclen;
9393                 }
9394                 ret = count1;
9395                 unlock_user(target_dirp, arg2, ret);
9396             }
9397             g_free(dirp);
9398         }
9399 #else
9400         {
9401             struct linux_dirent *dirp;
9402             abi_long count = arg3;
9403 
9404             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9405                 return -TARGET_EFAULT;
9406             ret = get_errno(sys_getdents(arg1, dirp, count));
9407             if (!is_error(ret)) {
9408                 struct linux_dirent *de;
9409                 int len = ret;
9410                 int reclen;
9411                 de = dirp;
9412                 while (len > 0) {
9413                     reclen = de->d_reclen;
9414                     if (reclen > len)
9415                         break;
9416                     de->d_reclen = tswap16(reclen);
9417                     tswapls(&de->d_ino);
9418                     tswapls(&de->d_off);
9419                     de = (struct linux_dirent *)((char *)de + reclen);
9420                     len -= reclen;
9421                 }
9422             }
9423             unlock_user(dirp, arg2, ret);
9424         }
9425 #endif
9426 #else
9427         /* Implement getdents in terms of getdents64 */
9428         {
9429             struct linux_dirent64 *dirp;
9430             abi_long count = arg3;
9431 
9432             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9433             if (!dirp) {
9434                 return -TARGET_EFAULT;
9435             }
9436             ret = get_errno(sys_getdents64(arg1, dirp, count));
9437             if (!is_error(ret)) {
9438                 /* Convert the dirent64 structs to target dirent.  We do this
9439                  * in-place, since we can guarantee that a target_dirent is no
9440                  * larger than a dirent64; however this means we have to be
9441                  * careful to read everything before writing in the new format.
9442                  */
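                /*
                 * Size sketch (assuming the usual layouts): linux_dirent64
                 * carries 64-bit d_ino/d_off plus a d_type byte ahead of
                 * d_name, while target_dirent uses abi_long-sized d_ino/d_off
                 * and stashes the type in the trailing padding byte, so each
                 * converted record never grows.
                 */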
9443                 struct linux_dirent64 *de;
9444                 struct target_dirent *tde;
9445                 int len = ret;
9446                 int tlen = 0;
9447 
9448                 de = dirp;
9449                 tde = (struct target_dirent *)dirp;
9450                 while (len > 0) {
9451                     int namelen, treclen;
9452                     int reclen = de->d_reclen;
9453                     uint64_t ino = de->d_ino;
9454                     int64_t off = de->d_off;
9455                     uint8_t type = de->d_type;
9456 
9457                     namelen = strlen(de->d_name);
9458                     treclen = offsetof(struct target_dirent, d_name)
9459                         + namelen + 2;
9460                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9461 
9462                     memmove(tde->d_name, de->d_name, namelen + 1);
9463                     tde->d_ino = tswapal(ino);
9464                     tde->d_off = tswapal(off);
9465                     tde->d_reclen = tswap16(treclen);
9466                     /* The target_dirent type is in what was formerly a padding
9467                      * byte at the end of the structure:
9468                      */
9469                     *(((char *)tde) + treclen - 1) = type;
9470 
9471                     de = (struct linux_dirent64 *)((char *)de + reclen);
9472                     tde = (struct target_dirent *)((char *)tde + treclen);
9473                     len -= reclen;
9474                     tlen += treclen;
9475                 }
9476                 ret = tlen;
9477             }
9478             unlock_user(dirp, arg2, ret);
9479         }
9480 #endif
9481         return ret;
9482 #endif /* TARGET_NR_getdents */
9483 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9484     case TARGET_NR_getdents64:
9485         {
9486             struct linux_dirent64 *dirp;
9487             abi_long count = arg3;
9488             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9489                 return -TARGET_EFAULT;
9490             ret = get_errno(sys_getdents64(arg1, dirp, count));
9491             if (!is_error(ret)) {
9492                 struct linux_dirent64 *de;
9493                 int len = ret;
9494                 int reclen;
9495                 de = dirp;
9496                 while (len > 0) {
9497                     reclen = de->d_reclen;
9498                     if (reclen > len)
9499                         break;
9500                     de->d_reclen = tswap16(reclen);
9501                     tswap64s((uint64_t *)&de->d_ino);
9502                     tswap64s((uint64_t *)&de->d_off);
9503                     de = (struct linux_dirent64 *)((char *)de + reclen);
9504                     len -= reclen;
9505                 }
9506             }
9507             unlock_user(dirp, arg2, ret);
9508         }
9509         return ret;
9510 #endif /* TARGET_NR_getdents64 */
9511 #if defined(TARGET_NR__newselect)
9512     case TARGET_NR__newselect:
9513         return do_select(arg1, arg2, arg3, arg4, arg5);
9514 #endif
9515 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9516 # ifdef TARGET_NR_poll
9517     case TARGET_NR_poll:
9518 # endif
9519 # ifdef TARGET_NR_ppoll
9520     case TARGET_NR_ppoll:
9521 # endif
9522         {
9523             struct target_pollfd *target_pfd;
9524             unsigned int nfds = arg2;
9525             struct pollfd *pfd;
9526             unsigned int i;
9527 
9528             pfd = NULL;
9529             target_pfd = NULL;
9530             if (nfds) {
9531                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9532                     return -TARGET_EINVAL;
9533                 }
9534 
9535                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9536                                        sizeof(struct target_pollfd) * nfds, 1);
9537                 if (!target_pfd) {
9538                     return -TARGET_EFAULT;
9539                 }
9540 
9541                 pfd = alloca(sizeof(struct pollfd) * nfds);
9542                 for (i = 0; i < nfds; i++) {
9543                     pfd[i].fd = tswap32(target_pfd[i].fd);
9544                     pfd[i].events = tswap16(target_pfd[i].events);
9545                 }
9546             }
9547 
9548             switch (num) {
9549 # ifdef TARGET_NR_ppoll
9550             case TARGET_NR_ppoll:
9551             {
9552                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9553                 target_sigset_t *target_set;
9554                 sigset_t _set, *set = &_set;
9555 
9556                 if (arg3) {
9557                     if (target_to_host_timespec(timeout_ts, arg3)) {
9558                         unlock_user(target_pfd, arg1, 0);
9559                         return -TARGET_EFAULT;
9560                     }
9561                 } else {
9562                     timeout_ts = NULL;
9563                 }
9564 
9565                 if (arg4) {
9566                     if (arg5 != sizeof(target_sigset_t)) {
9567                         unlock_user(target_pfd, arg1, 0);
9568                         return -TARGET_EINVAL;
9569                     }
9570 
9571                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9572                     if (!target_set) {
9573                         unlock_user(target_pfd, arg1, 0);
9574                         return -TARGET_EFAULT;
9575                     }
9576                     target_to_host_sigset(set, target_set);
9577                 } else {
9578                     set = NULL;
9579                 }
9580 
9581                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9582                                            set, SIGSET_T_SIZE));
9583 
9584                 if (!is_error(ret) && arg3) {
9585                     host_to_target_timespec(arg3, timeout_ts);
9586                 }
9587                 if (arg4) {
9588                     unlock_user(target_set, arg4, 0);
9589                 }
9590                 break;
9591             }
9592 # endif
9593 # ifdef TARGET_NR_poll
9594             case TARGET_NR_poll:
9595             {
9596                 struct timespec ts, *pts;
9597 
9598                 if (arg3 >= 0) {
9599                     /* Convert ms to secs, ns */
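                    /* e.g. arg3 = 1500 becomes { .tv_sec = 1, .tv_nsec = 500000000 } */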
9600                     ts.tv_sec = arg3 / 1000;
9601                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9602                     pts = &ts;
9603                 } else {
9604                     /* A negative poll() timeout means "infinite" */
9605                     pts = NULL;
9606                 }
9607                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9608                 break;
9609             }
9610 # endif
9611             default:
9612                 g_assert_not_reached();
9613             }
9614 
9615             if (!is_error(ret)) {
9616                 for (i = 0; i < nfds; i++) {
9617                     target_pfd[i].revents = tswap16(pfd[i].revents);
9618                 }
9619             }
9620             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9621         }
9622         return ret;
9623 #endif
9624     case TARGET_NR_flock:
9625         /* NOTE: the flock constant seems to be the same for every
9626            Linux platform */
9627         return get_errno(safe_flock(arg1, arg2));
9628     case TARGET_NR_readv:
9629         {
9630             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9631             if (vec != NULL) {
9632                 ret = get_errno(safe_readv(arg1, vec, arg3));
9633                 unlock_iovec(vec, arg2, arg3, 1);
9634             } else {
9635                 ret = -host_to_target_errno(errno);
9636             }
9637         }
9638         return ret;
9639     case TARGET_NR_writev:
9640         {
9641             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9642             if (vec != NULL) {
9643                 ret = get_errno(safe_writev(arg1, vec, arg3));
9644                 unlock_iovec(vec, arg2, arg3, 0);
9645             } else {
9646                 ret = -host_to_target_errno(errno);
9647             }
9648         }
9649         return ret;
9650 #if defined(TARGET_NR_preadv)
9651     case TARGET_NR_preadv:
9652         {
9653             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9654             if (vec != NULL) {
9655                 unsigned long low, high;
9656 
9657                 target_to_host_low_high(arg4, arg5, &low, &high);
9658                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9659                 unlock_iovec(vec, arg2, arg3, 1);
9660             } else {
9661                 ret = -host_to_target_errno(errno);
9662             }
9663         }
9664         return ret;
9665 #endif
9666 #if defined(TARGET_NR_pwritev)
9667     case TARGET_NR_pwritev:
9668         {
9669             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9670             if (vec != NULL) {
9671                 unsigned long low, high;
9672 
9673                 target_to_host_low_high(arg4, arg5, &low, &high);
9674                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9675                 unlock_iovec(vec, arg2, arg3, 0);
9676             } else {
9677                 ret = -host_to_target_errno(errno);
9678             }
9679         }
9680         return ret;
9681 #endif
9682     case TARGET_NR_getsid:
9683         return get_errno(getsid(arg1));
9684 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9685     case TARGET_NR_fdatasync:
9686         return get_errno(fdatasync(arg1));
9687 #endif
9688 #ifdef TARGET_NR__sysctl
9689     case TARGET_NR__sysctl:
9690         /* We don't implement this, but ENOTDIR is always a safe
9691            return value. */
9692         return -TARGET_ENOTDIR;
9693 #endif
9694     case TARGET_NR_sched_getaffinity:
9695         {
9696             unsigned int mask_size;
9697             unsigned long *mask;
9698 
9699             /*
9700              * sched_getaffinity needs multiples of ulong, so we need to
9701              * handle mismatches between the target and host ulong sizes.
9702              */
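            /*
             * For example, a 32-bit guest on a 64-bit host passing arg2 = 4
             * passes the abi_ulong check below, and mask_size is rounded up
             * to 8 so the host kernel always sees whole host longs.
             */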
9703             if (arg2 & (sizeof(abi_ulong) - 1)) {
9704                 return -TARGET_EINVAL;
9705             }
9706             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9707 
9708             mask = alloca(mask_size);
9709             memset(mask, 0, mask_size);
9710             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9711 
9712             if (!is_error(ret)) {
9713                 if (ret > arg2) {
9714                     /* More data returned than the caller's buffer will fit.
9715                      * This only happens if sizeof(abi_long) < sizeof(long)
9716                      * and the caller passed us a buffer holding an odd number
9717                      * of abi_longs. If the host kernel is actually using the
9718                      * extra 4 bytes then fail EINVAL; otherwise we can just
9719                      * ignore them and only copy the interesting part.
9720                      */
9721                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9722                     if (numcpus > arg2 * 8) {
9723                         return -TARGET_EINVAL;
9724                     }
9725                     ret = arg2;
9726                 }
9727 
9728                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9729                     return -TARGET_EFAULT;
9730                 }
9731             }
9732         }
9733         return ret;
9734     case TARGET_NR_sched_setaffinity:
9735         {
9736             unsigned int mask_size;
9737             unsigned long *mask;
9738 
9739             /*
9740              * sched_setaffinity needs multiples of ulong, so we need to
9741              * handle mismatches between the target and host ulong sizes.
9742              */
9743             if (arg2 & (sizeof(abi_ulong) - 1)) {
9744                 return -TARGET_EINVAL;
9745             }
9746             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9747             mask = alloca(mask_size);
9748 
9749             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9750             if (ret) {
9751                 return ret;
9752             }
9753 
9754             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9755         }
9756     case TARGET_NR_getcpu:
9757         {
9758             unsigned cpu, node;
9759             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9760                                        arg2 ? &node : NULL,
9761                                        NULL));
9762             if (is_error(ret)) {
9763                 return ret;
9764             }
9765             if (arg1 && put_user_u32(cpu, arg1)) {
9766                 return -TARGET_EFAULT;
9767             }
9768             if (arg2 && put_user_u32(node, arg2)) {
9769                 return -TARGET_EFAULT;
9770             }
9771         }
9772         return ret;
9773     case TARGET_NR_sched_setparam:
9774         {
9775             struct sched_param *target_schp;
9776             struct sched_param schp;
9777 
9778             if (arg2 == 0) {
9779                 return -TARGET_EINVAL;
9780             }
9781             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9782                 return -TARGET_EFAULT;
9783             schp.sched_priority = tswap32(target_schp->sched_priority);
9784             unlock_user_struct(target_schp, arg2, 0);
9785             return get_errno(sched_setparam(arg1, &schp));
9786         }
9787     case TARGET_NR_sched_getparam:
9788         {
9789             struct sched_param *target_schp;
9790             struct sched_param schp;
9791 
9792             if (arg2 == 0) {
9793                 return -TARGET_EINVAL;
9794             }
9795             ret = get_errno(sched_getparam(arg1, &schp));
9796             if (!is_error(ret)) {
9797                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9798                     return -TARGET_EFAULT;
9799                 target_schp->sched_priority = tswap32(schp.sched_priority);
9800                 unlock_user_struct(target_schp, arg2, 1);
9801             }
9802         }
9803         return ret;
9804     case TARGET_NR_sched_setscheduler:
9805         {
9806             struct sched_param *target_schp;
9807             struct sched_param schp;
9808             if (arg3 == 0) {
9809                 return -TARGET_EINVAL;
9810             }
9811             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9812                 return -TARGET_EFAULT;
9813             schp.sched_priority = tswap32(target_schp->sched_priority);
9814             unlock_user_struct(target_schp, arg3, 0);
9815             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9816         }
9817     case TARGET_NR_sched_getscheduler:
9818         return get_errno(sched_getscheduler(arg1));
9819     case TARGET_NR_sched_yield:
9820         return get_errno(sched_yield());
9821     case TARGET_NR_sched_get_priority_max:
9822         return get_errno(sched_get_priority_max(arg1));
9823     case TARGET_NR_sched_get_priority_min:
9824         return get_errno(sched_get_priority_min(arg1));
9825     case TARGET_NR_sched_rr_get_interval:
9826         {
9827             struct timespec ts;
9828             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9829             if (!is_error(ret)) {
9830                 ret = host_to_target_timespec(arg2, &ts);
9831             }
9832         }
9833         return ret;
9834     case TARGET_NR_nanosleep:
9835         {
9836             struct timespec req, rem;
9837             if (target_to_host_timespec(&req, arg1)) {
                return -TARGET_EFAULT;
            }
9838             ret = get_errno(safe_nanosleep(&req, &rem));
9839             if (is_error(ret) && arg2) {
9840                 host_to_target_timespec(arg2, &rem);
9841             }
9842         }
9843         return ret;
9844     case TARGET_NR_prctl:
9845         switch (arg1) {
9846         case PR_GET_PDEATHSIG:
9847         {
9848             int deathsig;
9849             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9850             if (!is_error(ret) && arg2
9851                 && put_user_ual(deathsig, arg2)) {
9852                 return -TARGET_EFAULT;
9853             }
9854             return ret;
9855         }
9856 #ifdef PR_GET_NAME
9857         case PR_GET_NAME:
9858         {
9859             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9860             if (!name) {
9861                 return -TARGET_EFAULT;
9862             }
9863             ret = get_errno(prctl(arg1, (unsigned long)name,
9864                                   arg3, arg4, arg5));
9865             unlock_user(name, arg2, 16);
9866             return ret;
9867         }
9868         case PR_SET_NAME:
9869         {
9870             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9871             if (!name) {
9872                 return -TARGET_EFAULT;
9873             }
9874             ret = get_errno(prctl(arg1, (unsigned long)name,
9875                                   arg3, arg4, arg5));
9876             unlock_user(name, arg2, 0);
9877             return ret;
9878         }
9879 #endif
9880 #ifdef TARGET_MIPS
9881         case TARGET_PR_GET_FP_MODE:
9882         {
9883             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9884             ret = 0;
9885             if (env->CP0_Status & (1 << CP0St_FR)) {
9886                 ret |= TARGET_PR_FP_MODE_FR;
9887             }
9888             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9889                 ret |= TARGET_PR_FP_MODE_FRE;
9890             }
9891             return ret;
9892         }
9893         case TARGET_PR_SET_FP_MODE:
9894         {
9895             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9896             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9897             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
9898             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9899             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9900 
9901             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
9902                                             TARGET_PR_FP_MODE_FRE;
9903 
9904             /* If nothing to change, return right away, successfully.  */
9905             if (old_fr == new_fr && old_fre == new_fre) {
9906                 return 0;
9907             }
9908             /* Check the value is valid */
9909             if (arg2 & ~known_bits) {
9910                 return -TARGET_EOPNOTSUPP;
9911             }
9912             /* Setting FRE without FR is not supported.  */
9913             if (new_fre && !new_fr) {
9914                 return -TARGET_EOPNOTSUPP;
9915             }
9916             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9917                 /* FR1 is not supported */
9918                 return -TARGET_EOPNOTSUPP;
9919             }
9920             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9921                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9922                 /* cannot set FR=0 */
9923                 return -TARGET_EOPNOTSUPP;
9924             }
9925             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9926                 /* Cannot set FRE=1 */
9927                 return -TARGET_EOPNOTSUPP;
9928             }
9929 
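            /*
             * Changing FR changes how 64-bit FP values map onto the register
             * file, so repack each even/odd single pair to preserve the
             * values the guest currently sees across the mode switch.
             */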
9930             int i;
9931             fpr_t *fpr = env->active_fpu.fpr;
9932             for (i = 0; i < 32; i += 2) {
9933                 if (!old_fr && new_fr) {
9934                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9935                 } else if (old_fr && !new_fr) {
9936                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
9937                 }
9938             }
9939 
9940             if (new_fr) {
9941                 env->CP0_Status |= (1 << CP0St_FR);
9942                 env->hflags |= MIPS_HFLAG_F64;
9943             } else {
9944                 env->CP0_Status &= ~(1 << CP0St_FR);
9945                 env->hflags &= ~MIPS_HFLAG_F64;
9946             }
9947             if (new_fre) {
9948                 env->CP0_Config5 |= (1 << CP0C5_FRE);
9949                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
9950                     env->hflags |= MIPS_HFLAG_FRE;
9951                 }
9952             } else {
9953                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
9954                 env->hflags &= ~MIPS_HFLAG_FRE;
9955             }
9956 
9957             return 0;
9958         }
9959 #endif /* MIPS */
9960 #ifdef TARGET_AARCH64
9961         case TARGET_PR_SVE_SET_VL:
9962             /*
9963              * We cannot support either PR_SVE_SET_VL_ONEXEC or
9964              * PR_SVE_VL_INHERIT.  Note the kernel definition
9965              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9966              * even though the current architectural maximum is VQ=16.
9967              */
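            /*
             * arg2 is the requested vector length (VL) in bytes; vq below is
             * the same length in 128-bit quadwords, e.g. VL = 64 gives
             * vq = 4 (512-bit vectors), clamped to the CPU's sve_max_vq.
             */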
9968             ret = -TARGET_EINVAL;
9969             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
9970                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9971                 CPUARMState *env = cpu_env;
9972                 ARMCPU *cpu = env_archcpu(env);
9973                 uint32_t vq, old_vq;
9974 
9975                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9976                 vq = MAX(arg2 / 16, 1);
9977                 vq = MIN(vq, cpu->sve_max_vq);
9978 
9979                 if (vq < old_vq) {
9980                     aarch64_sve_narrow_vq(env, vq);
9981                 }
9982                 env->vfp.zcr_el[1] = vq - 1;
9983                 ret = vq * 16;
9984             }
9985             return ret;
9986         case TARGET_PR_SVE_GET_VL:
9987             ret = -TARGET_EINVAL;
9988             {
9989                 ARMCPU *cpu = env_archcpu(cpu_env);
9990                 if (cpu_isar_feature(aa64_sve, cpu)) {
9991                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9992                 }
9993             }
9994             return ret;
9995         case TARGET_PR_PAC_RESET_KEYS:
9996             {
9997                 CPUARMState *env = cpu_env;
9998                 ARMCPU *cpu = env_archcpu(env);
9999 
10000                 if (arg3 || arg4 || arg5) {
10001                     return -TARGET_EINVAL;
10002                 }
10003                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10004                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10005                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10006                                TARGET_PR_PAC_APGAKEY);
10007                     int ret = 0;
10008                     Error *err = NULL;
10009 
10010                     if (arg2 == 0) {
10011                         arg2 = all;
10012                     } else if (arg2 & ~all) {
10013                         return -TARGET_EINVAL;
10014                     }
10015                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10016                         ret |= qemu_guest_getrandom(&env->keys.apia,
10017                                                     sizeof(ARMPACKey), &err);
10018                     }
10019                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10020                         ret |= qemu_guest_getrandom(&env->keys.apib,
10021                                                     sizeof(ARMPACKey), &err);
10022                     }
10023                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10024                         ret |= qemu_guest_getrandom(&env->keys.apda,
10025                                                     sizeof(ARMPACKey), &err);
10026                     }
10027                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10028                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10029                                                     sizeof(ARMPACKey), &err);
10030                     }
10031                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10032                         ret |= qemu_guest_getrandom(&env->keys.apga,
10033                                                     sizeof(ARMPACKey), &err);
10034                     }
10035                     if (ret != 0) {
10036                         /*
10037                          * Some unknown failure in the crypto.  The best
10038                          * we can do is log it and fail the syscall.
10039                          * The real syscall cannot fail this way.
10040                          */
10041                         qemu_log_mask(LOG_UNIMP,
10042                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10043                                       error_get_pretty(err));
10044                         error_free(err);
10045                         return -TARGET_EIO;
10046                     }
10047                     return 0;
10048                 }
10049             }
10050             return -TARGET_EINVAL;
10051 #endif /* AARCH64 */
10052         case PR_GET_SECCOMP:
10053         case PR_SET_SECCOMP:
10054             /* Disable seccomp to prevent the target disabling syscalls we
10055              * need. */
10056             return -TARGET_EINVAL;
10057         default:
10058             /* Most prctl options have no pointer arguments */
10059             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10060         }
10061         break;
10062 #ifdef TARGET_NR_arch_prctl
10063     case TARGET_NR_arch_prctl:
10064 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10065         return do_arch_prctl(cpu_env, arg1, arg2);
10066 #else
10067 #error unreachable
10068 #endif
10069 #endif
10070 #ifdef TARGET_NR_pread64
10071     case TARGET_NR_pread64:
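        /*
         * Some 32-bit ABIs require a 64-bit syscall argument to start in an
         * even-numbered register pair, inserting a padding slot;
         * regpairs_aligned() reports whether the offset halves were shifted
         * up by one argument here.
         */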
10072         if (regpairs_aligned(cpu_env, num)) {
10073             arg4 = arg5;
10074             arg5 = arg6;
10075         }
10076         if (arg2 == 0 && arg3 == 0) {
10077             /* Special-case NULL buffer and zero length, which should succeed */
10078             p = 0;
10079         } else {
10080             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10081             if (!p) {
10082                 return -TARGET_EFAULT;
10083             }
10084         }
10085         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10086         unlock_user(p, arg2, ret);
10087         return ret;
10088     case TARGET_NR_pwrite64:
10089         if (regpairs_aligned(cpu_env, num)) {
10090             arg4 = arg5;
10091             arg5 = arg6;
10092         }
10093         if (arg2 == 0 && arg3 == 0) {
10094             /* Special-case NULL buffer and zero length, which should succeed */
10095             p = 0;
10096         } else {
10097             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10098             if (!p) {
10099                 return -TARGET_EFAULT;
10100             }
10101         }
10102         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10103         unlock_user(p, arg2, 0);
10104         return ret;
10105 #endif
10106     case TARGET_NR_getcwd:
10107         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10108             return -TARGET_EFAULT;
10109         ret = get_errno(sys_getcwd1(p, arg2));
10110         unlock_user(p, arg1, ret);
10111         return ret;
10112     case TARGET_NR_capget:
10113     case TARGET_NR_capset:
10114     {
10115         struct target_user_cap_header *target_header;
10116         struct target_user_cap_data *target_data = NULL;
10117         struct __user_cap_header_struct header;
10118         struct __user_cap_data_struct data[2];
10119         struct __user_cap_data_struct *dataptr = NULL;
10120         int i, target_datalen;
10121         int data_items = 1;
10122 
10123         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10124             return -TARGET_EFAULT;
10125         }
10126         header.version = tswap32(target_header->version);
10127         header.pid = tswap32(target_header->pid);
10128 
10129         if (header.version != _LINUX_CAPABILITY_VERSION) {
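        /*
         * _LINUX_CAPABILITY_VERSION is the v1 ABI (one 32-bit set per
         * capability class); the v2/v3 ABIs widened the sets to 64 bits,
         * split across two __user_cap_data_struct entries.
         */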
10130             /* Version 2 and up takes pointer to two user_data structs */
10131             /* Versions 2 and up take a pointer to two user_data structs */
10132         }
10133 
10134         target_datalen = sizeof(*target_data) * data_items;
10135 
10136         if (arg2) {
10137             if (num == TARGET_NR_capget) {
10138                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10139             } else {
10140                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10141             }
10142             if (!target_data) {
10143                 unlock_user_struct(target_header, arg1, 0);
10144                 return -TARGET_EFAULT;
10145             }
10146 
10147             if (num == TARGET_NR_capset) {
10148                 for (i = 0; i < data_items; i++) {
10149                     data[i].effective = tswap32(target_data[i].effective);
10150                     data[i].permitted = tswap32(target_data[i].permitted);
10151                     data[i].inheritable = tswap32(target_data[i].inheritable);
10152                 }
10153             }
10154 
10155             dataptr = data;
10156         }
10157 
10158         if (num == TARGET_NR_capget) {
10159             ret = get_errno(capget(&header, dataptr));
10160         } else {
10161             ret = get_errno(capset(&header, dataptr));
10162         }
10163 
10164         /* The kernel always updates version for both capget and capset */
10165         target_header->version = tswap32(header.version);
10166         unlock_user_struct(target_header, arg1, 1);
10167 
10168         if (arg2) {
10169             if (num == TARGET_NR_capget) {
10170                 for (i = 0; i < data_items; i++) {
10171                     target_data[i].effective = tswap32(data[i].effective);
10172                     target_data[i].permitted = tswap32(data[i].permitted);
10173                     target_data[i].inheritable = tswap32(data[i].inheritable);
10174                 }
10175                 unlock_user(target_data, arg2, target_datalen);
10176             } else {
10177                 unlock_user(target_data, arg2, 0);
10178             }
10179         }
10180         return ret;
10181     }
10182     case TARGET_NR_sigaltstack:
10183         return do_sigaltstack(arg1, arg2,
10184                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10185 
10186 #ifdef CONFIG_SENDFILE
10187 #ifdef TARGET_NR_sendfile
10188     case TARGET_NR_sendfile:
10189     {
10190         off_t *offp = NULL;
10191         off_t off;
10192         if (arg3) {
10193             ret = get_user_sal(off, arg3);
10194             if (is_error(ret)) {
10195                 return ret;
10196             }
10197             offp = &off;
10198         }
10199         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10200         if (!is_error(ret) && arg3) {
10201             abi_long ret2 = put_user_sal(off, arg3);
10202             if (is_error(ret2)) {
10203                 ret = ret2;
10204             }
10205         }
10206         return ret;
10207     }
10208 #endif
10209 #ifdef TARGET_NR_sendfile64
10210     case TARGET_NR_sendfile64:
10211     {
10212         off_t *offp = NULL;
10213         off_t off;
10214         if (arg3) {
10215             ret = get_user_s64(off, arg3);
10216             if (is_error(ret)) {
10217                 return ret;
10218             }
10219             offp = &off;
10220         }
10221         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10222         if (!is_error(ret) && arg3) {
10223             abi_long ret2 = put_user_s64(off, arg3);
10224             if (is_error(ret2)) {
10225                 ret = ret2;
10226             }
10227         }
10228         return ret;
10229     }
10230 #endif
10231 #endif
10232 #ifdef TARGET_NR_vfork
10233     case TARGET_NR_vfork:
10234         return get_errno(do_fork(cpu_env,
10235                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10236                          0, 0, 0, 0));
10237 #endif
10238 #ifdef TARGET_NR_ugetrlimit
10239     case TARGET_NR_ugetrlimit:
10240     {
10241         struct rlimit rlim;
10242         int resource = target_to_host_resource(arg1);
10243         ret = get_errno(getrlimit(resource, &rlim));
10244         if (!is_error(ret)) {
10245             struct target_rlimit *target_rlim;
10246             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10247                 return -TARGET_EFAULT;
10248             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10249             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10250             unlock_user_struct(target_rlim, arg2, 1);
10251         }
10252         return ret;
10253     }
10254 #endif
10255 #ifdef TARGET_NR_truncate64
10256     case TARGET_NR_truncate64:
10257         if (!(p = lock_user_string(arg1)))
10258             return -TARGET_EFAULT;
10259         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10260         unlock_user(p, arg1, 0);
10261         return ret;
10262 #endif
10263 #ifdef TARGET_NR_ftruncate64
10264     case TARGET_NR_ftruncate64:
10265         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10266 #endif
10267 #ifdef TARGET_NR_stat64
10268     case TARGET_NR_stat64:
10269         if (!(p = lock_user_string(arg1))) {
10270             return -TARGET_EFAULT;
10271         }
10272         ret = get_errno(stat(path(p), &st));
10273         unlock_user(p, arg1, 0);
10274         if (!is_error(ret))
10275             ret = host_to_target_stat64(cpu_env, arg2, &st);
10276         return ret;
10277 #endif
10278 #ifdef TARGET_NR_lstat64
10279     case TARGET_NR_lstat64:
10280         if (!(p = lock_user_string(arg1))) {
10281             return -TARGET_EFAULT;
10282         }
10283         ret = get_errno(lstat(path(p), &st));
10284         unlock_user(p, arg1, 0);
10285         if (!is_error(ret))
10286             ret = host_to_target_stat64(cpu_env, arg2, &st);
10287         return ret;
10288 #endif
10289 #ifdef TARGET_NR_fstat64
10290     case TARGET_NR_fstat64:
10291         ret = get_errno(fstat(arg1, &st));
10292         if (!is_error(ret))
10293             ret = host_to_target_stat64(cpu_env, arg2, &st);
10294         return ret;
10295 #endif
10296 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10297 #ifdef TARGET_NR_fstatat64
10298     case TARGET_NR_fstatat64:
10299 #endif
10300 #ifdef TARGET_NR_newfstatat
10301     case TARGET_NR_newfstatat:
10302 #endif
10303         if (!(p = lock_user_string(arg2))) {
10304             return -TARGET_EFAULT;
10305         }
10306         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10307         unlock_user(p, arg2, 0);
10308         if (!is_error(ret))
10309             ret = host_to_target_stat64(cpu_env, arg3, &st);
10310         return ret;
10311 #endif
10312 #if defined(TARGET_NR_statx)
10313     case TARGET_NR_statx:
10314         {
10315             struct target_statx *target_stx;
10316             int dirfd = arg1;
10317             int flags = arg3;
10318 
10319             p = lock_user_string(arg2);
10320             if (p == NULL) {
10321                 return -TARGET_EFAULT;
10322             }
10323 #if defined(__NR_statx)
10324             {
10325                 /*
10326                  * It is assumed that struct statx is architecture independent.
10327                  */
10328                 struct target_statx host_stx;
10329                 int mask = arg4;
10330 
10331                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10332                 if (!is_error(ret)) {
10333                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10334                         unlock_user(p, arg2, 0);
10335                         return -TARGET_EFAULT;
10336                     }
10337                 }
10338 
10339                 if (ret != -TARGET_ENOSYS) {
10340                     unlock_user(p, arg2, 0);
10341                     return ret;
10342                 }
10343             }
10344 #endif
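            /*
             * Fallback: either the host has no statx() or it returned
             * ENOSYS, so emulate the request with fstatat() and fill in the
             * subset of statx fields that a struct stat can provide.
             */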
10345             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10346             unlock_user(p, arg2, 0);
10347 
10348             if (!is_error(ret)) {
10349                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10350                     return -TARGET_EFAULT;
10351                 }
10352                 memset(target_stx, 0, sizeof(*target_stx));
10353                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10354                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10355                 __put_user(st.st_ino, &target_stx->stx_ino);
10356                 __put_user(st.st_mode, &target_stx->stx_mode);
10357                 __put_user(st.st_uid, &target_stx->stx_uid);
10358                 __put_user(st.st_gid, &target_stx->stx_gid);
10359                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10360                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10361                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10362                 __put_user(st.st_size, &target_stx->stx_size);
10363                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10364                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10365                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10366                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10367                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10368                 unlock_user_struct(target_stx, arg5, 1);
10369             }
10370         }
10371         return ret;
10372 #endif
10373 #ifdef TARGET_NR_lchown
10374     case TARGET_NR_lchown:
10375         if (!(p = lock_user_string(arg1)))
10376             return -TARGET_EFAULT;
10377         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10378         unlock_user(p, arg1, 0);
10379         return ret;
10380 #endif
10381 #ifdef TARGET_NR_getuid
10382     case TARGET_NR_getuid:
10383         return get_errno(high2lowuid(getuid()));
10384 #endif
10385 #ifdef TARGET_NR_getgid
10386     case TARGET_NR_getgid:
10387         return get_errno(high2lowgid(getgid()));
10388 #endif
10389 #ifdef TARGET_NR_geteuid
10390     case TARGET_NR_geteuid:
10391         return get_errno(high2lowuid(geteuid()));
10392 #endif
10393 #ifdef TARGET_NR_getegid
10394     case TARGET_NR_getegid:
10395         return get_errno(high2lowgid(getegid()));
10396 #endif
10397     case TARGET_NR_setreuid:
10398         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10399     case TARGET_NR_setregid:
10400         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10401     case TARGET_NR_getgroups:
10402         {
10403             int gidsetsize = arg1;
10404             target_id *target_grouplist;
10405             gid_t *grouplist;
10406             int i;
10407 
10408             grouplist = alloca(gidsetsize * sizeof(gid_t));
10409             ret = get_errno(getgroups(gidsetsize, grouplist));
10410             if (gidsetsize == 0)
10411                 return ret;
10412             if (!is_error(ret)) {
10413                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10414                 if (!target_grouplist)
10415                     return -TARGET_EFAULT;
10416                 for (i = 0; i < ret; i++)
10417                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10418                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10419             }
10420         }
10421         return ret;
10422     case TARGET_NR_setgroups:
10423         {
10424             int gidsetsize = arg1;
10425             target_id *target_grouplist;
10426             gid_t *grouplist = NULL;
10427             int i;
10428             if (gidsetsize) {
10429                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10430                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10431                 if (!target_grouplist) {
10432                     return -TARGET_EFAULT;
10433                 }
10434                 for (i = 0; i < gidsetsize; i++) {
10435                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10436                 }
10437                 unlock_user(target_grouplist, arg2, 0);
10438             }
10439             return get_errno(setgroups(gidsetsize, grouplist));
10440         }
10441     case TARGET_NR_fchown:
10442         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10443 #if defined(TARGET_NR_fchownat)
10444     case TARGET_NR_fchownat:
10445         if (!(p = lock_user_string(arg2)))
10446             return -TARGET_EFAULT;
10447         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10448                                  low2highgid(arg4), arg5));
10449         unlock_user(p, arg2, 0);
10450         return ret;
10451 #endif
10452 #ifdef TARGET_NR_setresuid
10453     case TARGET_NR_setresuid:
10454         return get_errno(sys_setresuid(low2highuid(arg1),
10455                                        low2highuid(arg2),
10456                                        low2highuid(arg3)));
10457 #endif
10458 #ifdef TARGET_NR_getresuid
10459     case TARGET_NR_getresuid:
10460         {
10461             uid_t ruid, euid, suid;
10462             ret = get_errno(getresuid(&ruid, &euid, &suid));
10463             if (!is_error(ret)) {
10464                 if (put_user_id(high2lowuid(ruid), arg1)
10465                     || put_user_id(high2lowuid(euid), arg2)
10466                     || put_user_id(high2lowuid(suid), arg3))
10467                     return -TARGET_EFAULT;
10468             }
10469         }
10470         return ret;
10471 #endif
10472 #ifdef TARGET_NR_setresgid
10473     case TARGET_NR_setresgid:
10474         return get_errno(sys_setresgid(low2highgid(arg1),
10475                                        low2highgid(arg2),
10476                                        low2highgid(arg3)));
10477 #endif
10478 #ifdef TARGET_NR_getresgid
10479     case TARGET_NR_getresgid:
10480         {
10481             gid_t rgid, egid, sgid;
10482             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10483             if (!is_error(ret)) {
10484                 if (put_user_id(high2lowgid(rgid), arg1)
10485                     || put_user_id(high2lowgid(egid), arg2)
10486                     || put_user_id(high2lowgid(sgid), arg3))
10487                     return -TARGET_EFAULT;
10488             }
10489         }
10490         return ret;
10491 #endif
10492 #ifdef TARGET_NR_chown
10493     case TARGET_NR_chown:
10494         if (!(p = lock_user_string(arg1)))
10495             return -TARGET_EFAULT;
10496         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10497         unlock_user(p, arg1, 0);
10498         return ret;
10499 #endif
10500     case TARGET_NR_setuid:
10501         return get_errno(sys_setuid(low2highuid(arg1)));
10502     case TARGET_NR_setgid:
10503         return get_errno(sys_setgid(low2highgid(arg1)));
10504     case TARGET_NR_setfsuid:
10505         return get_errno(setfsuid(arg1));
10506     case TARGET_NR_setfsgid:
10507         return get_errno(setfsgid(arg1));
10508 
10509 #ifdef TARGET_NR_lchown32
10510     case TARGET_NR_lchown32:
10511         if (!(p = lock_user_string(arg1)))
10512             return -TARGET_EFAULT;
10513         ret = get_errno(lchown(p, arg2, arg3));
10514         unlock_user(p, arg1, 0);
10515         return ret;
10516 #endif
10517 #ifdef TARGET_NR_getuid32
10518     case TARGET_NR_getuid32:
10519         return get_errno(getuid());
10520 #endif
10521 
10522 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10523    /* Alpha specific */
10524     case TARGET_NR_getxuid:
10525          {
10526             uid_t euid;
10527             euid = geteuid();
10528             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10529          }
10530         return get_errno(getuid());
10531 #endif
10532 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10533    /* Alpha specific */
10534     case TARGET_NR_getxgid:
10535          {
10536             gid_t egid;
10537             egid = getegid();
10538             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10539          }
10540         return get_errno(getgid());
10541 #endif
10542 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10543     /* Alpha specific */
10544     case TARGET_NR_osf_getsysinfo:
10545         ret = -TARGET_EOPNOTSUPP;
10546         switch (arg1) {
10547           case TARGET_GSI_IEEE_FP_CONTROL:
10548             {
10549                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10550                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10551 
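                /*
                 * The accrued IEEE status bits live in the hardware FPCR (see
                 * the matching osf_setsysinfo handling below), so rebuild
                 * them from there and merge them into the software control
                 * word returned to the guest.
                 */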
10552                 swcr &= ~SWCR_STATUS_MASK;
10553                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10554 
10555                 if (put_user_u64(swcr, arg2))
10556                     return -TARGET_EFAULT;
10557                 ret = 0;
10558             }
10559             break;
10560 
10561           /* case GSI_IEEE_STATE_AT_SIGNAL:
10562              -- Not implemented in linux kernel.
10563              case GSI_UACPROC:
10564              -- Retrieves current unaligned access state; not much used.
10565              case GSI_PROC_TYPE:
10566              -- Retrieves implver information; surely not used.
10567              case GSI_GET_HWRPB:
10568              -- Grabs a copy of the HWRPB; surely not used.
10569           */
10570         }
10571         return ret;
10572 #endif
10573 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10574     /* Alpha specific */
10575     case TARGET_NR_osf_setsysinfo:
10576         ret = -TARGET_EOPNOTSUPP;
10577         switch (arg1) {
10578           case TARGET_SSI_IEEE_FP_CONTROL:
10579             {
10580                 uint64_t swcr, fpcr;
10581 
10582                 if (get_user_u64(swcr, arg2)) {
10583                     return -TARGET_EFAULT;
10584                 }
10585 
10586                 /*
10587                  * The kernel calls swcr_update_status to update the
10588                  * status bits from the fpcr at every point that it
10589                  * could be queried.  Therefore, we store the status
10590                  * bits only in FPCR.
10591                  */
10592                 ((CPUAlphaState *)cpu_env)->swcr
10593                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10594 
10595                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10596                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10597                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10598                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10599                 ret = 0;
10600             }
10601             break;
10602 
10603           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10604             {
10605                 uint64_t exc, fpcr, fex;
10606 
10607                 if (get_user_u64(exc, arg2)) {
10608                     return -TARGET_EFAULT;
10609                 }
10610                 exc &= SWCR_STATUS_MASK;
10611                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10612 
10613                 /* Old exceptions are not signaled.  */
10614                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10615                 fex = exc & ~fex;
10616                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10617                 fex &= ((CPUArchState *)cpu_env)->swcr;
10618 
10619                 /* Update the hardware fpcr.  */
10620                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10621                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10622 
10623                 if (fex) {
10624                     int si_code = TARGET_FPE_FLTUNK;
10625                     target_siginfo_t info;
10626 
10627                     if (fex & SWCR_TRAP_ENABLE_DNO) {
10628                         si_code = TARGET_FPE_FLTUND;
10629                     }
10630                     if (fex & SWCR_TRAP_ENABLE_INE) {
10631                         si_code = TARGET_FPE_FLTRES;
10632                     }
10633                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10634                         si_code = TARGET_FPE_FLTUND;
10635                     }
10636                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10637                         si_code = TARGET_FPE_FLTOVF;
10638                     }
10639                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10640                         si_code = TARGET_FPE_FLTDIV;
10641                     }
10642                     if (fex & SWCR_TRAP_ENABLE_INV) {
10643                         si_code = TARGET_FPE_FLTINV;
10644                     }
10645 
10646                     info.si_signo = SIGFPE;
10647                     info.si_errno = 0;
10648                     info.si_code = si_code;
10649                     info._sifields._sigfault._addr
10650                         = ((CPUArchState *)cpu_env)->pc;
10651                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10652                                  QEMU_SI_FAULT, &info);
10653                 }
10654                 ret = 0;
10655             }
10656             break;
10657 
10658           /* case SSI_NVPAIRS:
10659              -- Used with SSIN_UACPROC to enable unaligned accesses.
10660              case SSI_IEEE_STATE_AT_SIGNAL:
10661              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10662              -- Not implemented in linux kernel
10663           */
10664         }
10665         return ret;
10666 #endif
10667 #ifdef TARGET_NR_osf_sigprocmask
10668     /* Alpha specific.  */
10669     case TARGET_NR_osf_sigprocmask:
10670         {
10671             abi_ulong mask;
10672             int how;
10673             sigset_t set, oldset;
10674 
10675             switch (arg1) {
10676             case TARGET_SIG_BLOCK:
10677                 how = SIG_BLOCK;
10678                 break;
10679             case TARGET_SIG_UNBLOCK:
10680                 how = SIG_UNBLOCK;
10681                 break;
10682             case TARGET_SIG_SETMASK:
10683                 how = SIG_SETMASK;
10684                 break;
10685             default:
10686                 return -TARGET_EINVAL;
10687             }
10688             mask = arg2;
10689             target_to_host_old_sigset(&set, &mask);
10690             ret = do_sigprocmask(how, &set, &oldset);
10691             if (!ret) {
10692                 host_to_target_old_sigset(&mask, &oldset);
10693                 ret = mask;
10694             }
10695         }
10696         return ret;
10697 #endif
10698 
10699 #ifdef TARGET_NR_getgid32
10700     case TARGET_NR_getgid32:
10701         return get_errno(getgid());
10702 #endif
10703 #ifdef TARGET_NR_geteuid32
10704     case TARGET_NR_geteuid32:
10705         return get_errno(geteuid());
10706 #endif
10707 #ifdef TARGET_NR_getegid32
10708     case TARGET_NR_getegid32:
10709         return get_errno(getegid());
10710 #endif
10711 #ifdef TARGET_NR_setreuid32
10712     case TARGET_NR_setreuid32:
10713         return get_errno(setreuid(arg1, arg2));
10714 #endif
10715 #ifdef TARGET_NR_setregid32
10716     case TARGET_NR_setregid32:
10717         return get_errno(setregid(arg1, arg2));
10718 #endif
10719 #ifdef TARGET_NR_getgroups32
10720     case TARGET_NR_getgroups32:
10721         {
10722             int gidsetsize = arg1;
10723             uint32_t *target_grouplist;
10724             gid_t *grouplist;
10725             int i;
10726 
10727             grouplist = alloca(gidsetsize * sizeof(gid_t));
10728             ret = get_errno(getgroups(gidsetsize, grouplist));
10729             if (gidsetsize == 0)
10730                 return ret;
10731             if (!is_error(ret)) {
10732                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10733                 if (!target_grouplist) {
10734                     return -TARGET_EFAULT;
10735                 }
10736                 for (i = 0; i < ret; i++)
10737                     target_grouplist[i] = tswap32(grouplist[i]);
10738                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10739             }
10740         }
10741         return ret;
10742 #endif
10743 #ifdef TARGET_NR_setgroups32
10744     case TARGET_NR_setgroups32:
10745         {
10746             int gidsetsize = arg1;
10747             uint32_t *target_grouplist;
10748             gid_t *grouplist;
10749             int i;
10750 
10751             grouplist = alloca(gidsetsize * sizeof(gid_t));
10752             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10753             if (!target_grouplist) {
10754                 return -TARGET_EFAULT;
10755             }
10756             for (i = 0; i < gidsetsize; i++)
10757                 grouplist[i] = tswap32(target_grouplist[i]);
10758             unlock_user(target_grouplist, arg2, 0);
10759             return get_errno(setgroups(gidsetsize, grouplist));
10760         }
10761 #endif
10762 #ifdef TARGET_NR_fchown32
10763     case TARGET_NR_fchown32:
10764         return get_errno(fchown(arg1, arg2, arg3));
10765 #endif
10766 #ifdef TARGET_NR_setresuid32
10767     case TARGET_NR_setresuid32:
10768         return get_errno(sys_setresuid(arg1, arg2, arg3));
10769 #endif
10770 #ifdef TARGET_NR_getresuid32
10771     case TARGET_NR_getresuid32:
10772         {
10773             uid_t ruid, euid, suid;
10774             ret = get_errno(getresuid(&ruid, &euid, &suid));
10775             if (!is_error(ret)) {
10776                 if (put_user_u32(ruid, arg1)
10777                     || put_user_u32(euid, arg2)
10778                     || put_user_u32(suid, arg3))
10779                     return -TARGET_EFAULT;
10780             }
10781         }
10782         return ret;
10783 #endif
10784 #ifdef TARGET_NR_setresgid32
10785     case TARGET_NR_setresgid32:
10786         return get_errno(sys_setresgid(arg1, arg2, arg3));
10787 #endif
10788 #ifdef TARGET_NR_getresgid32
10789     case TARGET_NR_getresgid32:
10790         {
10791             gid_t rgid, egid, sgid;
10792             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10793             if (!is_error(ret)) {
10794                 if (put_user_u32(rgid, arg1)
10795                     || put_user_u32(egid, arg2)
10796                     || put_user_u32(sgid, arg3))
10797                     return -TARGET_EFAULT;
10798             }
10799         }
10800         return ret;
10801 #endif
10802 #ifdef TARGET_NR_chown32
10803     case TARGET_NR_chown32:
10804         if (!(p = lock_user_string(arg1)))
10805             return -TARGET_EFAULT;
10806         ret = get_errno(chown(p, arg2, arg3));
10807         unlock_user(p, arg1, 0);
10808         return ret;
10809 #endif
10810 #ifdef TARGET_NR_setuid32
10811     case TARGET_NR_setuid32:
10812         return get_errno(sys_setuid(arg1));
10813 #endif
10814 #ifdef TARGET_NR_setgid32
10815     case TARGET_NR_setgid32:
10816         return get_errno(sys_setgid(arg1));
10817 #endif
10818 #ifdef TARGET_NR_setfsuid32
10819     case TARGET_NR_setfsuid32:
10820         return get_errno(setfsuid(arg1));
10821 #endif
10822 #ifdef TARGET_NR_setfsgid32
10823     case TARGET_NR_setfsgid32:
10824         return get_errno(setfsgid(arg1));
10825 #endif
10826 #ifdef TARGET_NR_mincore
10827     case TARGET_NR_mincore:
10828         {
10829             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10830             if (!a) {
10831                 return -TARGET_ENOMEM;
10832             }
10833             p = lock_user_string(arg3);
10834             if (!p) {
10835                 ret = -TARGET_EFAULT;
10836             } else {
10837                 ret = get_errno(mincore(a, arg2, p));
10838                 unlock_user(p, arg3, ret);
10839             }
10840             unlock_user(a, arg1, 0);
10841         }
10842         return ret;
10843 #endif
10844 #ifdef TARGET_NR_arm_fadvise64_64
10845     case TARGET_NR_arm_fadvise64_64:
10846         /* arm_fadvise64_64 looks like fadvise64_64 but
10847          * with different argument order: fd, advice, offset, len
10848          * rather than the usual fd, offset, len, advice.
10849          * Note that offset and len are both 64-bit so appear as
10850          * pairs of 32-bit registers.
10851          */
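        /*
         * Added illustration (an assumption about the usual 32-bit ARM EABI
         * register convention, shown only as an example): arg2 is the advice,
         * (arg3, arg4) are the two 32-bit halves of offset and (arg5, arg6)
         * the halves of len, typically arriving in guest registers r0-r5.
         * target_offset64() below recombines each pair using the guest's
         * word order.
         */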
10852         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10853                             target_offset64(arg5, arg6), arg2);
10854         return -host_to_target_errno(ret);
10855 #endif
10856 
10857 #if TARGET_ABI_BITS == 32
10858 
10859 #ifdef TARGET_NR_fadvise64_64
10860     case TARGET_NR_fadvise64_64:
10861 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10862         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10863         ret = arg2;
10864         arg2 = arg3;
10865         arg3 = arg4;
10866         arg4 = arg5;
10867         arg5 = arg6;
10868         arg6 = ret;
10869 #else
10870         /* 6 args: fd, offset (high, low), len (high, low), advice */
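        /*
         * Added note: regpairs_aligned() reports whether this guest ABI
         * requires 64-bit syscall arguments to start in an even-numbered
         * register, padding with an unused register when necessary.  That
         * padding is why every argument shifts up by one slot in the branch
         * below, as the comment inside the if also notes.
         */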
10871         if (regpairs_aligned(cpu_env, num)) {
10872             /* offset is in (3,4), len in (5,6) and advice in 7 */
10873             arg2 = arg3;
10874             arg3 = arg4;
10875             arg4 = arg5;
10876             arg5 = arg6;
10877             arg6 = arg7;
10878         }
10879 #endif
10880         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10881                             target_offset64(arg4, arg5), arg6);
10882         return -host_to_target_errno(ret);
10883 #endif
10884 
10885 #ifdef TARGET_NR_fadvise64
10886     case TARGET_NR_fadvise64:
10887         /* 5 args: fd, offset (high, low), len, advice */
10888         if (regpairs_aligned(cpu_env, num)) {
10889             /* offset is in (3,4), len in 5 and advice in 6 */
10890             arg2 = arg3;
10891             arg3 = arg4;
10892             arg4 = arg5;
10893             arg5 = arg6;
10894         }
10895         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10896         return -host_to_target_errno(ret);
10897 #endif
10898 
10899 #else /* not a 32-bit ABI */
10900 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10901 #ifdef TARGET_NR_fadvise64_64
10902     case TARGET_NR_fadvise64_64:
10903 #endif
10904 #ifdef TARGET_NR_fadvise64
10905     case TARGET_NR_fadvise64:
10906 #endif
10907 #ifdef TARGET_S390X
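        /*
         * Added note: the s390 ABI is assumed here to number
         * POSIX_FADV_DONTNEED and POSIX_FADV_NOREUSE as 6 and 7 rather than
         * the generic 4 and 5, so guest values are remapped to the host
         * constants, and guest 4/5 (not valid advice on s390) are forced to
         * an invalid host value.
         */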
10908         switch (arg4) {
10909         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10910         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10911         case 6: arg4 = POSIX_FADV_DONTNEED; break;
10912         case 7: arg4 = POSIX_FADV_NOREUSE; break;
10913         default: break;
10914         }
10915 #endif
10916         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10917 #endif
10918 #endif /* end of 64-bit ABI fadvise handling */
10919 
10920 #ifdef TARGET_NR_madvise
10921     case TARGET_NR_madvise:
10922         /* A straight passthrough may not be safe because qemu sometimes
10923            turns private file-backed mappings into anonymous mappings.
10924            This will break MADV_DONTNEED.
10925            This is a hint, so ignoring and returning success is ok.  */
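        /*
         * Added illustration: a guest that calls
         *     madvise(addr, len, MADV_DONTNEED);
         * on a private file-backed mapping expects later accesses to re-read
         * the file's contents.  If QEMU has quietly replaced that mapping
         * with an anonymous one, a passthrough would instead hand back
         * zero-filled pages, so dropping the hint is the safer choice.
         */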
10926         return 0;
10927 #endif
10928 #if TARGET_ABI_BITS == 32
10929     case TARGET_NR_fcntl64:
10930     {
10931         int cmd;
10932         struct flock64 fl;
10933         from_flock64_fn *copyfrom = copy_from_user_flock64;
10934         to_flock64_fn *copyto = copy_to_user_flock64;
10935 
10936 #ifdef TARGET_ARM
10937         if (!((CPUARMState *)cpu_env)->eabi) {
10938             copyfrom = copy_from_user_oabi_flock64;
10939             copyto = copy_to_user_oabi_flock64;
10940         }
10941 #endif
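        /*
         * Added note: the separate OABI copy helpers are needed because the
         * old ARM ABI is assumed to lay out struct flock64 without the
         * 8-byte alignment (and padding) that EABI gives its 64-bit members,
         * so the guest structure layout differs between the two ABIs.
         */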
10942 
10943         cmd = target_to_host_fcntl_cmd(arg2);
10944         if (cmd == -TARGET_EINVAL) {
10945             return cmd;
10946         }
10947 
10948         switch (arg2) {
10949         case TARGET_F_GETLK64:
10950             ret = copyfrom(&fl, arg3);
10951             if (ret) {
10952                 break;
10953             }
10954             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10955             if (ret == 0) {
10956                 ret = copyto(arg3, &fl);
10957             }
10958             break;
10959 
10960         case TARGET_F_SETLK64:
10961         case TARGET_F_SETLKW64:
10962             ret = copyfrom(&fl, arg3);
10963             if (ret) {
10964                 break;
10965             }
10966             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10967             break;
10968         default:
10969             ret = do_fcntl(arg1, arg2, arg3);
10970             break;
10971         }
10972         return ret;
10973     }
10974 #endif
10975 #ifdef TARGET_NR_cacheflush
10976     case TARGET_NR_cacheflush:
10977         /* self-modifying code is handled automatically, so nothing needed */
10978         return 0;
10979 #endif
10980 #ifdef TARGET_NR_getpagesize
10981     case TARGET_NR_getpagesize:
10982         return TARGET_PAGE_SIZE;
10983 #endif
10984     case TARGET_NR_gettid:
10985         return get_errno(sys_gettid());
10986 #ifdef TARGET_NR_readahead
10987     case TARGET_NR_readahead:
10988 #if TARGET_ABI_BITS == 32
10989         if (regpairs_aligned(cpu_env, num)) {
10990             arg2 = arg3;
10991             arg3 = arg4;
10992             arg4 = arg5;
10993         }
10994         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
10995 #else
10996         ret = get_errno(readahead(arg1, arg2, arg3));
10997 #endif
10998         return ret;
10999 #endif
11000 #ifdef CONFIG_ATTR
11001 #ifdef TARGET_NR_setxattr
11002     case TARGET_NR_listxattr:
11003     case TARGET_NR_llistxattr:
11004     {
11005         void *p, *b = 0;
11006         if (arg2) {
11007             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11008             if (!b) {
11009                 return -TARGET_EFAULT;
11010             }
11011         }
11012         p = lock_user_string(arg1);
11013         if (p) {
11014             if (num == TARGET_NR_listxattr) {
11015                 ret = get_errno(listxattr(p, b, arg3));
11016             } else {
11017                 ret = get_errno(llistxattr(p, b, arg3));
11018             }
11019         } else {
11020             ret = -TARGET_EFAULT;
11021         }
11022         unlock_user(p, arg1, 0);
11023         unlock_user(b, arg2, arg3);
11024         return ret;
11025     }
11026     case TARGET_NR_flistxattr:
11027     {
11028         void *b = 0;
11029         if (arg2) {
11030             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11031             if (!b) {
11032                 return -TARGET_EFAULT;
11033             }
11034         }
11035         ret = get_errno(flistxattr(arg1, b, arg3));
11036         unlock_user(b, arg2, arg3);
11037         return ret;
11038     }
11039     case TARGET_NR_setxattr:
11040     case TARGET_NR_lsetxattr:
11041         {
11042             void *p, *n, *v = 0;
11043             if (arg3) {
11044                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11045                 if (!v) {
11046                     return -TARGET_EFAULT;
11047                 }
11048             }
11049             p = lock_user_string(arg1);
11050             n = lock_user_string(arg2);
11051             if (p && n) {
11052                 if (num == TARGET_NR_setxattr) {
11053                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11054                 } else {
11055                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11056                 }
11057             } else {
11058                 ret = -TARGET_EFAULT;
11059             }
11060             unlock_user(p, arg1, 0);
11061             unlock_user(n, arg2, 0);
11062             unlock_user(v, arg3, 0);
11063         }
11064         return ret;
11065     case TARGET_NR_fsetxattr:
11066         {
11067             void *n, *v = 0;
11068             if (arg3) {
11069                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11070                 if (!v) {
11071                     return -TARGET_EFAULT;
11072                 }
11073             }
11074             n = lock_user_string(arg2);
11075             if (n) {
11076                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11077             } else {
11078                 ret = -TARGET_EFAULT;
11079             }
11080             unlock_user(n, arg2, 0);
11081             unlock_user(v, arg3, 0);
11082         }
11083         return ret;
11084     case TARGET_NR_getxattr:
11085     case TARGET_NR_lgetxattr:
11086         {
11087             void *p, *n, *v = 0;
11088             if (arg3) {
11089                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11090                 if (!v) {
11091                     return -TARGET_EFAULT;
11092                 }
11093             }
11094             p = lock_user_string(arg1);
11095             n = lock_user_string(arg2);
11096             if (p && n) {
11097                 if (num == TARGET_NR_getxattr) {
11098                     ret = get_errno(getxattr(p, n, v, arg4));
11099                 } else {
11100                     ret = get_errno(lgetxattr(p, n, v, arg4));
11101                 }
11102             } else {
11103                 ret = -TARGET_EFAULT;
11104             }
11105             unlock_user(p, arg1, 0);
11106             unlock_user(n, arg2, 0);
11107             unlock_user(v, arg3, arg4);
11108         }
11109         return ret;
11110     case TARGET_NR_fgetxattr:
11111         {
11112             void *n, *v = 0;
11113             if (arg3) {
11114                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11115                 if (!v) {
11116                     return -TARGET_EFAULT;
11117                 }
11118             }
11119             n = lock_user_string(arg2);
11120             if (n) {
11121                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11122             } else {
11123                 ret = -TARGET_EFAULT;
11124             }
11125             unlock_user(n, arg2, 0);
11126             unlock_user(v, arg3, arg4);
11127         }
11128         return ret;
11129     case TARGET_NR_removexattr:
11130     case TARGET_NR_lremovexattr:
11131         {
11132             void *p, *n;
11133             p = lock_user_string(arg1);
11134             n = lock_user_string(arg2);
11135             if (p && n) {
11136                 if (num == TARGET_NR_removexattr) {
11137                     ret = get_errno(removexattr(p, n));
11138                 } else {
11139                     ret = get_errno(lremovexattr(p, n));
11140                 }
11141             } else {
11142                 ret = -TARGET_EFAULT;
11143             }
11144             unlock_user(p, arg1, 0);
11145             unlock_user(n, arg2, 0);
11146         }
11147         return ret;
11148     case TARGET_NR_fremovexattr:
11149         {
11150             void *n;
11151             n = lock_user_string(arg2);
11152             if (n) {
11153                 ret = get_errno(fremovexattr(arg1, n));
11154             } else {
11155                 ret = -TARGET_EFAULT;
11156             }
11157             unlock_user(n, arg2, 0);
11158         }
11159         return ret;
11160 #endif
11161 #endif /* CONFIG_ATTR */
11162 #ifdef TARGET_NR_set_thread_area
11163     case TARGET_NR_set_thread_area:
11164 #if defined(TARGET_MIPS)
11165       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11166       return 0;
11167 #elif defined(TARGET_CRIS)
11168       if (arg1 & 0xff)
11169           ret = -TARGET_EINVAL;
11170       else {
11171           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11172           ret = 0;
11173       }
11174       return ret;
11175 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11176       return do_set_thread_area(cpu_env, arg1);
11177 #elif defined(TARGET_M68K)
11178       {
11179           TaskState *ts = cpu->opaque;
11180           ts->tp_value = arg1;
11181           return 0;
11182       }
11183 #else
11184       return -TARGET_ENOSYS;
11185 #endif
11186 #endif
11187 #ifdef TARGET_NR_get_thread_area
11188     case TARGET_NR_get_thread_area:
11189 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11190         return do_get_thread_area(cpu_env, arg1);
11191 #elif defined(TARGET_M68K)
11192         {
11193             TaskState *ts = cpu->opaque;
11194             return ts->tp_value;
11195         }
11196 #else
11197         return -TARGET_ENOSYS;
11198 #endif
11199 #endif
11200 #ifdef TARGET_NR_getdomainname
11201     case TARGET_NR_getdomainname:
11202         return -TARGET_ENOSYS;
11203 #endif
11204 
11205 #ifdef TARGET_NR_clock_settime
11206     case TARGET_NR_clock_settime:
11207     {
11208         struct timespec ts;
11209 
11210         ret = target_to_host_timespec(&ts, arg2);
11211         if (!is_error(ret)) {
11212             ret = get_errno(clock_settime(arg1, &ts));
11213         }
11214         return ret;
11215     }
11216 #endif
11217 #ifdef TARGET_NR_clock_gettime
11218     case TARGET_NR_clock_gettime:
11219     {
11220         struct timespec ts;
11221         ret = get_errno(clock_gettime(arg1, &ts));
11222         if (!is_error(ret)) {
11223             ret = host_to_target_timespec(arg2, &ts);
11224         }
11225         return ret;
11226     }
11227 #endif
11228 #ifdef TARGET_NR_clock_getres
11229     case TARGET_NR_clock_getres:
11230     {
11231         struct timespec ts;
11232         ret = get_errno(clock_getres(arg1, &ts));
11233         if (!is_error(ret)) {
11234             host_to_target_timespec(arg2, &ts);
11235         }
11236         return ret;
11237     }
11238 #endif
11239 #ifdef TARGET_NR_clock_nanosleep
11240     case TARGET_NR_clock_nanosleep:
11241     {
11242         struct timespec ts;
11243         target_to_host_timespec(&ts, arg3);
11244         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11245                                              &ts, arg4 ? &ts : NULL));
11246         if (arg4)
11247             host_to_target_timespec(arg4, &ts);
11248 
11249 #if defined(TARGET_PPC)
11250         /* clock_nanosleep is odd in that it returns positive errno values.
11251          * On PPC, CR0 bit 3 should be set in such a situation. */
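        /*
         * Added note: in QEMU's crf[0] encoding the summary-overflow (SO)
         * bit, which the PPC kernel ABI uses to flag a failed syscall, is
         * the low bit of the field, hence the "|= 1" below.
         */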
11252         if (ret && ret != -TARGET_ERESTARTSYS) {
11253             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11254         }
11255 #endif
11256         return ret;
11257     }
11258 #endif
11259 
11260 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11261     case TARGET_NR_set_tid_address:
11262         return get_errno(set_tid_address((int *)g2h(arg1)));
11263 #endif
11264 
11265     case TARGET_NR_tkill:
11266         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11267 
11268     case TARGET_NR_tgkill:
11269         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11270                          target_to_host_signal(arg3)));
11271 
11272 #ifdef TARGET_NR_set_robust_list
11273     case TARGET_NR_set_robust_list:
11274     case TARGET_NR_get_robust_list:
11275         /* The ABI for supporting robust futexes has userspace pass
11276          * the kernel a pointer to a linked list which is updated by
11277          * userspace after the syscall; the list is walked by the kernel
11278          * when the thread exits. Since the linked list in QEMU guest
11279          * memory isn't a valid linked list for the host and we have
11280          * no way to reliably intercept the thread-death event, we can't
11281          * support these. Silently return ENOSYS so that guest userspace
11282          * falls back to a non-robust futex implementation (which should
11283          * be OK except in the corner case of the guest crashing while
11284          * holding a mutex that is shared with another process via
11285          * shared memory).
11286          */
11287         return -TARGET_ENOSYS;
11288 #endif
11289 
11290 #if defined(TARGET_NR_utimensat)
11291     case TARGET_NR_utimensat:
11292         {
11293             struct timespec *tsp, ts[2];
11294             if (!arg3) {
11295                 tsp = NULL;
11296             } else {
11297                 target_to_host_timespec(ts, arg3);
11298                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11299                 tsp = ts;
11300             }
11301             if (!arg2)
11302                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11303             else {
11304                 if (!(p = lock_user_string(arg2))) {
11305                     return -TARGET_EFAULT;
11306                 }
11307                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11308                 unlock_user(p, arg2, 0);
11309             }
11310         }
11311         return ret;
11312 #endif
11313     case TARGET_NR_futex:
11314         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11315 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11316     case TARGET_NR_inotify_init:
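        /*
         * Added note: fd_trans_register() attaches a translator to the new
         * descriptor so that struct inotify_event records read from it are
         * converted to the guest's byte order (see the target_inotify_trans
         * hooks in fd-trans.c).
         */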
11317         ret = get_errno(sys_inotify_init());
11318         if (ret >= 0) {
11319             fd_trans_register(ret, &target_inotify_trans);
11320         }
11321         return ret;
11322 #endif
11323 #ifdef CONFIG_INOTIFY1
11324 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11325     case TARGET_NR_inotify_init1:
11326         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11327                                           fcntl_flags_tbl)));
11328         if (ret >= 0) {
11329             fd_trans_register(ret, &target_inotify_trans);
11330         }
11331         return ret;
11332 #endif
11333 #endif
11334 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11335     case TARGET_NR_inotify_add_watch:
11336         p = lock_user_string(arg2);
11337         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11338         unlock_user(p, arg2, 0);
11339         return ret;
11340 #endif
11341 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11342     case TARGET_NR_inotify_rm_watch:
11343         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11344 #endif
11345 
11346 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11347     case TARGET_NR_mq_open:
11348         {
11349             struct mq_attr posix_mq_attr;
11350             struct mq_attr *pposix_mq_attr;
11351             int host_flags;
11352 
11353             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11354             pposix_mq_attr = NULL;
11355             if (arg4) {
11356                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11357                     return -TARGET_EFAULT;
11358                 }
11359                 pposix_mq_attr = &posix_mq_attr;
11360             }
11361             p = lock_user_string(arg1 - 1);
11362             if (!p) {
11363                 return -TARGET_EFAULT;
11364             }
11365             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11366             unlock_user (p, arg1, 0);
11367         }
11368         return ret;
11369 
11370     case TARGET_NR_mq_unlink:
11371         p = lock_user_string(arg1 - 1);
11372         if (!p) {
11373             return -TARGET_EFAULT;
11374         }
11375         ret = get_errno(mq_unlink(p));
11376         unlock_user (p, arg1, 0);
11377         return ret;
11378 
11379     case TARGET_NR_mq_timedsend:
11380         {
11381             struct timespec ts;
11382 
11383             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11384             if (arg5 != 0) {
11385                 target_to_host_timespec(&ts, arg5);
11386                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11387                 host_to_target_timespec(arg5, &ts);
11388             } else {
11389                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11390             }
11391             unlock_user (p, arg2, arg3);
11392         }
11393         return ret;
11394 
11395     case TARGET_NR_mq_timedreceive:
11396         {
11397             struct timespec ts;
11398             unsigned int prio;
11399 
11400             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11401             if (arg5 != 0) {
11402                 target_to_host_timespec(&ts, arg5);
11403                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11404                                                      &prio, &ts));
11405                 host_to_target_timespec(arg5, &ts);
11406             } else {
11407                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11408                                                      &prio, NULL));
11409             }
11410             unlock_user (p, arg2, arg3);
11411             if (arg4 != 0)
11412                 put_user_u32(prio, arg4);
11413         }
11414         return ret;
11415 
11416     /* Not implemented for now... */
11417 /*     case TARGET_NR_mq_notify: */
11418 /*         break; */
11419 
11420     case TARGET_NR_mq_getsetattr:
11421         {
11422             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11423             ret = 0;
11424             if (arg2 != 0) {
11425                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11426                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11427                                            &posix_mq_attr_out));
11428             } else if (arg3 != 0) {
11429                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11430             }
11431             if (ret == 0 && arg3 != 0) {
11432                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11433             }
11434         }
11435         return ret;
11436 #endif
11437 
11438 #ifdef CONFIG_SPLICE
11439 #ifdef TARGET_NR_tee
11440     case TARGET_NR_tee:
11441         {
11442             ret = get_errno(tee(arg1,arg2,arg3,arg4));
11443         }
11444         return ret;
11445 #endif
11446 #ifdef TARGET_NR_splice
11447     case TARGET_NR_splice:
11448         {
11449             loff_t loff_in, loff_out;
11450             loff_t *ploff_in = NULL, *ploff_out = NULL;
11451             if (arg2) {
11452                 if (get_user_u64(loff_in, arg2)) {
11453                     return -TARGET_EFAULT;
11454                 }
11455                 ploff_in = &loff_in;
11456             }
11457             if (arg4) {
11458                 if (get_user_u64(loff_out, arg4)) {
11459                     return -TARGET_EFAULT;
11460                 }
11461                 ploff_out = &loff_out;
11462             }
11463             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11464             if (arg2) {
11465                 if (put_user_u64(loff_in, arg2)) {
11466                     return -TARGET_EFAULT;
11467                 }
11468             }
11469             if (arg4) {
11470                 if (put_user_u64(loff_out, arg4)) {
11471                     return -TARGET_EFAULT;
11472                 }
11473             }
11474         }
11475         return ret;
11476 #endif
11477 #ifdef TARGET_NR_vmsplice
11478     case TARGET_NR_vmsplice:
11479         {
11480             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11481             if (vec != NULL) {
11482                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11483                 unlock_iovec(vec, arg2, arg3, 0);
11484             } else {
11485                 ret = -host_to_target_errno(errno);
11486             }
11487         }
11488         return ret;
11489 #endif
11490 #endif /* CONFIG_SPLICE */
11491 #ifdef CONFIG_EVENTFD
11492 #if defined(TARGET_NR_eventfd)
11493     case TARGET_NR_eventfd:
11494         ret = get_errno(eventfd(arg1, 0));
11495         if (ret >= 0) {
11496             fd_trans_register(ret, &target_eventfd_trans);
11497         }
11498         return ret;
11499 #endif
11500 #if defined(TARGET_NR_eventfd2)
11501     case TARGET_NR_eventfd2:
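        /*
         * Added note: this open-coded conversion relies on the Linux
         * convention that EFD_NONBLOCK and EFD_CLOEXEC share their values
         * with O_NONBLOCK and O_CLOEXEC on the host, so the host open-flag
         * constants can be passed straight to eventfd().
         */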
11502     {
11503         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11504         if (arg2 & TARGET_O_NONBLOCK) {
11505             host_flags |= O_NONBLOCK;
11506         }
11507         if (arg2 & TARGET_O_CLOEXEC) {
11508             host_flags |= O_CLOEXEC;
11509         }
11510         ret = get_errno(eventfd(arg1, host_flags));
11511         if (ret >= 0) {
11512             fd_trans_register(ret, &target_eventfd_trans);
11513         }
11514         return ret;
11515     }
11516 #endif
11517 #endif /* CONFIG_EVENTFD  */
11518 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11519     case TARGET_NR_fallocate:
11520 #if TARGET_ABI_BITS == 32
11521         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11522                                   target_offset64(arg5, arg6)));
11523 #else
11524         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11525 #endif
11526         return ret;
11527 #endif
11528 #if defined(CONFIG_SYNC_FILE_RANGE)
11529 #if defined(TARGET_NR_sync_file_range)
11530     case TARGET_NR_sync_file_range:
11531 #if TARGET_ABI_BITS == 32
11532 #if defined(TARGET_MIPS)
11533         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11534                                         target_offset64(arg5, arg6), arg7));
11535 #else
11536         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11537                                         target_offset64(arg4, arg5), arg6));
11538 #endif /* !TARGET_MIPS */
11539 #else
11540         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11541 #endif
11542         return ret;
11543 #endif
11544 #if defined(TARGET_NR_sync_file_range2)
11545     case TARGET_NR_sync_file_range2:
11546         /* This is like sync_file_range but the arguments are reordered */
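        /*
         * Added note: sync_file_range2() takes (fd, flags, offset, nbytes),
         * i.e. the flags argument (arg2 here) moves to the front so that the
         * 64-bit offset/nbytes pair can stay register-aligned on 32-bit ABIs.
         */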
11547 #if TARGET_ABI_BITS == 32
11548         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11549                                         target_offset64(arg5, arg6), arg2));
11550 #else
11551         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11552 #endif
11553         return ret;
11554 #endif
11555 #endif
11556 #if defined(TARGET_NR_signalfd4)
11557     case TARGET_NR_signalfd4:
11558         return do_signalfd4(arg1, arg2, arg4);
11559 #endif
11560 #if defined(TARGET_NR_signalfd)
11561     case TARGET_NR_signalfd:
11562         return do_signalfd4(arg1, arg2, 0);
11563 #endif
11564 #if defined(CONFIG_EPOLL)
11565 #if defined(TARGET_NR_epoll_create)
11566     case TARGET_NR_epoll_create:
11567         return get_errno(epoll_create(arg1));
11568 #endif
11569 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11570     case TARGET_NR_epoll_create1:
11571         return get_errno(epoll_create1(arg1));
11572 #endif
11573 #if defined(TARGET_NR_epoll_ctl)
11574     case TARGET_NR_epoll_ctl:
11575     {
11576         struct epoll_event ep;
11577         struct epoll_event *epp = 0;
11578         if (arg4) {
11579             struct target_epoll_event *target_ep;
11580             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11581                 return -TARGET_EFAULT;
11582             }
11583             ep.events = tswap32(target_ep->events);
11584             /* The epoll_data_t union is just opaque data to the kernel,
11585              * so we transfer all 64 bits across and need not worry what
11586              * actual data type it is.
11587              */
11588             ep.data.u64 = tswap64(target_ep->data.u64);
11589             unlock_user_struct(target_ep, arg4, 0);
11590             epp = &ep;
11591         }
11592         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11593     }
11594 #endif
11595 
11596 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11597 #if defined(TARGET_NR_epoll_wait)
11598     case TARGET_NR_epoll_wait:
11599 #endif
11600 #if defined(TARGET_NR_epoll_pwait)
11601     case TARGET_NR_epoll_pwait:
11602 #endif
11603     {
11604         struct target_epoll_event *target_ep;
11605         struct epoll_event *ep;
11606         int epfd = arg1;
11607         int maxevents = arg3;
11608         int timeout = arg4;
11609 
11610         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11611             return -TARGET_EINVAL;
11612         }
11613 
11614         target_ep = lock_user(VERIFY_WRITE, arg2,
11615                               maxevents * sizeof(struct target_epoll_event), 1);
11616         if (!target_ep) {
11617             return -TARGET_EFAULT;
11618         }
11619 
11620         ep = g_try_new(struct epoll_event, maxevents);
11621         if (!ep) {
11622             unlock_user(target_ep, arg2, 0);
11623             return -TARGET_ENOMEM;
11624         }
11625 
11626         switch (num) {
11627 #if defined(TARGET_NR_epoll_pwait)
11628         case TARGET_NR_epoll_pwait:
11629         {
11630             target_sigset_t *target_set;
11631             sigset_t _set, *set = &_set;
11632 
11633             if (arg5) {
11634                 if (arg6 != sizeof(target_sigset_t)) {
11635                     ret = -TARGET_EINVAL;
11636                     break;
11637                 }
11638 
11639                 target_set = lock_user(VERIFY_READ, arg5,
11640                                        sizeof(target_sigset_t), 1);
11641                 if (!target_set) {
11642                     ret = -TARGET_EFAULT;
11643                     break;
11644                 }
11645                 target_to_host_sigset(set, target_set);
11646                 unlock_user(target_set, arg5, 0);
11647             } else {
11648                 set = NULL;
11649             }
11650 
11651             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11652                                              set, SIGSET_T_SIZE));
11653             break;
11654         }
11655 #endif
11656 #if defined(TARGET_NR_epoll_wait)
11657         case TARGET_NR_epoll_wait:
11658             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11659                                              NULL, 0));
11660             break;
11661 #endif
11662         default:
11663             ret = -TARGET_ENOSYS;
11664         }
11665         if (!is_error(ret)) {
11666             int i;
11667             for (i = 0; i < ret; i++) {
11668                 target_ep[i].events = tswap32(ep[i].events);
11669                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11670             }
11671             unlock_user(target_ep, arg2,
11672                         ret * sizeof(struct target_epoll_event));
11673         } else {
11674             unlock_user(target_ep, arg2, 0);
11675         }
11676         g_free(ep);
11677         return ret;
11678     }
11679 #endif
11680 #endif
11681 #ifdef TARGET_NR_prlimit64
11682     case TARGET_NR_prlimit64:
11683     {
11684         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11685         struct target_rlimit64 *target_rnew, *target_rold;
11686         struct host_rlimit64 rnew, rold, *rnewp = 0;
11687         int resource = target_to_host_resource(arg2);
11688         if (arg3) {
11689             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11690                 return -TARGET_EFAULT;
11691             }
11692             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11693             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11694             unlock_user_struct(target_rnew, arg3, 0);
11695             rnewp = &rnew;
11696         }
11697 
11698         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11699         if (!is_error(ret) && arg4) {
11700             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11701                 return -TARGET_EFAULT;
11702             }
11703             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11704             target_rold->rlim_max = tswap64(rold.rlim_max);
11705             unlock_user_struct(target_rold, arg4, 1);
11706         }
11707         return ret;
11708     }
11709 #endif
11710 #ifdef TARGET_NR_gethostname
11711     case TARGET_NR_gethostname:
11712     {
11713         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11714         if (name) {
11715             ret = get_errno(gethostname(name, arg2));
11716             unlock_user(name, arg1, arg2);
11717         } else {
11718             ret = -TARGET_EFAULT;
11719         }
11720         return ret;
11721     }
11722 #endif
11723 #ifdef TARGET_NR_atomic_cmpxchg_32
11724     case TARGET_NR_atomic_cmpxchg_32:
11725     {
11726         /* should use start_exclusive from main.c */
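        /*
         * Added note: without start_exclusive() another guest thread can
         * store to the same word between the get_user_u32() and
         * put_user_u32() below, so this compare-and-swap is not actually
         * atomic; start_exclusive() would pause the other vCPUs for the
         * duration of the update.
         */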
11727         abi_ulong mem_value;
11728         if (get_user_u32(mem_value, arg6)) {
11729             target_siginfo_t info;
11730             info.si_signo = SIGSEGV;
11731             info.si_errno = 0;
11732             info.si_code = TARGET_SEGV_MAPERR;
11733             info._sifields._sigfault._addr = arg6;
11734             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11735                          QEMU_SI_FAULT, &info);
11736             ret = 0xdeadbeef;
11737 
11738         }
11739         if (mem_value == arg2)
11740             put_user_u32(arg1, arg6);
11741         return mem_value;
11742     }
11743 #endif
11744 #ifdef TARGET_NR_atomic_barrier
11745     case TARGET_NR_atomic_barrier:
11746         /* Like the kernel implementation and the QEMU ARM barrier,
11747            this can safely be treated as a no-op.  */
11748         return 0;
11749 #endif
11750 
11751 #ifdef TARGET_NR_timer_create
11752     case TARGET_NR_timer_create:
11753     {
11754         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
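        /*
         * Added note: host timers live in the g_posix_timers[] table; what
         * the guest receives in *timerid is the encoded value
         * (TIMER_MAGIC | timer_index), which get_timer_id() later validates
         * and turns back into an index for the timer_settime/gettime/delete
         * handlers below.
         */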
11755 
11756         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11757 
11758         int clkid = arg1;
11759         int timer_index = next_free_host_timer();
11760 
11761         if (timer_index < 0) {
11762             ret = -TARGET_EAGAIN;
11763         } else {
11764             timer_t *phtimer = g_posix_timers + timer_index;
11765 
11766             if (arg2) {
11767                 phost_sevp = &host_sevp;
11768                 ret = target_to_host_sigevent(phost_sevp, arg2);
11769                 if (ret != 0) {
11770                     return ret;
11771                 }
11772             }
11773 
11774             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11775             if (ret) {
11776                 phtimer = NULL;
11777             } else {
11778                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11779                     return -TARGET_EFAULT;
11780                 }
11781             }
11782         }
11783         return ret;
11784     }
11785 #endif
11786 
11787 #ifdef TARGET_NR_timer_settime
11788     case TARGET_NR_timer_settime:
11789     {
11790         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11791          * struct itimerspec * old_value */
11792         target_timer_t timerid = get_timer_id(arg1);
11793 
11794         if (timerid < 0) {
11795             ret = timerid;
11796         } else if (arg3 == 0) {
11797             ret = -TARGET_EINVAL;
11798         } else {
11799             timer_t htimer = g_posix_timers[timerid];
11800             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11801 
11802             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11803                 return -TARGET_EFAULT;
11804             }
11805             ret = get_errno(
11806                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11807             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11808                 return -TARGET_EFAULT;
11809             }
11810         }
11811         return ret;
11812     }
11813 #endif
11814 
11815 #ifdef TARGET_NR_timer_gettime
11816     case TARGET_NR_timer_gettime:
11817     {
11818         /* args: timer_t timerid, struct itimerspec *curr_value */
11819         target_timer_t timerid = get_timer_id(arg1);
11820 
11821         if (timerid < 0) {
11822             ret = timerid;
11823         } else if (!arg2) {
11824             ret = -TARGET_EFAULT;
11825         } else {
11826             timer_t htimer = g_posix_timers[timerid];
11827             struct itimerspec hspec;
11828             ret = get_errno(timer_gettime(htimer, &hspec));
11829 
11830             if (host_to_target_itimerspec(arg2, &hspec)) {
11831                 ret = -TARGET_EFAULT;
11832             }
11833         }
11834         return ret;
11835     }
11836 #endif
11837 
11838 #ifdef TARGET_NR_timer_getoverrun
11839     case TARGET_NR_timer_getoverrun:
11840     {
11841         /* args: timer_t timerid */
11842         target_timer_t timerid = get_timer_id(arg1);
11843 
11844         if (timerid < 0) {
11845             ret = timerid;
11846         } else {
11847             timer_t htimer = g_posix_timers[timerid];
11848             ret = get_errno(timer_getoverrun(htimer));
11849         }
11850         fd_trans_unregister(ret);
11851         return ret;
11852     }
11853 #endif
11854 
11855 #ifdef TARGET_NR_timer_delete
11856     case TARGET_NR_timer_delete:
11857     {
11858         /* args: timer_t timerid */
11859         target_timer_t timerid = get_timer_id(arg1);
11860 
11861         if (timerid < 0) {
11862             ret = timerid;
11863         } else {
11864             timer_t htimer = g_posix_timers[timerid];
11865             ret = get_errno(timer_delete(htimer));
11866             g_posix_timers[timerid] = 0;
11867         }
11868         return ret;
11869     }
11870 #endif
11871 
11872 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11873     case TARGET_NR_timerfd_create:
11874         return get_errno(timerfd_create(arg1,
11875                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11876 #endif
11877 
11878 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11879     case TARGET_NR_timerfd_gettime:
11880         {
11881             struct itimerspec its_curr;
11882 
11883             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11884 
11885             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11886                 return -TARGET_EFAULT;
11887             }
11888         }
11889         return ret;
11890 #endif
11891 
11892 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11893     case TARGET_NR_timerfd_settime:
11894         {
11895             struct itimerspec its_new, its_old, *p_new;
11896 
11897             if (arg3) {
11898                 if (target_to_host_itimerspec(&its_new, arg3)) {
11899                     return -TARGET_EFAULT;
11900                 }
11901                 p_new = &its_new;
11902             } else {
11903                 p_new = NULL;
11904             }
11905 
11906             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11907 
11908             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11909                 return -TARGET_EFAULT;
11910             }
11911         }
11912         return ret;
11913 #endif
11914 
11915 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11916     case TARGET_NR_ioprio_get:
11917         return get_errno(ioprio_get(arg1, arg2));
11918 #endif
11919 
11920 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11921     case TARGET_NR_ioprio_set:
11922         return get_errno(ioprio_set(arg1, arg2, arg3));
11923 #endif
11924 
11925 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11926     case TARGET_NR_setns:
11927         return get_errno(setns(arg1, arg2));
11928 #endif
11929 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11930     case TARGET_NR_unshare:
11931         return get_errno(unshare(arg1));
11932 #endif
11933 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11934     case TARGET_NR_kcmp:
11935         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11936 #endif
11937 #ifdef TARGET_NR_swapcontext
11938     case TARGET_NR_swapcontext:
11939         /* PowerPC specific.  */
11940         return do_swapcontext(cpu_env, arg1, arg2, arg3);
11941 #endif
11942 
11943     default:
11944         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11945         return -TARGET_ENOSYS;
11946     }
11947     return ret;
11948 }
11949 
11950 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11951                     abi_long arg2, abi_long arg3, abi_long arg4,
11952                     abi_long arg5, abi_long arg6, abi_long arg7,
11953                     abi_long arg8)
11954 {
11955     CPUState *cpu = env_cpu(cpu_env);
11956     abi_long ret;
11957 
11958 #ifdef DEBUG_ERESTARTSYS
11959     /* Debug-only code for exercising the syscall-restart code paths
11960      * in the per-architecture cpu main loops: restart every syscall
11961      * the guest makes once before letting it through.
11962      */
11963     {
11964         static bool flag;
11965         flag = !flag;
11966         if (flag) {
11967             return -TARGET_ERESTARTSYS;
11968         }
11969     }
11970 #endif
11971 
11972     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11973                              arg5, arg6, arg7, arg8);
11974 
11975     if (unlikely(do_strace)) {
11976         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11977         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11978                           arg5, arg6, arg7, arg8);
11979         print_syscall_ret(num, ret);
11980     } else {
11981         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11982                           arg5, arg6, arg7, arg8);
11983     }
11984 
11985     trace_guest_user_syscall_ret(cpu, num, ret);
11986     return ret;
11987 }
11988