xref: /openbmc/qemu/linux-user/syscall.c (revision c6c8d102)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 #ifdef CONFIG_KCOV
77 #include <sys/kcov.h>
78 #endif
79 
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
86 
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
92 #include <linux/kd.h>
93 #include <linux/mtio.h>
94 #include <linux/fs.h>
95 #include <linux/fd.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
98 #endif
99 #include <linux/fb.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
103 #endif
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
115 #include "linux_loop.h"
116 #include "uname.h"
117 
118 #include "qemu.h"
119 #include "qemu/guest-random.h"
120 #include "user/syscall-trace.h"
121 #include "qapi/error.h"
122 #include "fd-trans.h"
123 #include "tcg/tcg.h"
124 
125 #ifndef CLONE_IO
126 #define CLONE_IO                0x80000000      /* Clone io context */
127 #endif
128 
129 /* We can't directly call the host clone syscall, because this will
130  * badly confuse libc (breaking mutexes, for example). So we must
131  * divide clone flags into:
132  *  * flag combinations that look like pthread_create()
133  *  * flag combinations that look like fork()
134  *  * flags we can implement within QEMU itself
135  *  * flags we can't support and will return an error for
136  */
137 /* For thread creation, all these flags must be present; for
138  * fork, none must be present.
139  */
140 #define CLONE_THREAD_FLAGS                              \
141     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
142      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
143 
144 /* These flags are ignored:
145  * CLONE_DETACHED is now ignored by the kernel;
146  * CLONE_IO is just an optimisation hint to the I/O scheduler
147  */
148 #define CLONE_IGNORED_FLAGS                     \
149     (CLONE_DETACHED | CLONE_IO)
150 
151 /* Flags for fork which we can implement within QEMU itself */
152 #define CLONE_OPTIONAL_FORK_FLAGS               \
153     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
154      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
155 
156 /* Flags for thread creation which we can implement within QEMU itself */
157 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
158     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
159      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
160 
161 #define CLONE_INVALID_FORK_FLAGS                                        \
162     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
163 
164 #define CLONE_INVALID_THREAD_FLAGS                                      \
165     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
166        CLONE_IGNORED_FLAGS))
167 
168 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
169  * have almost all been allocated. We cannot support any of
170  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
171  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
172  * The checks against the invalid thread masks above will catch these.
173  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
174  */
175 
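/*
 * Illustrative sketch only (not part of the original source): one way the
 * masks above can be combined to classify a set of clone flags.  The real
 * classification is done inside do_fork(); this merely demonstrates the
 * intent of CLONE_THREAD_FLAGS and the CLONE_INVALID_*_FLAGS masks.
 */
static inline int clone_flags_are_thread_like(unsigned int flags)
{
    /* pthread_create() style: every thread flag present, nothing invalid */
    return (flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS &&
           (flags & CLONE_INVALID_THREAD_FLAGS) == 0;
}

static inline int clone_flags_are_fork_like(unsigned int flags)
{
    /* fork() style: no thread flags and nothing outside the fork set */
    return (flags & CLONE_THREAD_FLAGS) == 0 &&
           (flags & CLONE_INVALID_FORK_FLAGS) == 0;
}
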
176 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
177  * once. This exercises the codepaths for restart.
178  */
179 //#define DEBUG_ERESTARTSYS
180 
181 //#include <linux/msdos_fs.h>
182 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
183 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
184 
185 #undef _syscall0
186 #undef _syscall1
187 #undef _syscall2
188 #undef _syscall3
189 #undef _syscall4
190 #undef _syscall5
191 #undef _syscall6
192 
193 #define _syscall0(type,name)		\
194 static type name (void)			\
195 {					\
196 	return syscall(__NR_##name);	\
197 }
198 
199 #define _syscall1(type,name,type1,arg1)		\
200 static type name (type1 arg1)			\
201 {						\
202 	return syscall(__NR_##name, arg1);	\
203 }
204 
205 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
206 static type name (type1 arg1,type2 arg2)		\
207 {							\
208 	return syscall(__NR_##name, arg1, arg2);	\
209 }
210 
211 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
212 static type name (type1 arg1,type2 arg2,type3 arg3)		\
213 {								\
214 	return syscall(__NR_##name, arg1, arg2, arg3);		\
215 }
216 
217 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
218 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
219 {										\
220 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
221 }
222 
223 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
224 		  type5,arg5)							\
225 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
226 {										\
227 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
228 }
229 
230 
231 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
232 		  type5,arg5,type6,arg6)					\
233 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
234                   type6 arg6)							\
235 {										\
236 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
237 }
238 
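/*
 * Worked example (illustrative, not in the original source): with the
 * macros above, a declaration such as
 *
 *     _syscall1(int, exit_group, int, error_code)
 *
 * expands to a small static wrapper of the form
 *
 *     static int exit_group(int error_code)
 *     {
 *         return syscall(__NR_exit_group, error_code);
 *     }
 *
 * i.e. each _syscallN use below simply forwards its arguments to the raw
 * host syscall(2) entry point via the __NR_<name> number.
 */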
239 
240 #define __NR_sys_uname __NR_uname
241 #define __NR_sys_getcwd1 __NR_getcwd
242 #define __NR_sys_getdents __NR_getdents
243 #define __NR_sys_getdents64 __NR_getdents64
244 #define __NR_sys_getpriority __NR_getpriority
245 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
246 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
247 #define __NR_sys_syslog __NR_syslog
248 #define __NR_sys_futex __NR_futex
249 #define __NR_sys_inotify_init __NR_inotify_init
250 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
251 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
252 #define __NR_sys_statx __NR_statx
253 
254 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
255 #define __NR__llseek __NR_lseek
256 #endif
257 
258 /* Newer kernel ports have llseek() instead of _llseek() */
259 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
260 #define TARGET_NR__llseek TARGET_NR_llseek
261 #endif
262 
263 #define __NR_sys_gettid __NR_gettid
264 _syscall0(int, sys_gettid)
265 
266 /* For the 64-bit guest on 32-bit host case we must emulate
267  * getdents using getdents64, because otherwise the host
268  * might hand us back more dirent records than we can fit
269  * into the guest buffer after structure format conversion.
270  * Otherwise we emulate the guest getdents with the host getdents, if available.
271  */
272 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
273 #define EMULATE_GETDENTS_WITH_GETDENTS
274 #endif
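
/*
 * Rough size arithmetic behind the choice above (illustrative): on a
 * 32-bit host a struct linux_dirent record is about
 * 4 (d_ino) + 4 (d_off) + 2 (d_reclen) + name + NUL bytes plus padding,
 * while the corresponding 64-bit guest record needs roughly 8 + 8 + 2 +
 * name + NUL bytes plus padding.  A buffer the host fills completely can
 * therefore overflow the guest buffer once every record has grown by
 * about 8 bytes, which is why that case is routed through getdents64,
 * whose records are never smaller than the guest's.
 */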
275 
276 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
277 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
278 #endif
279 #if (defined(TARGET_NR_getdents) && \
280       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
281     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
282 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
283 #endif
284 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
285 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
286           loff_t *, res, uint, wh);
287 #endif
288 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
289 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
290           siginfo_t *, uinfo)
291 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
292 #ifdef __NR_exit_group
293 _syscall1(int,exit_group,int,error_code)
294 #endif
295 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
296 _syscall1(int,set_tid_address,int *,tidptr)
297 #endif
298 #if defined(TARGET_NR_futex) && defined(__NR_futex)
299 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
300           const struct timespec *,timeout,int *,uaddr2,int,val3)
301 #endif
302 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
303 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
304           unsigned long *, user_mask_ptr);
305 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
306 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
307           unsigned long *, user_mask_ptr);
308 #define __NR_sys_getcpu __NR_getcpu
309 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
310 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
311           void *, arg);
312 _syscall2(int, capget, struct __user_cap_header_struct *, header,
313           struct __user_cap_data_struct *, data);
314 _syscall2(int, capset, struct __user_cap_header_struct *, header,
315           struct __user_cap_data_struct *, data);
316 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
317 _syscall2(int, ioprio_get, int, which, int, who)
318 #endif
319 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
320 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
321 #endif
322 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
323 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
324 #endif
325 
326 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
327 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
328           unsigned long, idx1, unsigned long, idx2)
329 #endif
330 
331 /*
332  * It is assumed that struct statx is architecture independent.
333  */
334 #if defined(TARGET_NR_statx) && defined(__NR_statx)
335 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
336           unsigned int, mask, struct target_statx *, statxbuf)
337 #endif
338 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
339 _syscall2(int, membarrier, int, cmd, int, flags)
340 #endif
341 
342 static bitmask_transtbl fcntl_flags_tbl[] = {
343   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
344   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
345   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
346   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
347   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
348   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
349   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
350   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
351   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
352   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
353   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
354   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
355   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
356 #if defined(O_DIRECT)
357   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
358 #endif
359 #if defined(O_NOATIME)
360   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
361 #endif
362 #if defined(O_CLOEXEC)
363   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
364 #endif
365 #if defined(O_PATH)
366   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
367 #endif
368 #if defined(O_TMPFILE)
369   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
370 #endif
371   /* Don't terminate the list prematurely on 64-bit host+guest.  */
372 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
373   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
374 #endif
375   { 0, 0, 0, 0 }
376 };
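
/*
 * Conceptual sketch of how a bitmask_transtbl such as fcntl_flags_tbl is
 * consumed (illustrative; the field names here are descriptive only, and
 * the real conversion is done by target_to_host_bitmask() and
 * host_to_target_bitmask()):
 *
 *     unsigned int host_flags = 0;
 *     for (each entry e until the all-zero terminator) {
 *         if ((target_flags & e->target_mask) == e->target_bits) {
 *             host_flags |= e->host_bits;
 *         }
 *     }
 *
 * Each row maps one guest open/fcntl flag (or O_ACCMODE value) onto the
 * equivalent host flag, and the trailing { 0, 0, 0, 0 } entry terminates
 * the walk.
 */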
377 
378 static int sys_getcwd1(char *buf, size_t size)
379 {
380   if (getcwd(buf, size) == NULL) {
381       /* getcwd() sets errno */
382       return (-1);
383   }
384   return strlen(buf)+1;
385 }
386 
387 #ifdef TARGET_NR_utimensat
388 #if defined(__NR_utimensat)
389 #define __NR_sys_utimensat __NR_utimensat
390 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
391           const struct timespec *,tsp,int,flags)
392 #else
393 static int sys_utimensat(int dirfd, const char *pathname,
394                          const struct timespec times[2], int flags)
395 {
396     errno = ENOSYS;
397     return -1;
398 }
399 #endif
400 #endif /* TARGET_NR_utimensat */
401 
402 #ifdef TARGET_NR_renameat2
403 #if defined(__NR_renameat2)
404 #define __NR_sys_renameat2 __NR_renameat2
405 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
406           const char *, new, unsigned int, flags)
407 #else
408 static int sys_renameat2(int oldfd, const char *old,
409                          int newfd, const char *new, int flags)
410 {
411     if (flags == 0) {
412         return renameat(oldfd, old, newfd, new);
413     }
414     errno = ENOSYS;
415     return -1;
416 }
417 #endif
418 #endif /* TARGET_NR_renameat2 */
419 
420 #ifdef CONFIG_INOTIFY
421 #include <sys/inotify.h>
422 
423 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
424 static int sys_inotify_init(void)
425 {
426   return (inotify_init());
427 }
428 #endif
429 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
430 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
431 {
432   return (inotify_add_watch(fd, pathname, mask));
433 }
434 #endif
435 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
436 static int sys_inotify_rm_watch(int fd, int32_t wd)
437 {
438   return (inotify_rm_watch(fd, wd));
439 }
440 #endif
441 #ifdef CONFIG_INOTIFY1
442 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
443 static int sys_inotify_init1(int flags)
444 {
445   return (inotify_init1(flags));
446 }
447 #endif
448 #endif
449 #else
450 /* Userspace can usually survive runtime without inotify */
451 #undef TARGET_NR_inotify_init
452 #undef TARGET_NR_inotify_init1
453 #undef TARGET_NR_inotify_add_watch
454 #undef TARGET_NR_inotify_rm_watch
455 #endif /* CONFIG_INOTIFY  */
456 
457 #if defined(TARGET_NR_prlimit64)
458 #ifndef __NR_prlimit64
459 # define __NR_prlimit64 -1
460 #endif
461 #define __NR_sys_prlimit64 __NR_prlimit64
462 /* The glibc rlimit structure may not match the one used by the underlying syscall */
463 struct host_rlimit64 {
464     uint64_t rlim_cur;
465     uint64_t rlim_max;
466 };
467 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
468           const struct host_rlimit64 *, new_limit,
469           struct host_rlimit64 *, old_limit)
470 #endif
471 
472 
473 #if defined(TARGET_NR_timer_create)
474 /* Maximum of 32 active POSIX timers allowed at any one time. */
475 static timer_t g_posix_timers[32] = { 0, };
476 
477 static inline int next_free_host_timer(void)
478 {
479     int k;
480     /* FIXME: Does finding the next free slot require a lock? */
481     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
482         if (g_posix_timers[k] == 0) {
483             g_posix_timers[k] = (timer_t) 1;
484             return k;
485         }
486     }
487     return -1;
488 }
489 #endif
490 
491 /* ARM EABI and MIPS expect 64-bit types to be aligned on even pairs of registers */
492 #ifdef TARGET_ARM
493 static inline int regpairs_aligned(void *cpu_env, int num)
494 {
495     return ((((CPUARMState *)cpu_env)->eabi) == 1);
496 }
497 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
498 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
499 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
500 /* The SysV ABI for PPC32 expects 64-bit parameters to be passed in odd/even
501  * pairs of registers, which translates to the same as ARM/MIPS because we
502  * start with r3 as arg1 */
503 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
504 #elif defined(TARGET_SH4)
505 /* SH4 doesn't align register pairs, except for p{read,write}64 */
506 static inline int regpairs_aligned(void *cpu_env, int num)
507 {
508     switch (num) {
509     case TARGET_NR_pread64:
510     case TARGET_NR_pwrite64:
511         return 1;
512 
513     default:
514         return 0;
515     }
516 }
517 #elif defined(TARGET_XTENSA)
518 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
519 #else
520 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
521 #endif
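
/*
 * Illustrative use of regpairs_aligned() (not from the original source):
 * on 32-bit ABIs a 64-bit syscall argument arrives as two consecutive
 * 32-bit registers, and ABIs for which regpairs_aligned() returns 1 insert
 * a padding register so that the pair starts at an even index, roughly:
 *
 *     if (regpairs_aligned(cpu_env, num) && (idx & 1)) {
 *         idx++;                              // skip the padding slot
 *     }
 *     val64 = combine(arg[idx], arg[idx + 1]);  // half order is target-endian
 *
 * The real code expresses this with helpers such as target_offset64().
 */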
522 
523 #define ERRNO_TABLE_SIZE 1200
524 
525 /* target_to_host_errno_table[] is initialized from
526  * host_to_target_errno_table[] in syscall_init(). */
527 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
528 };
529 
530 /*
531  * This list is the union of errno values overridden in asm-<arch>/errno.h
532  * minus the errnos that are not actually generic to all archs.
533  */
534 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
535     [EAGAIN]		= TARGET_EAGAIN,
536     [EIDRM]		= TARGET_EIDRM,
537     [ECHRNG]		= TARGET_ECHRNG,
538     [EL2NSYNC]		= TARGET_EL2NSYNC,
539     [EL3HLT]		= TARGET_EL3HLT,
540     [EL3RST]		= TARGET_EL3RST,
541     [ELNRNG]		= TARGET_ELNRNG,
542     [EUNATCH]		= TARGET_EUNATCH,
543     [ENOCSI]		= TARGET_ENOCSI,
544     [EL2HLT]		= TARGET_EL2HLT,
545     [EDEADLK]		= TARGET_EDEADLK,
546     [ENOLCK]		= TARGET_ENOLCK,
547     [EBADE]		= TARGET_EBADE,
548     [EBADR]		= TARGET_EBADR,
549     [EXFULL]		= TARGET_EXFULL,
550     [ENOANO]		= TARGET_ENOANO,
551     [EBADRQC]		= TARGET_EBADRQC,
552     [EBADSLT]		= TARGET_EBADSLT,
553     [EBFONT]		= TARGET_EBFONT,
554     [ENOSTR]		= TARGET_ENOSTR,
555     [ENODATA]		= TARGET_ENODATA,
556     [ETIME]		= TARGET_ETIME,
557     [ENOSR]		= TARGET_ENOSR,
558     [ENONET]		= TARGET_ENONET,
559     [ENOPKG]		= TARGET_ENOPKG,
560     [EREMOTE]		= TARGET_EREMOTE,
561     [ENOLINK]		= TARGET_ENOLINK,
562     [EADV]		= TARGET_EADV,
563     [ESRMNT]		= TARGET_ESRMNT,
564     [ECOMM]		= TARGET_ECOMM,
565     [EPROTO]		= TARGET_EPROTO,
566     [EDOTDOT]		= TARGET_EDOTDOT,
567     [EMULTIHOP]		= TARGET_EMULTIHOP,
568     [EBADMSG]		= TARGET_EBADMSG,
569     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
570     [EOVERFLOW]		= TARGET_EOVERFLOW,
571     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
572     [EBADFD]		= TARGET_EBADFD,
573     [EREMCHG]		= TARGET_EREMCHG,
574     [ELIBACC]		= TARGET_ELIBACC,
575     [ELIBBAD]		= TARGET_ELIBBAD,
576     [ELIBSCN]		= TARGET_ELIBSCN,
577     [ELIBMAX]		= TARGET_ELIBMAX,
578     [ELIBEXEC]		= TARGET_ELIBEXEC,
579     [EILSEQ]		= TARGET_EILSEQ,
580     [ENOSYS]		= TARGET_ENOSYS,
581     [ELOOP]		= TARGET_ELOOP,
582     [ERESTART]		= TARGET_ERESTART,
583     [ESTRPIPE]		= TARGET_ESTRPIPE,
584     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
585     [EUSERS]		= TARGET_EUSERS,
586     [ENOTSOCK]		= TARGET_ENOTSOCK,
587     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
588     [EMSGSIZE]		= TARGET_EMSGSIZE,
589     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
590     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
591     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
592     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
593     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
594     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
595     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
596     [EADDRINUSE]	= TARGET_EADDRINUSE,
597     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
598     [ENETDOWN]		= TARGET_ENETDOWN,
599     [ENETUNREACH]	= TARGET_ENETUNREACH,
600     [ENETRESET]		= TARGET_ENETRESET,
601     [ECONNABORTED]	= TARGET_ECONNABORTED,
602     [ECONNRESET]	= TARGET_ECONNRESET,
603     [ENOBUFS]		= TARGET_ENOBUFS,
604     [EISCONN]		= TARGET_EISCONN,
605     [ENOTCONN]		= TARGET_ENOTCONN,
606     [EUCLEAN]		= TARGET_EUCLEAN,
607     [ENOTNAM]		= TARGET_ENOTNAM,
608     [ENAVAIL]		= TARGET_ENAVAIL,
609     [EISNAM]		= TARGET_EISNAM,
610     [EREMOTEIO]		= TARGET_EREMOTEIO,
611     [EDQUOT]            = TARGET_EDQUOT,
612     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
613     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
614     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
615     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
616     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
617     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
618     [EALREADY]		= TARGET_EALREADY,
619     [EINPROGRESS]	= TARGET_EINPROGRESS,
620     [ESTALE]		= TARGET_ESTALE,
621     [ECANCELED]		= TARGET_ECANCELED,
622     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
623     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
624 #ifdef ENOKEY
625     [ENOKEY]		= TARGET_ENOKEY,
626 #endif
627 #ifdef EKEYEXPIRED
628     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
629 #endif
630 #ifdef EKEYREVOKED
631     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
632 #endif
633 #ifdef EKEYREJECTED
634     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
635 #endif
636 #ifdef EOWNERDEAD
637     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
638 #endif
639 #ifdef ENOTRECOVERABLE
640     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
641 #endif
642 #ifdef ENOMSG
643     [ENOMSG]            = TARGET_ENOMSG,
644 #endif
645 #ifdef ERFKILL
646     [ERFKILL]           = TARGET_ERFKILL,
647 #endif
648 #ifdef EHWPOISON
649     [EHWPOISON]         = TARGET_EHWPOISON,
650 #endif
651 };
652 
653 static inline int host_to_target_errno(int err)
654 {
655     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
656         host_to_target_errno_table[err]) {
657         return host_to_target_errno_table[err];
658     }
659     return err;
660 }
661 
662 static inline int target_to_host_errno(int err)
663 {
664     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
665         target_to_host_errno_table[err]) {
666         return target_to_host_errno_table[err];
667     }
668     return err;
669 }
670 
671 static inline abi_long get_errno(abi_long ret)
672 {
673     if (ret == -1)
674         return -host_to_target_errno(errno);
675     else
676         return ret;
677 }
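
/*
 * Typical usage pattern (illustrative) mirrored throughout this file: the
 * raw host result is wrapped so that failures reach the guest as negative
 * *target* errno values, e.g.
 *
 *     ret = get_errno(safe_read(fd, buf, count));
 *     if (is_error(ret)) {
 *         // ret now holds -TARGET_Exxx for the guest
 *     }
 *
 * (safe_read() is one of the safe_syscall wrappers defined further below.)
 */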
678 
679 const char *target_strerror(int err)
680 {
681     if (err == TARGET_ERESTARTSYS) {
682         return "To be restarted";
683     }
684     if (err == TARGET_QEMU_ESIGRETURN) {
685         return "Successful exit from sigreturn";
686     }
687 
688     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
689         return NULL;
690     }
691     return strerror(target_to_host_errno(err));
692 }
693 
694 #define safe_syscall0(type, name) \
695 static type safe_##name(void) \
696 { \
697     return safe_syscall(__NR_##name); \
698 }
699 
700 #define safe_syscall1(type, name, type1, arg1) \
701 static type safe_##name(type1 arg1) \
702 { \
703     return safe_syscall(__NR_##name, arg1); \
704 }
705 
706 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
707 static type safe_##name(type1 arg1, type2 arg2) \
708 { \
709     return safe_syscall(__NR_##name, arg1, arg2); \
710 }
711 
712 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
713 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
714 { \
715     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
716 }
717 
718 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
719     type4, arg4) \
720 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
721 { \
722     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
723 }
724 
725 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
726     type4, arg4, type5, arg5) \
727 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
728     type5 arg5) \
729 { \
730     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
731 }
732 
733 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
734     type4, arg4, type5, arg5, type6, arg6) \
735 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
736     type5 arg5, type6 arg6) \
737 { \
738     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
739 }
740 
741 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
742 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
743 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
744               int, flags, mode_t, mode)
745 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
746 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
747               struct rusage *, rusage)
748 #endif
749 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
750               int, options, struct rusage *, rusage)
751 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
752 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
753     defined(TARGET_NR_pselect6)
754 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
755               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
756 #endif
757 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
758 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
759               struct timespec *, tsp, const sigset_t *, sigmask,
760               size_t, sigsetsize)
761 #endif
762 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
763               int, maxevents, int, timeout, const sigset_t *, sigmask,
764               size_t, sigsetsize)
765 #ifdef TARGET_NR_futex
766 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
767               const struct timespec *,timeout,int *,uaddr2,int,val3)
768 #endif
769 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
770 safe_syscall2(int, kill, pid_t, pid, int, sig)
771 safe_syscall2(int, tkill, int, tid, int, sig)
772 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
773 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
774 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
775 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
776               unsigned long, pos_l, unsigned long, pos_h)
777 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
778               unsigned long, pos_l, unsigned long, pos_h)
779 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
780               socklen_t, addrlen)
781 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
782               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
783 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
784               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
785 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
786 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
787 safe_syscall2(int, flock, int, fd, int, operation)
788 #ifdef TARGET_NR_rt_sigtimedwait
789 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
790               const struct timespec *, uts, size_t, sigsetsize)
791 #endif
792 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
793               int, flags)
794 #if defined(TARGET_NR_nanosleep)
795 safe_syscall2(int, nanosleep, const struct timespec *, req,
796               struct timespec *, rem)
797 #endif
798 #ifdef TARGET_NR_clock_nanosleep
799 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
800               const struct timespec *, req, struct timespec *, rem)
801 #endif
802 #ifdef __NR_ipc
803 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
804               void *, ptr, long, fifth)
805 #endif
806 #ifdef __NR_msgsnd
807 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
808               int, flags)
809 #endif
810 #ifdef __NR_msgrcv
811 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
812               long, msgtype, int, flags)
813 #endif
814 #ifdef __NR_semtimedop
815 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
816               unsigned, nsops, const struct timespec *, timeout)
817 #endif
818 #ifdef TARGET_NR_mq_timedsend
819 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
820               size_t, len, unsigned, prio, const struct timespec *, timeout)
821 #endif
822 #ifdef TARGET_NR_mq_timedreceive
823 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
824               size_t, len, unsigned *, prio, const struct timespec *, timeout)
825 #endif
826 /* We do ioctl like this rather than via safe_syscall3 to preserve the
827  * "third argument might be integer or pointer or not present" behaviour of
828  * the libc function.
829  */
830 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
831 /* Similarly for fcntl. Note that callers must always:
832  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
833  *  use the flock64 struct rather than unsuffixed flock
834  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
835  */
836 #ifdef __NR_fcntl64
837 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
838 #else
839 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
840 #endif
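
/*
 * Example of the convention described above (illustrative): callers always
 * use the 64-bit flock layout and the *64 command constants so that the
 * same code handles large offsets on both 32-bit and 64-bit hosts:
 *
 *     struct flock64 fl64;
 *     ...
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 */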
841 
842 static inline int host_to_target_sock_type(int host_type)
843 {
844     int target_type;
845 
846     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
847     case SOCK_DGRAM:
848         target_type = TARGET_SOCK_DGRAM;
849         break;
850     case SOCK_STREAM:
851         target_type = TARGET_SOCK_STREAM;
852         break;
853     default:
854         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
855         break;
856     }
857 
858 #if defined(SOCK_CLOEXEC)
859     if (host_type & SOCK_CLOEXEC) {
860         target_type |= TARGET_SOCK_CLOEXEC;
861     }
862 #endif
863 
864 #if defined(SOCK_NONBLOCK)
865     if (host_type & SOCK_NONBLOCK) {
866         target_type |= TARGET_SOCK_NONBLOCK;
867     }
868 #endif
869 
870     return target_type;
871 }
872 
873 static abi_ulong target_brk;
874 static abi_ulong target_original_brk;
875 static abi_ulong brk_page;
876 
877 void target_set_brk(abi_ulong new_brk)
878 {
879     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
880     brk_page = HOST_PAGE_ALIGN(target_brk);
881 }
882 
883 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
884 #define DEBUGF_BRK(message, args...)
885 
886 /* do_brk() must return target values and target errnos. */
887 abi_long do_brk(abi_ulong new_brk)
888 {
889     abi_long mapped_addr;
890     abi_ulong new_alloc_size;
891 
892     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
893 
894     if (!new_brk) {
895         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
896         return target_brk;
897     }
898     if (new_brk < target_original_brk) {
899         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
900                    target_brk);
901         return target_brk;
902     }
903 
904     /* If the new brk is less than the highest page reserved to the
905      * target heap allocation, set it and we're almost done...  */
906     if (new_brk <= brk_page) {
907         /* Heap contents are initialized to zero, as for anonymous
908          * mapped pages.  */
909         if (new_brk > target_brk) {
910             memset(g2h(target_brk), 0, new_brk - target_brk);
911         }
912         target_brk = new_brk;
913         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
914         return target_brk;
915     }
916 
917     /* We need to allocate more memory after the brk... Note that
918      * we don't use MAP_FIXED because that will map over the top of
919      * any existing mapping (like the one with the host libc or qemu
920      * itself); instead we treat "mapped but at wrong address" as
921      * a failure and unmap again.
922      */
923     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
924     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
925                                         PROT_READ|PROT_WRITE,
926                                         MAP_ANON|MAP_PRIVATE, 0, 0));
927 
928     if (mapped_addr == brk_page) {
929         /* Heap contents are initialized to zero, as for anonymous
930          * mapped pages.  Technically the new pages are already
931          * initialized to zero since they *are* anonymous mapped
932          * pages, however we have to take care with the contents that
933          * come from the remaining part of the previous page: it may
934          * contain garbage data due to previous heap usage (grown
935          * then shrunk).  */
936         memset(g2h(target_brk), 0, brk_page - target_brk);
937 
938         target_brk = new_brk;
939         brk_page = HOST_PAGE_ALIGN(target_brk);
940         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
941             target_brk);
942         return target_brk;
943     } else if (mapped_addr != -1) {
944         /* Mapped but at wrong address, meaning there wasn't actually
945          * enough space for this brk.
946          */
947         target_munmap(mapped_addr, new_alloc_size);
948         mapped_addr = -1;
949         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
950     }
951     else {
952         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
953     }
954 
955 #if defined(TARGET_ALPHA)
956     /* We (partially) emulate OSF/1 on Alpha, which requires we
957        return a proper errno, not an unchanged brk value.  */
958     return -TARGET_ENOMEM;
959 #endif
960     /* For everything else, return the previous break. */
961     return target_brk;
962 }
963 
964 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
965     defined(TARGET_NR_pselect6)
966 static inline abi_long copy_from_user_fdset(fd_set *fds,
967                                             abi_ulong target_fds_addr,
968                                             int n)
969 {
970     int i, nw, j, k;
971     abi_ulong b, *target_fds;
972 
973     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
974     if (!(target_fds = lock_user(VERIFY_READ,
975                                  target_fds_addr,
976                                  sizeof(abi_ulong) * nw,
977                                  1)))
978         return -TARGET_EFAULT;
979 
980     FD_ZERO(fds);
981     k = 0;
982     for (i = 0; i < nw; i++) {
983         /* grab the abi_ulong */
984         __get_user(b, &target_fds[i]);
985         for (j = 0; j < TARGET_ABI_BITS; j++) {
986             /* check the bit inside the abi_ulong */
987             if ((b >> j) & 1)
988                 FD_SET(k, fds);
989             k++;
990         }
991     }
992 
993     unlock_user(target_fds, target_fds_addr, 0);
994 
995     return 0;
996 }
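
/*
 * Worked example for the conversion above (illustrative): with
 * TARGET_ABI_BITS == 32 and n == 70, nw = DIV_ROUND_UP(70, 32) = 3 guest
 * words are read, and bit j of word i describes descriptor k = i * 32 + j,
 * so e.g. descriptor 37 is bit 5 of word 1.  Each set bit is replayed into
 * the host fd_set with FD_SET().
 */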
997 
998 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
999                                                  abi_ulong target_fds_addr,
1000                                                  int n)
1001 {
1002     if (target_fds_addr) {
1003         if (copy_from_user_fdset(fds, target_fds_addr, n))
1004             return -TARGET_EFAULT;
1005         *fds_ptr = fds;
1006     } else {
1007         *fds_ptr = NULL;
1008     }
1009     return 0;
1010 }
1011 
1012 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1013                                           const fd_set *fds,
1014                                           int n)
1015 {
1016     int i, nw, j, k;
1017     abi_long v;
1018     abi_ulong *target_fds;
1019 
1020     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1021     if (!(target_fds = lock_user(VERIFY_WRITE,
1022                                  target_fds_addr,
1023                                  sizeof(abi_ulong) * nw,
1024                                  0)))
1025         return -TARGET_EFAULT;
1026 
1027     k = 0;
1028     for (i = 0; i < nw; i++) {
1029         v = 0;
1030         for (j = 0; j < TARGET_ABI_BITS; j++) {
1031             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1032             k++;
1033         }
1034         __put_user(v, &target_fds[i]);
1035     }
1036 
1037     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1038 
1039     return 0;
1040 }
1041 #endif
1042 
1043 #if defined(__alpha__)
1044 #define HOST_HZ 1024
1045 #else
1046 #define HOST_HZ 100
1047 #endif
1048 
1049 static inline abi_long host_to_target_clock_t(long ticks)
1050 {
1051 #if HOST_HZ == TARGET_HZ
1052     return ticks;
1053 #else
1054     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1055 #endif
1056 }
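
/*
 * Worked example (illustrative): on an Alpha host (HOST_HZ == 1024) with a
 * TARGET_HZ of 100, 2048 host ticks are reported to the guest as
 * 2048 * 100 / 1024 = 200 target clock ticks.
 */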
1057 
1058 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1059                                              const struct rusage *rusage)
1060 {
1061     struct target_rusage *target_rusage;
1062 
1063     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1064         return -TARGET_EFAULT;
1065     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1066     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1067     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1068     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1069     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1070     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1071     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1072     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1073     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1074     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1075     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1076     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1077     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1078     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1079     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1080     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1081     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1082     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1083     unlock_user_struct(target_rusage, target_addr, 1);
1084 
1085     return 0;
1086 }
1087 
1088 #ifdef TARGET_NR_setrlimit
1089 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1090 {
1091     abi_ulong target_rlim_swap;
1092     rlim_t result;
1093 
1094     target_rlim_swap = tswapal(target_rlim);
1095     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1096         return RLIM_INFINITY;
1097 
1098     result = target_rlim_swap;
1099     if (target_rlim_swap != (rlim_t)result)
1100         return RLIM_INFINITY;
1101 
1102     return result;
1103 }
1104 #endif
1105 
1106 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1107 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1108 {
1109     abi_ulong target_rlim_swap;
1110     abi_ulong result;
1111 
1112     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1113         target_rlim_swap = TARGET_RLIM_INFINITY;
1114     else
1115         target_rlim_swap = rlim;
1116     result = tswapal(target_rlim_swap);
1117 
1118     return result;
1119 }
1120 #endif
1121 
1122 static inline int target_to_host_resource(int code)
1123 {
1124     switch (code) {
1125     case TARGET_RLIMIT_AS:
1126         return RLIMIT_AS;
1127     case TARGET_RLIMIT_CORE:
1128         return RLIMIT_CORE;
1129     case TARGET_RLIMIT_CPU:
1130         return RLIMIT_CPU;
1131     case TARGET_RLIMIT_DATA:
1132         return RLIMIT_DATA;
1133     case TARGET_RLIMIT_FSIZE:
1134         return RLIMIT_FSIZE;
1135     case TARGET_RLIMIT_LOCKS:
1136         return RLIMIT_LOCKS;
1137     case TARGET_RLIMIT_MEMLOCK:
1138         return RLIMIT_MEMLOCK;
1139     case TARGET_RLIMIT_MSGQUEUE:
1140         return RLIMIT_MSGQUEUE;
1141     case TARGET_RLIMIT_NICE:
1142         return RLIMIT_NICE;
1143     case TARGET_RLIMIT_NOFILE:
1144         return RLIMIT_NOFILE;
1145     case TARGET_RLIMIT_NPROC:
1146         return RLIMIT_NPROC;
1147     case TARGET_RLIMIT_RSS:
1148         return RLIMIT_RSS;
1149     case TARGET_RLIMIT_RTPRIO:
1150         return RLIMIT_RTPRIO;
1151     case TARGET_RLIMIT_SIGPENDING:
1152         return RLIMIT_SIGPENDING;
1153     case TARGET_RLIMIT_STACK:
1154         return RLIMIT_STACK;
1155     default:
1156         return code;
1157     }
1158 }
1159 
1160 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1161                                               abi_ulong target_tv_addr)
1162 {
1163     struct target_timeval *target_tv;
1164 
1165     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1166         return -TARGET_EFAULT;
1167     }
1168 
1169     __get_user(tv->tv_sec, &target_tv->tv_sec);
1170     __get_user(tv->tv_usec, &target_tv->tv_usec);
1171 
1172     unlock_user_struct(target_tv, target_tv_addr, 0);
1173 
1174     return 0;
1175 }
1176 
1177 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1178                                             const struct timeval *tv)
1179 {
1180     struct target_timeval *target_tv;
1181 
1182     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1183         return -TARGET_EFAULT;
1184     }
1185 
1186     __put_user(tv->tv_sec, &target_tv->tv_sec);
1187     __put_user(tv->tv_usec, &target_tv->tv_usec);
1188 
1189     unlock_user_struct(target_tv, target_tv_addr, 1);
1190 
1191     return 0;
1192 }
1193 
1194 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1195                                              const struct timeval *tv)
1196 {
1197     struct target__kernel_sock_timeval *target_tv;
1198 
1199     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1200         return -TARGET_EFAULT;
1201     }
1202 
1203     __put_user(tv->tv_sec, &target_tv->tv_sec);
1204     __put_user(tv->tv_usec, &target_tv->tv_usec);
1205 
1206     unlock_user_struct(target_tv, target_tv_addr, 1);
1207 
1208     return 0;
1209 }
1210 
1211 #if defined(TARGET_NR_futex) || \
1212     defined(TARGET_NR_rt_sigtimedwait) || \
1213     defined(TARGET_NR_pselect6) || \
1214     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1215     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1216     defined(TARGET_NR_mq_timedreceive)
1217 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1218                                                abi_ulong target_addr)
1219 {
1220     struct target_timespec *target_ts;
1221 
1222     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1223         return -TARGET_EFAULT;
1224     }
1225     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1226     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1227     unlock_user_struct(target_ts, target_addr, 0);
1228     return 0;
1229 }
1230 #endif
1231 
1232 #if defined(TARGET_NR_clock_settime64)
1233 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1234                                                  abi_ulong target_addr)
1235 {
1236     struct target__kernel_timespec *target_ts;
1237 
1238     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1239         return -TARGET_EFAULT;
1240     }
1241     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1242     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1243     unlock_user_struct(target_ts, target_addr, 0);
1244     return 0;
1245 }
1246 #endif
1247 
1248 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1249                                                struct timespec *host_ts)
1250 {
1251     struct target_timespec *target_ts;
1252 
1253     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1254         return -TARGET_EFAULT;
1255     }
1256     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1257     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1258     unlock_user_struct(target_ts, target_addr, 1);
1259     return 0;
1260 }
1261 
1262 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1263                                                  struct timespec *host_ts)
1264 {
1265     struct target__kernel_timespec *target_ts;
1266 
1267     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1268         return -TARGET_EFAULT;
1269     }
1270     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1271     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1272     unlock_user_struct(target_ts, target_addr, 1);
1273     return 0;
1274 }
1275 
1276 #if defined(TARGET_NR_settimeofday)
1277 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1278                                                abi_ulong target_tz_addr)
1279 {
1280     struct target_timezone *target_tz;
1281 
1282     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1283         return -TARGET_EFAULT;
1284     }
1285 
1286     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1287     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1288 
1289     unlock_user_struct(target_tz, target_tz_addr, 0);
1290 
1291     return 0;
1292 }
1293 #endif
1294 
1295 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1296 #include <mqueue.h>
1297 
1298 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1299                                               abi_ulong target_mq_attr_addr)
1300 {
1301     struct target_mq_attr *target_mq_attr;
1302 
1303     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1304                           target_mq_attr_addr, 1))
1305         return -TARGET_EFAULT;
1306 
1307     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1308     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1309     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1310     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1311 
1312     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1313 
1314     return 0;
1315 }
1316 
1317 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1318                                             const struct mq_attr *attr)
1319 {
1320     struct target_mq_attr *target_mq_attr;
1321 
1322     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1323                           target_mq_attr_addr, 0))
1324         return -TARGET_EFAULT;
1325 
1326     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1327     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1328     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1329     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1330 
1331     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1332 
1333     return 0;
1334 }
1335 #endif
1336 
1337 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1338 /* do_select() must return target values and target errnos. */
1339 static abi_long do_select(int n,
1340                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1341                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1342 {
1343     fd_set rfds, wfds, efds;
1344     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1345     struct timeval tv;
1346     struct timespec ts, *ts_ptr;
1347     abi_long ret;
1348 
1349     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1350     if (ret) {
1351         return ret;
1352     }
1353     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1354     if (ret) {
1355         return ret;
1356     }
1357     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1358     if (ret) {
1359         return ret;
1360     }
1361 
1362     if (target_tv_addr) {
1363         if (copy_from_user_timeval(&tv, target_tv_addr))
1364             return -TARGET_EFAULT;
1365         ts.tv_sec = tv.tv_sec;
1366         ts.tv_nsec = tv.tv_usec * 1000;
1367         ts_ptr = &ts;
1368     } else {
1369         ts_ptr = NULL;
1370     }
1371 
1372     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1373                                   ts_ptr, NULL));
1374 
1375     if (!is_error(ret)) {
1376         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1377             return -TARGET_EFAULT;
1378         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1379             return -TARGET_EFAULT;
1380         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1381             return -TARGET_EFAULT;
1382 
1383         if (target_tv_addr) {
1384             tv.tv_sec = ts.tv_sec;
1385             tv.tv_usec = ts.tv_nsec / 1000;
1386             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1387                 return -TARGET_EFAULT;
1388             }
1389         }
1390     }
1391 
1392     return ret;
1393 }
1394 
1395 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1396 static abi_long do_old_select(abi_ulong arg1)
1397 {
1398     struct target_sel_arg_struct *sel;
1399     abi_ulong inp, outp, exp, tvp;
1400     long nsel;
1401 
1402     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1403         return -TARGET_EFAULT;
1404     }
1405 
1406     nsel = tswapal(sel->n);
1407     inp = tswapal(sel->inp);
1408     outp = tswapal(sel->outp);
1409     exp = tswapal(sel->exp);
1410     tvp = tswapal(sel->tvp);
1411 
1412     unlock_user_struct(sel, arg1, 0);
1413 
1414     return do_select(nsel, inp, outp, exp, tvp);
1415 }
1416 #endif
1417 #endif
1418 
1419 static abi_long do_pipe2(int host_pipe[], int flags)
1420 {
1421 #ifdef CONFIG_PIPE2
1422     return pipe2(host_pipe, flags);
1423 #else
1424     return -ENOSYS;
1425 #endif
1426 }
1427 
1428 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1429                         int flags, int is_pipe2)
1430 {
1431     int host_pipe[2];
1432     abi_long ret;
1433     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1434 
1435     if (is_error(ret))
1436         return get_errno(ret);
1437 
1438     /* Several targets have special calling conventions for the original
1439        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1440     if (!is_pipe2) {
1441 #if defined(TARGET_ALPHA)
1442         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1443         return host_pipe[0];
1444 #elif defined(TARGET_MIPS)
1445         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1446         return host_pipe[0];
1447 #elif defined(TARGET_SH4)
1448         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1449         return host_pipe[0];
1450 #elif defined(TARGET_SPARC)
1451         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1452         return host_pipe[0];
1453 #endif
1454     }
1455 
1456     if (put_user_s32(host_pipe[0], pipedes)
1457         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1458         return -TARGET_EFAULT;
1459     return get_errno(ret);
1460 }
1461 
1462 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1463                                               abi_ulong target_addr,
1464                                               socklen_t len)
1465 {
1466     struct target_ip_mreqn *target_smreqn;
1467 
1468     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1469     if (!target_smreqn)
1470         return -TARGET_EFAULT;
1471     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1472     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1473     if (len == sizeof(struct target_ip_mreqn))
1474         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1475     unlock_user(target_smreqn, target_addr, 0);
1476 
1477     return 0;
1478 }
1479 
1480 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1481                                                abi_ulong target_addr,
1482                                                socklen_t len)
1483 {
1484     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1485     sa_family_t sa_family;
1486     struct target_sockaddr *target_saddr;
1487 
1488     if (fd_trans_target_to_host_addr(fd)) {
1489         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1490     }
1491 
1492     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1493     if (!target_saddr)
1494         return -TARGET_EFAULT;
1495 
1496     sa_family = tswap16(target_saddr->sa_family);
1497 
1498     /* Oops. The caller might send an incomplete sun_path; sun_path
1499      * must be terminated by \0 (see the manual page), but
1500      * unfortunately it is quite common to specify sockaddr_un
1501      * length as "strlen(x->sun_path)" while it should be
1502      * "strlen(...) + 1". We'll fix that here if needed.
1503      * The Linux kernel handles this the same way.
1504      */
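    /* Worked example (illustrative): for the path "/tmp/sock" a guest may
     * pass len == offsetof(struct sockaddr_un, sun_path) + 9, leaving the
     * trailing NUL outside the reported length.  The check below notices
     * that the last counted byte is non-zero while the byte after it is
     * zero, and widens len by one so the host sees a properly terminated
     * sun_path.
     */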
1505 
1506     if (sa_family == AF_UNIX) {
1507         if (len < unix_maxlen && len > 0) {
1508             char *cp = (char*)target_saddr;
1509 
1510             if ( cp[len-1] && !cp[len] )
1511                 len++;
1512         }
1513         if (len > unix_maxlen)
1514             len = unix_maxlen;
1515     }
1516 
1517     memcpy(addr, target_saddr, len);
1518     addr->sa_family = sa_family;
1519     if (sa_family == AF_NETLINK) {
1520         struct sockaddr_nl *nladdr;
1521 
1522         nladdr = (struct sockaddr_nl *)addr;
1523         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1524         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1525     } else if (sa_family == AF_PACKET) {
1526         struct target_sockaddr_ll *lladdr;
1527 
1528         lladdr = (struct target_sockaddr_ll *)addr;
1529         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1530         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1531     }
1532     unlock_user(target_saddr, target_addr, 0);
1533 
1534     return 0;
1535 }
1536 
1537 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1538                                                struct sockaddr *addr,
1539                                                socklen_t len)
1540 {
1541     struct target_sockaddr *target_saddr;
1542 
1543     if (len == 0) {
1544         return 0;
1545     }
1546     assert(addr);
1547 
1548     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1549     if (!target_saddr)
1550         return -TARGET_EFAULT;
1551     memcpy(target_saddr, addr, len);
1552     if (len >= offsetof(struct target_sockaddr, sa_family) +
1553         sizeof(target_saddr->sa_family)) {
1554         target_saddr->sa_family = tswap16(addr->sa_family);
1555     }
1556     if (addr->sa_family == AF_NETLINK &&
1557         len >= sizeof(struct target_sockaddr_nl)) {
1558         struct target_sockaddr_nl *target_nl =
1559                (struct target_sockaddr_nl *)target_saddr;
1560         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1561         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1562     } else if (addr->sa_family == AF_PACKET) {
1563         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1564         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1565         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1566     } else if (addr->sa_family == AF_INET6 &&
1567                len >= sizeof(struct target_sockaddr_in6)) {
1568         struct target_sockaddr_in6 *target_in6 =
1569                (struct target_sockaddr_in6 *)target_saddr;
1570         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1571     }
1572     unlock_user(target_saddr, target_addr, len);
1573 
1574     return 0;
1575 }
1576 
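     /*
      * Convert the ancillary data (control messages) attached to a guest
      * msghdr into host format.  Only SCM_RIGHTS and SCM_CREDENTIALS
      * payloads are converted; anything else is copied through unchanged
      * and logged as unimplemented.
      */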
1577 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1578                                            struct target_msghdr *target_msgh)
1579 {
1580     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1581     abi_long msg_controllen;
1582     abi_ulong target_cmsg_addr;
1583     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1584     socklen_t space = 0;
1585 
1586     msg_controllen = tswapal(target_msgh->msg_controllen);
1587     if (msg_controllen < sizeof (struct target_cmsghdr))
1588         goto the_end;
1589     target_cmsg_addr = tswapal(target_msgh->msg_control);
1590     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1591     target_cmsg_start = target_cmsg;
1592     if (!target_cmsg)
1593         return -TARGET_EFAULT;
1594 
1595     while (cmsg && target_cmsg) {
1596         void *data = CMSG_DATA(cmsg);
1597         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1598 
1599         int len = tswapal(target_cmsg->cmsg_len)
1600             - sizeof(struct target_cmsghdr);
1601 
1602         space += CMSG_SPACE(len);
1603         if (space > msgh->msg_controllen) {
1604             space -= CMSG_SPACE(len);
1605             /* This is a QEMU bug, since we allocated the payload
1606              * area ourselves (unlike overflow in host-to-target
1607              * conversion, which is just the guest giving us a buffer
1608              * that's too small). It can't happen for the payload types
1609              * we currently support; if it becomes an issue in future
1610              * we would need to improve our allocation strategy to
1611              * something more intelligent than "twice the size of the
1612              * target buffer we're reading from".
1613              */
1614             qemu_log_mask(LOG_UNIMP,
1615                           ("Unsupported ancillary data %d/%d: "
1616                            "unhandled msg size\n"),
1617                           tswap32(target_cmsg->cmsg_level),
1618                           tswap32(target_cmsg->cmsg_type));
1619             break;
1620         }
1621 
1622         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1623             cmsg->cmsg_level = SOL_SOCKET;
1624         } else {
1625             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1626         }
1627         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1628         cmsg->cmsg_len = CMSG_LEN(len);
1629 
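             /* SCM_RIGHTS carries an array of file descriptors; copy each
              * one with __get_user() so guest byte order is handled. */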
1630         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1631             int *fd = (int *)data;
1632             int *target_fd = (int *)target_data;
1633             int i, numfds = len / sizeof(int);
1634 
1635             for (i = 0; i < numfds; i++) {
1636                 __get_user(fd[i], target_fd + i);
1637             }
1638         } else if (cmsg->cmsg_level == SOL_SOCKET
1639                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1640             struct ucred *cred = (struct ucred *)data;
1641             struct target_ucred *target_cred =
1642                 (struct target_ucred *)target_data;
1643 
1644             __get_user(cred->pid, &target_cred->pid);
1645             __get_user(cred->uid, &target_cred->uid);
1646             __get_user(cred->gid, &target_cred->gid);
1647         } else {
1648             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1649                           cmsg->cmsg_level, cmsg->cmsg_type);
1650             memcpy(data, target_data, len);
1651         }
1652 
1653         cmsg = CMSG_NXTHDR(msgh, cmsg);
1654         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1655                                          target_cmsg_start);
1656     }
1657     unlock_user(target_cmsg, target_cmsg_addr, 0);
1658  the_end:
1659     msgh->msg_controllen = space;
1660     return 0;
1661 }
1662 
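     /*
      * Convert host ancillary data back into the guest msghdr, truncating
      * the payload (and setting MSG_CTRUNC) when the guest's control
      * buffer is too small for the converted message.
      */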
1663 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1664                                            struct msghdr *msgh)
1665 {
1666     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1667     abi_long msg_controllen;
1668     abi_ulong target_cmsg_addr;
1669     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1670     socklen_t space = 0;
1671 
1672     msg_controllen = tswapal(target_msgh->msg_controllen);
1673     if (msg_controllen < sizeof (struct target_cmsghdr))
1674         goto the_end;
1675     target_cmsg_addr = tswapal(target_msgh->msg_control);
1676     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1677     target_cmsg_start = target_cmsg;
1678     if (!target_cmsg)
1679         return -TARGET_EFAULT;
1680 
1681     while (cmsg && target_cmsg) {
1682         void *data = CMSG_DATA(cmsg);
1683         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1684 
1685         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1686         int tgt_len, tgt_space;
1687 
1688         /* We never copy a half-header but may copy half-data;
1689          * this is Linux's behaviour in put_cmsg(). Note that
1690          * truncation here is a guest problem (which we report
1691          * to the guest via the CTRUNC bit), unlike truncation
1692          * in target_to_host_cmsg, which is a QEMU bug.
1693          */
1694         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1695             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1696             break;
1697         }
1698 
1699         if (cmsg->cmsg_level == SOL_SOCKET) {
1700             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1701         } else {
1702             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1703         }
1704         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1705 
1706         /* Payload types which need a different size of payload on
1707          * the target must adjust tgt_len here.
1708          */
1709         tgt_len = len;
1710         switch (cmsg->cmsg_level) {
1711         case SOL_SOCKET:
1712             switch (cmsg->cmsg_type) {
1713             case SO_TIMESTAMP:
1714                 tgt_len = sizeof(struct target_timeval);
1715                 break;
1716             default:
1717                 break;
1718             }
1719             break;
1720         default:
1721             break;
1722         }
1723 
1724         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1725             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1726             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1727         }
1728 
1729         /* We must now copy-and-convert len bytes of payload
1730          * into tgt_len bytes of destination space. Bear in mind
1731          * that in both source and destination we may be dealing
1732          * with a truncated value!
1733          */
1734         switch (cmsg->cmsg_level) {
1735         case SOL_SOCKET:
1736             switch (cmsg->cmsg_type) {
1737             case SCM_RIGHTS:
1738             {
1739                 int *fd = (int *)data;
1740                 int *target_fd = (int *)target_data;
1741                 int i, numfds = tgt_len / sizeof(int);
1742 
1743                 for (i = 0; i < numfds; i++) {
1744                     __put_user(fd[i], target_fd + i);
1745                 }
1746                 break;
1747             }
1748             case SO_TIMESTAMP:
1749             {
1750                 struct timeval *tv = (struct timeval *)data;
1751                 struct target_timeval *target_tv =
1752                     (struct target_timeval *)target_data;
1753 
1754                 if (len != sizeof(struct timeval) ||
1755                     tgt_len != sizeof(struct target_timeval)) {
1756                     goto unimplemented;
1757                 }
1758 
1759                 /* copy struct timeval to target */
1760                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1761                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1762                 break;
1763             }
1764             case SCM_CREDENTIALS:
1765             {
1766                 struct ucred *cred = (struct ucred *)data;
1767                 struct target_ucred *target_cred =
1768                     (struct target_ucred *)target_data;
1769 
1770                 __put_user(cred->pid, &target_cred->pid);
1771                 __put_user(cred->uid, &target_cred->uid);
1772                 __put_user(cred->gid, &target_cred->gid);
1773                 break;
1774             }
1775             default:
1776                 goto unimplemented;
1777             }
1778             break;
1779 
1780         case SOL_IP:
1781             switch (cmsg->cmsg_type) {
1782             case IP_TTL:
1783             {
1784                 uint32_t *v = (uint32_t *)data;
1785                 uint32_t *t_int = (uint32_t *)target_data;
1786 
1787                 if (len != sizeof(uint32_t) ||
1788                     tgt_len != sizeof(uint32_t)) {
1789                     goto unimplemented;
1790                 }
1791                 __put_user(*v, t_int);
1792                 break;
1793             }
1794             case IP_RECVERR:
1795             {
1796                 struct errhdr_t {
1797                    struct sock_extended_err ee;
1798                    struct sockaddr_in offender;
1799                 };
1800                 struct errhdr_t *errh = (struct errhdr_t *)data;
1801                 struct errhdr_t *target_errh =
1802                     (struct errhdr_t *)target_data;
1803 
1804                 if (len != sizeof(struct errhdr_t) ||
1805                     tgt_len != sizeof(struct errhdr_t)) {
1806                     goto unimplemented;
1807                 }
1808                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1809                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1810                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1811                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1812                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1813                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1814                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1815                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1816                     (void *) &errh->offender, sizeof(errh->offender));
1817                 break;
1818             }
1819             default:
1820                 goto unimplemented;
1821             }
1822             break;
1823 
1824         case SOL_IPV6:
1825             switch (cmsg->cmsg_type) {
1826             case IPV6_HOPLIMIT:
1827             {
1828                 uint32_t *v = (uint32_t *)data;
1829                 uint32_t *t_int = (uint32_t *)target_data;
1830 
1831                 if (len != sizeof(uint32_t) ||
1832                     tgt_len != sizeof(uint32_t)) {
1833                     goto unimplemented;
1834                 }
1835                 __put_user(*v, t_int);
1836                 break;
1837             }
1838             case IPV6_RECVERR:
1839             {
1840                 struct errhdr6_t {
1841                    struct sock_extended_err ee;
1842                    struct sockaddr_in6 offender;
1843                 };
1844                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1845                 struct errhdr6_t *target_errh =
1846                     (struct errhdr6_t *)target_data;
1847 
1848                 if (len != sizeof(struct errhdr6_t) ||
1849                     tgt_len != sizeof(struct errhdr6_t)) {
1850                     goto unimplemented;
1851                 }
1852                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1853                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1854                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1855                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1856                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1857                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1858                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1859                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1860                     (void *) &errh->offender, sizeof(errh->offender));
1861                 break;
1862             }
1863             default:
1864                 goto unimplemented;
1865             }
1866             break;
1867 
1868         default:
1869         unimplemented:
1870             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1871                           cmsg->cmsg_level, cmsg->cmsg_type);
1872             memcpy(target_data, data, MIN(len, tgt_len));
1873             if (tgt_len > len) {
1874                 memset(target_data + len, 0, tgt_len - len);
1875             }
1876         }
1877 
1878         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1879         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1880         if (msg_controllen < tgt_space) {
1881             tgt_space = msg_controllen;
1882         }
1883         msg_controllen -= tgt_space;
1884         space += tgt_space;
1885         cmsg = CMSG_NXTHDR(msgh, cmsg);
1886         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1887                                          target_cmsg_start);
1888     }
1889     unlock_user(target_cmsg, target_cmsg_addr, space);
1890  the_end:
1891     target_msgh->msg_controllen = tswapal(space);
1892     return 0;
1893 }
1894 
1895 /* do_setsockopt() Must return target values and target errnos. */
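     /*
      * Option values are read from guest memory and converted to the host
      * representation before calling the host setsockopt(); levels and
      * options we do not know about fail with -TARGET_ENOPROTOOPT.
      */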
1896 static abi_long do_setsockopt(int sockfd, int level, int optname,
1897                               abi_ulong optval_addr, socklen_t optlen)
1898 {
1899     abi_long ret;
1900     int val;
1901     struct ip_mreqn *ip_mreq;
1902     struct ip_mreq_source *ip_mreq_source;
1903 
1904     switch(level) {
1905     case SOL_TCP:
1906         /* TCP options all take an 'int' value.  */
1907         if (optlen < sizeof(uint32_t))
1908             return -TARGET_EINVAL;
1909 
1910         if (get_user_u32(val, optval_addr))
1911             return -TARGET_EFAULT;
1912         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1913         break;
1914     case SOL_IP:
1915         switch(optname) {
1916         case IP_TOS:
1917         case IP_TTL:
1918         case IP_HDRINCL:
1919         case IP_ROUTER_ALERT:
1920         case IP_RECVOPTS:
1921         case IP_RETOPTS:
1922         case IP_PKTINFO:
1923         case IP_MTU_DISCOVER:
1924         case IP_RECVERR:
1925         case IP_RECVTTL:
1926         case IP_RECVTOS:
1927 #ifdef IP_FREEBIND
1928         case IP_FREEBIND:
1929 #endif
1930         case IP_MULTICAST_TTL:
1931         case IP_MULTICAST_LOOP:
1932             val = 0;
1933             if (optlen >= sizeof(uint32_t)) {
1934                 if (get_user_u32(val, optval_addr))
1935                     return -TARGET_EFAULT;
1936             } else if (optlen >= 1) {
1937                 if (get_user_u8(val, optval_addr))
1938                     return -TARGET_EFAULT;
1939             }
1940             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1941             break;
1942         case IP_ADD_MEMBERSHIP:
1943         case IP_DROP_MEMBERSHIP:
1944             if (optlen < sizeof (struct target_ip_mreq) ||
1945                 optlen > sizeof (struct target_ip_mreqn))
1946                 return -TARGET_EINVAL;
1947 
1948             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1949             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1950             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1951             break;
1952 
1953         case IP_BLOCK_SOURCE:
1954         case IP_UNBLOCK_SOURCE:
1955         case IP_ADD_SOURCE_MEMBERSHIP:
1956         case IP_DROP_SOURCE_MEMBERSHIP:
1957             if (optlen != sizeof (struct target_ip_mreq_source))
1958                 return -TARGET_EINVAL;
1959 
1960             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1961             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1962             unlock_user(ip_mreq_source, optval_addr, 0);
1963             break;
1964 
1965         default:
1966             goto unimplemented;
1967         }
1968         break;
1969     case SOL_IPV6:
1970         switch (optname) {
1971         case IPV6_MTU_DISCOVER:
1972         case IPV6_MTU:
1973         case IPV6_V6ONLY:
1974         case IPV6_RECVPKTINFO:
1975         case IPV6_UNICAST_HOPS:
1976         case IPV6_MULTICAST_HOPS:
1977         case IPV6_MULTICAST_LOOP:
1978         case IPV6_RECVERR:
1979         case IPV6_RECVHOPLIMIT:
1980         case IPV6_2292HOPLIMIT:
1981         case IPV6_CHECKSUM:
1982         case IPV6_ADDRFORM:
1983         case IPV6_2292PKTINFO:
1984         case IPV6_RECVTCLASS:
1985         case IPV6_RECVRTHDR:
1986         case IPV6_2292RTHDR:
1987         case IPV6_RECVHOPOPTS:
1988         case IPV6_2292HOPOPTS:
1989         case IPV6_RECVDSTOPTS:
1990         case IPV6_2292DSTOPTS:
1991         case IPV6_TCLASS:
1992 #ifdef IPV6_RECVPATHMTU
1993         case IPV6_RECVPATHMTU:
1994 #endif
1995 #ifdef IPV6_TRANSPARENT
1996         case IPV6_TRANSPARENT:
1997 #endif
1998 #ifdef IPV6_FREEBIND
1999         case IPV6_FREEBIND:
2000 #endif
2001 #ifdef IPV6_RECVORIGDSTADDR
2002         case IPV6_RECVORIGDSTADDR:
2003 #endif
2004             val = 0;
2005             if (optlen < sizeof(uint32_t)) {
2006                 return -TARGET_EINVAL;
2007             }
2008             if (get_user_u32(val, optval_addr)) {
2009                 return -TARGET_EFAULT;
2010             }
2011             ret = get_errno(setsockopt(sockfd, level, optname,
2012                                        &val, sizeof(val)));
2013             break;
2014         case IPV6_PKTINFO:
2015         {
2016             struct in6_pktinfo pki;
2017 
2018             if (optlen < sizeof(pki)) {
2019                 return -TARGET_EINVAL;
2020             }
2021 
2022             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2023                 return -TARGET_EFAULT;
2024             }
2025 
2026             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2027 
2028             ret = get_errno(setsockopt(sockfd, level, optname,
2029                                        &pki, sizeof(pki)));
2030             break;
2031         }
2032         case IPV6_ADD_MEMBERSHIP:
2033         case IPV6_DROP_MEMBERSHIP:
2034         {
2035             struct ipv6_mreq ipv6mreq;
2036 
2037             if (optlen < sizeof(ipv6mreq)) {
2038                 return -TARGET_EINVAL;
2039             }
2040 
2041             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2042                 return -TARGET_EFAULT;
2043             }
2044 
2045             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2046 
2047             ret = get_errno(setsockopt(sockfd, level, optname,
2048                                        &ipv6mreq, sizeof(ipv6mreq)));
2049             break;
2050         }
2051         default:
2052             goto unimplemented;
2053         }
2054         break;
2055     case SOL_ICMPV6:
2056         switch (optname) {
2057         case ICMPV6_FILTER:
2058         {
2059             struct icmp6_filter icmp6f;
2060 
2061             if (optlen > sizeof(icmp6f)) {
2062                 optlen = sizeof(icmp6f);
2063             }
2064 
2065             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2066                 return -TARGET_EFAULT;
2067             }
2068 
2069             for (val = 0; val < 8; val++) {
2070                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2071             }
2072 
2073             ret = get_errno(setsockopt(sockfd, level, optname,
2074                                        &icmp6f, optlen));
2075             break;
2076         }
2077         default:
2078             goto unimplemented;
2079         }
2080         break;
2081     case SOL_RAW:
2082         switch (optname) {
2083         case ICMP_FILTER:
2084         case IPV6_CHECKSUM:
2085             /* those take a u32 value */
2086             if (optlen < sizeof(uint32_t)) {
2087                 return -TARGET_EINVAL;
2088             }
2089 
2090             if (get_user_u32(val, optval_addr)) {
2091                 return -TARGET_EFAULT;
2092             }
2093             ret = get_errno(setsockopt(sockfd, level, optname,
2094                                        &val, sizeof(val)));
2095             break;
2096 
2097         default:
2098             goto unimplemented;
2099         }
2100         break;
2101 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
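         /* AF_ALG options: the key is copied from guest memory verbatim;
          * ALG_SET_AEAD_AUTHSIZE carries no payload (the size travels in
          * optlen). */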
2102     case SOL_ALG:
2103         switch (optname) {
2104         case ALG_SET_KEY:
2105         {
2106             char *alg_key = g_malloc(optlen);
2107 
2108             if (!alg_key) {
2109                 return -TARGET_ENOMEM;
2110             }
2111             if (copy_from_user(alg_key, optval_addr, optlen)) {
2112                 g_free(alg_key);
2113                 return -TARGET_EFAULT;
2114             }
2115             ret = get_errno(setsockopt(sockfd, level, optname,
2116                                        alg_key, optlen));
2117             g_free(alg_key);
2118             break;
2119         }
2120         case ALG_SET_AEAD_AUTHSIZE:
2121         {
2122             ret = get_errno(setsockopt(sockfd, level, optname,
2123                                        NULL, optlen));
2124             break;
2125         }
2126         default:
2127             goto unimplemented;
2128         }
2129         break;
2130 #endif
2131     case TARGET_SOL_SOCKET:
2132         switch (optname) {
2133         case TARGET_SO_RCVTIMEO:
2134         {
2135                 struct timeval tv;
2136 
2137                 optname = SO_RCVTIMEO;
2138 
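                     /* Shared with TARGET_SO_SNDTIMEO below: convert the
                      * guest timeval and hand it to the host setsockopt(). */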
2139 set_timeout:
2140                 if (optlen != sizeof(struct target_timeval)) {
2141                     return -TARGET_EINVAL;
2142                 }
2143 
2144                 if (copy_from_user_timeval(&tv, optval_addr)) {
2145                     return -TARGET_EFAULT;
2146                 }
2147 
2148                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2149                                 &tv, sizeof(tv)));
2150                 return ret;
2151         }
2152         case TARGET_SO_SNDTIMEO:
2153                 optname = SO_SNDTIMEO;
2154                 goto set_timeout;
2155         case TARGET_SO_ATTACH_FILTER:
2156         {
2157                 struct target_sock_fprog *tfprog;
2158                 struct target_sock_filter *tfilter;
2159                 struct sock_fprog fprog;
2160                 struct sock_filter *filter;
2161                 int i;
2162 
2163                 if (optlen != sizeof(*tfprog)) {
2164                     return -TARGET_EINVAL;
2165                 }
2166                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2167                     return -TARGET_EFAULT;
2168                 }
2169                 if (!lock_user_struct(VERIFY_READ, tfilter,
2170                                       tswapal(tfprog->filter), 0)) {
2171                     unlock_user_struct(tfprog, optval_addr, 1);
2172                     return -TARGET_EFAULT;
2173                 }
2174 
2175                 fprog.len = tswap16(tfprog->len);
2176                 filter = g_try_new(struct sock_filter, fprog.len);
2177                 if (filter == NULL) {
2178                     unlock_user_struct(tfilter, tfprog->filter, 1);
2179                     unlock_user_struct(tfprog, optval_addr, 1);
2180                     return -TARGET_ENOMEM;
2181                 }
2182                 for (i = 0; i < fprog.len; i++) {
2183                     filter[i].code = tswap16(tfilter[i].code);
2184                     filter[i].jt = tfilter[i].jt;
2185                     filter[i].jf = tfilter[i].jf;
2186                     filter[i].k = tswap32(tfilter[i].k);
2187                 }
2188                 fprog.filter = filter;
2189 
2190                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2191                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2192                 g_free(filter);
2193 
2194                 unlock_user_struct(tfilter, tfprog->filter, 1);
2195                 unlock_user_struct(tfprog, optval_addr, 1);
2196                 return ret;
2197         }
2198         case TARGET_SO_BINDTODEVICE:
2199         {
2200                 char *dev_ifname, *addr_ifname;
2201 
2202                 if (optlen > IFNAMSIZ - 1) {
2203                     optlen = IFNAMSIZ - 1;
2204                 }
2205                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2206                 if (!dev_ifname) {
2207                     return -TARGET_EFAULT;
2208                 }
2209                 optname = SO_BINDTODEVICE;
2210                 addr_ifname = alloca(IFNAMSIZ);
2211                 memcpy(addr_ifname, dev_ifname, optlen);
2212                 addr_ifname[optlen] = 0;
2213                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2214                                            addr_ifname, optlen));
2215                 unlock_user(dev_ifname, optval_addr, 0);
2216                 return ret;
2217         }
2218         case TARGET_SO_LINGER:
2219         {
2220                 struct linger lg;
2221                 struct target_linger *tlg;
2222 
2223                 if (optlen != sizeof(struct target_linger)) {
2224                     return -TARGET_EINVAL;
2225                 }
2226                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2227                     return -TARGET_EFAULT;
2228                 }
2229                 __get_user(lg.l_onoff, &tlg->l_onoff);
2230                 __get_user(lg.l_linger, &tlg->l_linger);
2231                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2232                                 &lg, sizeof(lg)));
2233                 unlock_user_struct(tlg, optval_addr, 0);
2234                 return ret;
2235         }
2236             /* Options with 'int' argument.  */
2237         case TARGET_SO_DEBUG:
2238                 optname = SO_DEBUG;
2239                 break;
2240         case TARGET_SO_REUSEADDR:
2241                 optname = SO_REUSEADDR;
2242                 break;
2243 #ifdef SO_REUSEPORT
2244         case TARGET_SO_REUSEPORT:
2245                 optname = SO_REUSEPORT;
2246                 break;
2247 #endif
2248         case TARGET_SO_TYPE:
2249                 optname = SO_TYPE;
2250                 break;
2251         case TARGET_SO_ERROR:
2252                 optname = SO_ERROR;
2253                 break;
2254         case TARGET_SO_DONTROUTE:
2255                 optname = SO_DONTROUTE;
2256                 break;
2257         case TARGET_SO_BROADCAST:
2258                 optname = SO_BROADCAST;
2259                 break;
2260         case TARGET_SO_SNDBUF:
2261                 optname = SO_SNDBUF;
2262                 break;
2263         case TARGET_SO_SNDBUFFORCE:
2264                 optname = SO_SNDBUFFORCE;
2265                 break;
2266         case TARGET_SO_RCVBUF:
2267                 optname = SO_RCVBUF;
2268                 break;
2269         case TARGET_SO_RCVBUFFORCE:
2270                 optname = SO_RCVBUFFORCE;
2271                 break;
2272         case TARGET_SO_KEEPALIVE:
2273                 optname = SO_KEEPALIVE;
2274                 break;
2275         case TARGET_SO_OOBINLINE:
2276                 optname = SO_OOBINLINE;
2277                 break;
2278         case TARGET_SO_NO_CHECK:
2279                 optname = SO_NO_CHECK;
2280                 break;
2281         case TARGET_SO_PRIORITY:
2282                 optname = SO_PRIORITY;
2283                 break;
2284 #ifdef SO_BSDCOMPAT
2285         case TARGET_SO_BSDCOMPAT:
2286                 optname = SO_BSDCOMPAT;
2287                 break;
2288 #endif
2289         case TARGET_SO_PASSCRED:
2290                 optname = SO_PASSCRED;
2291                 break;
2292         case TARGET_SO_PASSSEC:
2293                 optname = SO_PASSSEC;
2294                 break;
2295         case TARGET_SO_TIMESTAMP:
2296                 optname = SO_TIMESTAMP;
2297                 break;
2298         case TARGET_SO_RCVLOWAT:
2299                 optname = SO_RCVLOWAT;
2300                 break;
2301         default:
2302             goto unimplemented;
2303         }
2304         if (optlen < sizeof(uint32_t))
2305             return -TARGET_EINVAL;
2306 
2307         if (get_user_u32(val, optval_addr))
2308             return -TARGET_EFAULT;
2309         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2310         break;
2311 #ifdef SOL_NETLINK
2312     case SOL_NETLINK:
2313         switch (optname) {
2314         case NETLINK_PKTINFO:
2315         case NETLINK_ADD_MEMBERSHIP:
2316         case NETLINK_DROP_MEMBERSHIP:
2317         case NETLINK_BROADCAST_ERROR:
2318         case NETLINK_NO_ENOBUFS:
2319 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2320         case NETLINK_LISTEN_ALL_NSID:
2321         case NETLINK_CAP_ACK:
2322 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2323 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2324         case NETLINK_EXT_ACK:
2325 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2326 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2327         case NETLINK_GET_STRICT_CHK:
2328 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2329             break;
2330         default:
2331             goto unimplemented;
2332         }
2333         val = 0;
2334         if (optlen < sizeof(uint32_t)) {
2335             return -TARGET_EINVAL;
2336         }
2337         if (get_user_u32(val, optval_addr)) {
2338             return -TARGET_EFAULT;
2339         }
2340         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2341                                    sizeof(val)));
2342         break;
2343 #endif /* SOL_NETLINK */
2344     default:
2345     unimplemented:
2346         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2347                       level, optname);
2348         ret = -TARGET_ENOPROTOOPT;
2349     }
2350     return ret;
2351 }
2352 
2353 /* do_getsockopt() Must return target values and target errnos. */
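     /*
      * The guest's optlen is read first, the host value is fetched, and
      * then at most that many bytes (plus the updated length) are copied
      * back to guest memory.
      */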
2354 static abi_long do_getsockopt(int sockfd, int level, int optname,
2355                               abi_ulong optval_addr, abi_ulong optlen)
2356 {
2357     abi_long ret;
2358     int len, val;
2359     socklen_t lv;
2360 
2361     switch(level) {
2362     case TARGET_SOL_SOCKET:
2363         level = SOL_SOCKET;
2364         switch (optname) {
2365         /* These don't just return a single integer */
2366         case TARGET_SO_PEERNAME:
2367             goto unimplemented;
2368         case TARGET_SO_RCVTIMEO: {
2369             struct timeval tv;
2370             socklen_t tvlen;
2371 
2372             optname = SO_RCVTIMEO;
2373 
2374 get_timeout:
2375             if (get_user_u32(len, optlen)) {
2376                 return -TARGET_EFAULT;
2377             }
2378             if (len < 0) {
2379                 return -TARGET_EINVAL;
2380             }
2381 
2382             tvlen = sizeof(tv);
2383             ret = get_errno(getsockopt(sockfd, level, optname,
2384                                        &tv, &tvlen));
2385             if (ret < 0) {
2386                 return ret;
2387             }
2388             if (len > sizeof(struct target_timeval)) {
2389                 len = sizeof(struct target_timeval);
2390             }
2391             if (copy_to_user_timeval(optval_addr, &tv)) {
2392                 return -TARGET_EFAULT;
2393             }
2394             if (put_user_u32(len, optlen)) {
2395                 return -TARGET_EFAULT;
2396             }
2397             break;
2398         }
2399         case TARGET_SO_SNDTIMEO:
2400             optname = SO_SNDTIMEO;
2401             goto get_timeout;
2402         case TARGET_SO_PEERCRED: {
2403             struct ucred cr;
2404             socklen_t crlen;
2405             struct target_ucred *tcr;
2406 
2407             if (get_user_u32(len, optlen)) {
2408                 return -TARGET_EFAULT;
2409             }
2410             if (len < 0) {
2411                 return -TARGET_EINVAL;
2412             }
2413 
2414             crlen = sizeof(cr);
2415             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2416                                        &cr, &crlen));
2417             if (ret < 0) {
2418                 return ret;
2419             }
2420             if (len > crlen) {
2421                 len = crlen;
2422             }
2423             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2424                 return -TARGET_EFAULT;
2425             }
2426             __put_user(cr.pid, &tcr->pid);
2427             __put_user(cr.uid, &tcr->uid);
2428             __put_user(cr.gid, &tcr->gid);
2429             unlock_user_struct(tcr, optval_addr, 1);
2430             if (put_user_u32(len, optlen)) {
2431                 return -TARGET_EFAULT;
2432             }
2433             break;
2434         }
2435         case TARGET_SO_PEERSEC: {
2436             char *name;
2437 
2438             if (get_user_u32(len, optlen)) {
2439                 return -TARGET_EFAULT;
2440             }
2441             if (len < 0) {
2442                 return -TARGET_EINVAL;
2443             }
2444             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2445             if (!name) {
2446                 return -TARGET_EFAULT;
2447             }
2448             lv = len;
2449             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2450                                        name, &lv));
2451             if (put_user_u32(lv, optlen)) {
2452                 ret = -TARGET_EFAULT;
2453             }
2454             unlock_user(name, optval_addr, lv);
2455             break;
2456         }
2457         case TARGET_SO_LINGER:
2458         {
2459             struct linger lg;
2460             socklen_t lglen;
2461             struct target_linger *tlg;
2462 
2463             if (get_user_u32(len, optlen)) {
2464                 return -TARGET_EFAULT;
2465             }
2466             if (len < 0) {
2467                 return -TARGET_EINVAL;
2468             }
2469 
2470             lglen = sizeof(lg);
2471             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2472                                        &lg, &lglen));
2473             if (ret < 0) {
2474                 return ret;
2475             }
2476             if (len > lglen) {
2477                 len = lglen;
2478             }
2479             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2480                 return -TARGET_EFAULT;
2481             }
2482             __put_user(lg.l_onoff, &tlg->l_onoff);
2483             __put_user(lg.l_linger, &tlg->l_linger);
2484             unlock_user_struct(tlg, optval_addr, 1);
2485             if (put_user_u32(len, optlen)) {
2486                 return -TARGET_EFAULT;
2487             }
2488             break;
2489         }
2490         /* Options with 'int' argument.  */
2491         case TARGET_SO_DEBUG:
2492             optname = SO_DEBUG;
2493             goto int_case;
2494         case TARGET_SO_REUSEADDR:
2495             optname = SO_REUSEADDR;
2496             goto int_case;
2497 #ifdef SO_REUSEPORT
2498         case TARGET_SO_REUSEPORT:
2499             optname = SO_REUSEPORT;
2500             goto int_case;
2501 #endif
2502         case TARGET_SO_TYPE:
2503             optname = SO_TYPE;
2504             goto int_case;
2505         case TARGET_SO_ERROR:
2506             optname = SO_ERROR;
2507             goto int_case;
2508         case TARGET_SO_DONTROUTE:
2509             optname = SO_DONTROUTE;
2510             goto int_case;
2511         case TARGET_SO_BROADCAST:
2512             optname = SO_BROADCAST;
2513             goto int_case;
2514         case TARGET_SO_SNDBUF:
2515             optname = SO_SNDBUF;
2516             goto int_case;
2517         case TARGET_SO_RCVBUF:
2518             optname = SO_RCVBUF;
2519             goto int_case;
2520         case TARGET_SO_KEEPALIVE:
2521             optname = SO_KEEPALIVE;
2522             goto int_case;
2523         case TARGET_SO_OOBINLINE:
2524             optname = SO_OOBINLINE;
2525             goto int_case;
2526         case TARGET_SO_NO_CHECK:
2527             optname = SO_NO_CHECK;
2528             goto int_case;
2529         case TARGET_SO_PRIORITY:
2530             optname = SO_PRIORITY;
2531             goto int_case;
2532 #ifdef SO_BSDCOMPAT
2533         case TARGET_SO_BSDCOMPAT:
2534             optname = SO_BSDCOMPAT;
2535             goto int_case;
2536 #endif
2537         case TARGET_SO_PASSCRED:
2538             optname = SO_PASSCRED;
2539             goto int_case;
2540         case TARGET_SO_TIMESTAMP:
2541             optname = SO_TIMESTAMP;
2542             goto int_case;
2543         case TARGET_SO_RCVLOWAT:
2544             optname = SO_RCVLOWAT;
2545             goto int_case;
2546         case TARGET_SO_ACCEPTCONN:
2547             optname = SO_ACCEPTCONN;
2548             goto int_case;
2549         default:
2550             goto int_case;
2551         }
2552         break;
2553     case SOL_TCP:
2554         /* TCP options all take an 'int' value.  */
2555     int_case:
2556         if (get_user_u32(len, optlen))
2557             return -TARGET_EFAULT;
2558         if (len < 0)
2559             return -TARGET_EINVAL;
2560         lv = sizeof(lv);
2561         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2562         if (ret < 0)
2563             return ret;
2564         if (optname == SO_TYPE) {
2565             val = host_to_target_sock_type(val);
2566         }
2567         if (len > lv)
2568             len = lv;
2569         if (len == 4) {
2570             if (put_user_u32(val, optval_addr))
2571                 return -TARGET_EFAULT;
2572         } else {
2573             if (put_user_u8(val, optval_addr))
2574                 return -TARGET_EFAULT;
2575         }
2576         if (put_user_u32(len, optlen))
2577             return -TARGET_EFAULT;
2578         break;
2579     case SOL_IP:
2580         switch(optname) {
2581         case IP_TOS:
2582         case IP_TTL:
2583         case IP_HDRINCL:
2584         case IP_ROUTER_ALERT:
2585         case IP_RECVOPTS:
2586         case IP_RETOPTS:
2587         case IP_PKTINFO:
2588         case IP_MTU_DISCOVER:
2589         case IP_RECVERR:
2590         case IP_RECVTOS:
2591 #ifdef IP_FREEBIND
2592         case IP_FREEBIND:
2593 #endif
2594         case IP_MULTICAST_TTL:
2595         case IP_MULTICAST_LOOP:
2596             if (get_user_u32(len, optlen))
2597                 return -TARGET_EFAULT;
2598             if (len < 0)
2599                 return -TARGET_EINVAL;
2600             lv = sizeof(lv);
2601             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2602             if (ret < 0)
2603                 return ret;
2604             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2605                 len = 1;
2606                 if (put_user_u32(len, optlen)
2607                     || put_user_u8(val, optval_addr))
2608                     return -TARGET_EFAULT;
2609             } else {
2610                 if (len > sizeof(int))
2611                     len = sizeof(int);
2612                 if (put_user_u32(len, optlen)
2613                     || put_user_u32(val, optval_addr))
2614                     return -TARGET_EFAULT;
2615             }
2616             break;
2617         default:
2618             ret = -TARGET_ENOPROTOOPT;
2619             break;
2620         }
2621         break;
2622     case SOL_IPV6:
2623         switch (optname) {
2624         case IPV6_MTU_DISCOVER:
2625         case IPV6_MTU:
2626         case IPV6_V6ONLY:
2627         case IPV6_RECVPKTINFO:
2628         case IPV6_UNICAST_HOPS:
2629         case IPV6_MULTICAST_HOPS:
2630         case IPV6_MULTICAST_LOOP:
2631         case IPV6_RECVERR:
2632         case IPV6_RECVHOPLIMIT:
2633         case IPV6_2292HOPLIMIT:
2634         case IPV6_CHECKSUM:
2635         case IPV6_ADDRFORM:
2636         case IPV6_2292PKTINFO:
2637         case IPV6_RECVTCLASS:
2638         case IPV6_RECVRTHDR:
2639         case IPV6_2292RTHDR:
2640         case IPV6_RECVHOPOPTS:
2641         case IPV6_2292HOPOPTS:
2642         case IPV6_RECVDSTOPTS:
2643         case IPV6_2292DSTOPTS:
2644         case IPV6_TCLASS:
2645 #ifdef IPV6_RECVPATHMTU
2646         case IPV6_RECVPATHMTU:
2647 #endif
2648 #ifdef IPV6_TRANSPARENT
2649         case IPV6_TRANSPARENT:
2650 #endif
2651 #ifdef IPV6_FREEBIND
2652         case IPV6_FREEBIND:
2653 #endif
2654 #ifdef IPV6_RECVORIGDSTADDR
2655         case IPV6_RECVORIGDSTADDR:
2656 #endif
2657             if (get_user_u32(len, optlen))
2658                 return -TARGET_EFAULT;
2659             if (len < 0)
2660                 return -TARGET_EINVAL;
2661             lv = sizeof(lv);
2662             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2663             if (ret < 0)
2664                 return ret;
2665             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2666                 len = 1;
2667                 if (put_user_u32(len, optlen)
2668                     || put_user_u8(val, optval_addr))
2669                     return -TARGET_EFAULT;
2670             } else {
2671                 if (len > sizeof(int))
2672                     len = sizeof(int);
2673                 if (put_user_u32(len, optlen)
2674                     || put_user_u32(val, optval_addr))
2675                     return -TARGET_EFAULT;
2676             }
2677             break;
2678         default:
2679             ret = -TARGET_ENOPROTOOPT;
2680             break;
2681         }
2682         break;
2683 #ifdef SOL_NETLINK
2684     case SOL_NETLINK:
2685         switch (optname) {
2686         case NETLINK_PKTINFO:
2687         case NETLINK_BROADCAST_ERROR:
2688         case NETLINK_NO_ENOBUFS:
2689 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2690         case NETLINK_LISTEN_ALL_NSID:
2691         case NETLINK_CAP_ACK:
2692 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2693 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2694         case NETLINK_EXT_ACK:
2695 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2696 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2697         case NETLINK_GET_STRICT_CHK:
2698 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2699             if (get_user_u32(len, optlen)) {
2700                 return -TARGET_EFAULT;
2701             }
2702             if (len != sizeof(val)) {
2703                 return -TARGET_EINVAL;
2704             }
2705             lv = len;
2706             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2707             if (ret < 0) {
2708                 return ret;
2709             }
2710             if (put_user_u32(lv, optlen)
2711                 || put_user_u32(val, optval_addr)) {
2712                 return -TARGET_EFAULT;
2713             }
2714             break;
2715 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2716         case NETLINK_LIST_MEMBERSHIPS:
2717         {
2718             uint32_t *results;
2719             int i;
2720             if (get_user_u32(len, optlen)) {
2721                 return -TARGET_EFAULT;
2722             }
2723             if (len < 0) {
2724                 return -TARGET_EINVAL;
2725             }
2726             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2727             if (!results) {
2728                 return -TARGET_EFAULT;
2729             }
2730             lv = len;
2731             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2732             if (ret < 0) {
2733                 unlock_user(results, optval_addr, 0);
2734                 return ret;
2735             }
2736             /* swap host endianness to target endianness. */
2737             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2738                 results[i] = tswap32(results[i]);
2739             }
2740             if (put_user_u32(lv, optlen)) {
2741                 return -TARGET_EFAULT;
2742             }
2743             unlock_user(results, optval_addr, 0);
2744             break;
2745         }
2746 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2747         default:
2748             goto unimplemented;
2749         }
2750         break;
2751 #endif /* SOL_NETLINK */
2752     default:
2753     unimplemented:
2754         qemu_log_mask(LOG_UNIMP,
2755                       "getsockopt level=%d optname=%d not yet supported\n",
2756                       level, optname);
2757         ret = -TARGET_EOPNOTSUPP;
2758         break;
2759     }
2760     return ret;
2761 }
2762 
2763 /* Convert a target low/high pair representing a file offset into the host
2764  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2765  * as the kernel doesn't handle them either.
2766  */
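     /*
      * For example, with a 32-bit guest on a 64-bit host, tlow = 0x1000
      * and thigh = 0x1 combine to off = 0x100001000, giving
      * *hlow = 0x100001000 and *hhigh = 0.
      */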
2767 static void target_to_host_low_high(abi_ulong tlow,
2768                                     abi_ulong thigh,
2769                                     unsigned long *hlow,
2770                                     unsigned long *hhigh)
2771 {
2772     uint64_t off = tlow |
2773         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2774         TARGET_LONG_BITS / 2;
2775 
2776     *hlow = off;
2777     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2778 }
2779 
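     /*
      * Validate a guest iovec array and lock every buffer into host
      * memory.  On failure NULL is returned with errno set; a bad buffer
      * after the first one is turned into a zero-length entry so the
      * syscall can still perform a partial transfer.
      */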
2780 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2781                                 abi_ulong count, int copy)
2782 {
2783     struct target_iovec *target_vec;
2784     struct iovec *vec;
2785     abi_ulong total_len, max_len;
2786     int i;
2787     int err = 0;
2788     bool bad_address = false;
2789 
2790     if (count == 0) {
2791         errno = 0;
2792         return NULL;
2793     }
2794     if (count > IOV_MAX) {
2795         errno = EINVAL;
2796         return NULL;
2797     }
2798 
2799     vec = g_try_new0(struct iovec, count);
2800     if (vec == NULL) {
2801         errno = ENOMEM;
2802         return NULL;
2803     }
2804 
2805     target_vec = lock_user(VERIFY_READ, target_addr,
2806                            count * sizeof(struct target_iovec), 1);
2807     if (target_vec == NULL) {
2808         err = EFAULT;
2809         goto fail2;
2810     }
2811 
2812     /* ??? If host page size > target page size, this will result in a
2813        value larger than what we can actually support.  */
2814     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2815     total_len = 0;
2816 
2817     for (i = 0; i < count; i++) {
2818         abi_ulong base = tswapal(target_vec[i].iov_base);
2819         abi_long len = tswapal(target_vec[i].iov_len);
2820 
2821         if (len < 0) {
2822             err = EINVAL;
2823             goto fail;
2824         } else if (len == 0) {
2825             /* Zero length pointer is ignored.  */
2826             vec[i].iov_base = 0;
2827         } else {
2828             vec[i].iov_base = lock_user(type, base, len, copy);
2829             /* If the first buffer pointer is bad, this is a fault.  But
2830              * subsequent bad buffers will result in a partial write; this
2831              * is realized by filling the vector with null pointers and
2832              * zero lengths. */
2833             if (!vec[i].iov_base) {
2834                 if (i == 0) {
2835                     err = EFAULT;
2836                     goto fail;
2837                 } else {
2838                     bad_address = true;
2839                 }
2840             }
2841             if (bad_address) {
2842                 len = 0;
2843             }
2844             if (len > max_len - total_len) {
2845                 len = max_len - total_len;
2846             }
2847         }
2848         vec[i].iov_len = len;
2849         total_len += len;
2850     }
2851 
2852     unlock_user(target_vec, target_addr, 0);
2853     return vec;
2854 
2855  fail:
2856     while (--i >= 0) {
2857         if (tswapal(target_vec[i].iov_len) > 0) {
2858             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2859         }
2860     }
2861     unlock_user(target_vec, target_addr, 0);
2862  fail2:
2863     g_free(vec);
2864     errno = err;
2865     return NULL;
2866 }
2867 
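     /*
      * Undo lock_iovec(): release each locked buffer, copying data back
      * to the guest when 'copy' is set, then free the host vector.
      */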
2868 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2869                          abi_ulong count, int copy)
2870 {
2871     struct target_iovec *target_vec;
2872     int i;
2873 
2874     target_vec = lock_user(VERIFY_READ, target_addr,
2875                            count * sizeof(struct target_iovec), 1);
2876     if (target_vec) {
2877         for (i = 0; i < count; i++) {
2878             abi_ulong base = tswapal(target_vec[i].iov_base);
2879             abi_long len = tswapal(target_vec[i].iov_len);
2880             if (len < 0) {
2881                 break;
2882             }
2883             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2884         }
2885         unlock_user(target_vec, target_addr, 0);
2886     }
2887 
2888     g_free(vec);
2889 }
2890 
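     /*
      * Translate the guest SOCK_* type and its SOCK_CLOEXEC/SOCK_NONBLOCK
      * flags into host values; flags the host cannot express at socket
      * creation time are either rejected here or emulated afterwards in
      * sock_flags_fixup().
      */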
2891 static inline int target_to_host_sock_type(int *type)
2892 {
2893     int host_type = 0;
2894     int target_type = *type;
2895 
2896     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2897     case TARGET_SOCK_DGRAM:
2898         host_type = SOCK_DGRAM;
2899         break;
2900     case TARGET_SOCK_STREAM:
2901         host_type = SOCK_STREAM;
2902         break;
2903     default:
2904         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2905         break;
2906     }
2907     if (target_type & TARGET_SOCK_CLOEXEC) {
2908 #if defined(SOCK_CLOEXEC)
2909         host_type |= SOCK_CLOEXEC;
2910 #else
2911         return -TARGET_EINVAL;
2912 #endif
2913     }
2914     if (target_type & TARGET_SOCK_NONBLOCK) {
2915 #if defined(SOCK_NONBLOCK)
2916         host_type |= SOCK_NONBLOCK;
2917 #elif !defined(O_NONBLOCK)
2918         return -TARGET_EINVAL;
2919 #endif
2920     }
2921     *type = host_type;
2922     return 0;
2923 }
2924 
2925 /* Try to emulate socket type flags after socket creation.  */
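     /* When the host lacks SOCK_NONBLOCK, fall back to fcntl(O_NONBLOCK). */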
2926 static int sock_flags_fixup(int fd, int target_type)
2927 {
2928 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2929     if (target_type & TARGET_SOCK_NONBLOCK) {
2930         int flags = fcntl(fd, F_GETFL);
2931         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2932             close(fd);
2933             return -TARGET_EINVAL;
2934         }
2935     }
2936 #endif
2937     return fd;
2938 }
2939 
2940 /* do_socket() Must return target values and target errnos. */
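     /*
      * Only a small set of netlink protocols is supported; sockets whose
      * data needs translation register an fd_trans handler on success.
      */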
2941 static abi_long do_socket(int domain, int type, int protocol)
2942 {
2943     int target_type = type;
2944     int ret;
2945 
2946     ret = target_to_host_sock_type(&type);
2947     if (ret) {
2948         return ret;
2949     }
2950 
2951     if (domain == PF_NETLINK && !(
2952 #ifdef CONFIG_RTNETLINK
2953          protocol == NETLINK_ROUTE ||
2954 #endif
2955          protocol == NETLINK_KOBJECT_UEVENT ||
2956          protocol == NETLINK_AUDIT)) {
2957         return -EPFNOSUPPORT;
2958     }
2959 
2960     if (domain == AF_PACKET ||
2961         (domain == AF_INET && type == SOCK_PACKET)) {
2962         protocol = tswap16(protocol);
2963     }
2964 
2965     ret = get_errno(socket(domain, type, protocol));
2966     if (ret >= 0) {
2967         ret = sock_flags_fixup(ret, target_type);
2968         if (type == SOCK_PACKET) {
2969             /* Handle an obsolete case:
2970              * if the socket type is SOCK_PACKET, bind by name
2971              */
2972             fd_trans_register(ret, &target_packet_trans);
2973         } else if (domain == PF_NETLINK) {
2974             switch (protocol) {
2975 #ifdef CONFIG_RTNETLINK
2976             case NETLINK_ROUTE:
2977                 fd_trans_register(ret, &target_netlink_route_trans);
2978                 break;
2979 #endif
2980             case NETLINK_KOBJECT_UEVENT:
2981                 /* nothing to do: messages are strings */
2982                 break;
2983             case NETLINK_AUDIT:
2984                 fd_trans_register(ret, &target_netlink_audit_trans);
2985                 break;
2986             default:
2987                 g_assert_not_reached();
2988             }
2989         }
2990     }
2991     return ret;
2992 }
2993 
2994 /* do_bind() Must return target values and target errnos. */
2995 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2996                         socklen_t addrlen)
2997 {
2998     void *addr;
2999     abi_long ret;
3000 
3001     if ((int)addrlen < 0) {
3002         return -TARGET_EINVAL;
3003     }
3004 
3005     addr = alloca(addrlen+1);
3006 
3007     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3008     if (ret)
3009         return ret;
3010 
3011     return get_errno(bind(sockfd, addr, addrlen));
3012 }
3013 
3014 /* do_connect() Must return target values and target errnos. */
3015 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3016                            socklen_t addrlen)
3017 {
3018     void *addr;
3019     abi_long ret;
3020 
3021     if ((int)addrlen < 0) {
3022         return -TARGET_EINVAL;
3023     }
3024 
3025     addr = alloca(addrlen+1);
3026 
3027     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3028     if (ret)
3029         return ret;
3030 
3031     return get_errno(safe_connect(sockfd, addr, addrlen));
3032 }
3033 
3034 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
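     /*
      * Build a host msghdr from the guest one (name, control data and
      * iovec), call the host sendmsg/recvmsg, and convert the results
      * back to guest format for the receive case.
      */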
3035 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3036                                       int flags, int send)
3037 {
3038     abi_long ret, len;
3039     struct msghdr msg;
3040     abi_ulong count;
3041     struct iovec *vec;
3042     abi_ulong target_vec;
3043 
3044     if (msgp->msg_name) {
3045         msg.msg_namelen = tswap32(msgp->msg_namelen);
3046         msg.msg_name = alloca(msg.msg_namelen+1);
3047         ret = target_to_host_sockaddr(fd, msg.msg_name,
3048                                       tswapal(msgp->msg_name),
3049                                       msg.msg_namelen);
3050         if (ret == -TARGET_EFAULT) {
3051             /* For connected sockets msg_name and msg_namelen must
3052              * be ignored, so returning EFAULT immediately is wrong.
3053              * Instead, pass a bad msg_name to the host kernel, and
3054              * let it decide whether to return EFAULT or not.
3055              */
3056             msg.msg_name = (void *)-1;
3057         } else if (ret) {
3058             goto out2;
3059         }
3060     } else {
3061         msg.msg_name = NULL;
3062         msg.msg_namelen = 0;
3063     }
3064     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3065     msg.msg_control = alloca(msg.msg_controllen);
3066     memset(msg.msg_control, 0, msg.msg_controllen);
3067 
3068     msg.msg_flags = tswap32(msgp->msg_flags);
3069 
3070     count = tswapal(msgp->msg_iovlen);
3071     target_vec = tswapal(msgp->msg_iov);
3072 
3073     if (count > IOV_MAX) {
3074         /* sendmsg/recvmsg return a different errno for this condition than
3075          * readv/writev, so we must catch it here before lock_iovec() does.
3076          */
3077         ret = -TARGET_EMSGSIZE;
3078         goto out2;
3079     }
3080 
3081     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3082                      target_vec, count, send);
3083     if (vec == NULL) {
3084         ret = -host_to_target_errno(errno);
3085         goto out2;
3086     }
3087     msg.msg_iovlen = count;
3088     msg.msg_iov = vec;
3089 
3090     if (send) {
3091         if (fd_trans_target_to_host_data(fd)) {
3092             void *host_msg;
3093 
3094             host_msg = g_malloc(msg.msg_iov->iov_len);
3095             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3096             ret = fd_trans_target_to_host_data(fd)(host_msg,
3097                                                    msg.msg_iov->iov_len);
3098             if (ret >= 0) {
3099                 msg.msg_iov->iov_base = host_msg;
3100                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3101             }
3102             g_free(host_msg);
3103         } else {
3104             ret = target_to_host_cmsg(&msg, msgp);
3105             if (ret == 0) {
3106                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3107             }
3108         }
3109     } else {
3110         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3111         if (!is_error(ret)) {
3112             len = ret;
3113             if (fd_trans_host_to_target_data(fd)) {
3114                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3115                                                MIN(msg.msg_iov->iov_len, len));
3116             } else {
3117                 ret = host_to_target_cmsg(msgp, &msg);
3118             }
3119             if (!is_error(ret)) {
3120                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3121                 msgp->msg_flags = tswap32(msg.msg_flags);
3122                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3123                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3124                                     msg.msg_name, msg.msg_namelen);
3125                     if (ret) {
3126                         goto out;
3127                     }
3128                 }
3129 
3130                 ret = len;
3131             }
3132         }
3133     }
3134 
3135 out:
3136     unlock_iovec(vec, target_vec, count, !send);
3137 out2:
3138     return ret;
3139 }
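
/*
 * Illustrative guest-side sketch (not part of QEMU, compiled out): why the
 * EFAULT from target_to_host_sockaddr() above cannot be returned right away.
 * On a connected datagram socket the kernel ignores msg_name/msg_namelen
 * entirely, so a guest may legitimately pass a bogus pointer there and still
 * expect sendmsg() to succeed.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t send_on_connected(int fd, const void *buf, size_t len)
{
    struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
    struct msghdr mh;

    memset(&mh, 0, sizeof(mh));
    mh.msg_name = (void *)-1;      /* unmapped, but ignored when connected */
    mh.msg_namelen = sizeof(struct sockaddr_storage);
    mh.msg_iov = &iov;
    mh.msg_iovlen = 1;
    return sendmsg(fd, &mh, 0);    /* the kernel decides whether this is EFAULT */
}
#endif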
3140 
3141 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3142                                int flags, int send)
3143 {
3144     abi_long ret;
3145     struct target_msghdr *msgp;
3146 
3147     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3148                           msgp,
3149                           target_msg,
3150                           send ? 1 : 0)) {
3151         return -TARGET_EFAULT;
3152     }
3153     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3154     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3155     return ret;
3156 }
3157 
3158 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3159  * so it might not have this *mmsg-specific flag either.
3160  */
3161 #ifndef MSG_WAITFORONE
3162 #define MSG_WAITFORONE 0x10000
3163 #endif
3164 
3165 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3166                                 unsigned int vlen, unsigned int flags,
3167                                 int send)
3168 {
3169     struct target_mmsghdr *mmsgp;
3170     abi_long ret = 0;
3171     int i;
3172 
3173     if (vlen > UIO_MAXIOV) {
3174         vlen = UIO_MAXIOV;
3175     }
3176 
3177     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3178     if (!mmsgp) {
3179         return -TARGET_EFAULT;
3180     }
3181 
3182     for (i = 0; i < vlen; i++) {
3183         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3184         if (is_error(ret)) {
3185             break;
3186         }
3187         mmsgp[i].msg_len = tswap32(ret);
3188         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3189         if (flags & MSG_WAITFORONE) {
3190             flags |= MSG_DONTWAIT;
3191         }
3192     }
3193 
3194     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3195 
3196     /* Return number of datagrams sent if we sent any at all;
3197      * otherwise return the error.
3198      */
3199     if (i) {
3200         return i;
3201     }
3202     return ret;
3203 }
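
/*
 * Illustrative guest-side use of MSG_WAITFORONE (not part of QEMU, compiled
 * out): block for the first datagram only, then drain whatever else is
 * already queued.  This is the behaviour the flag handling above reproduces:
 * after one packet MSG_DONTWAIT is ORed in, and a partial batch returns its
 * count rather than an error.
 */
#if 0
#define _GNU_SOURCE
#include <sys/socket.h>

static int drain_queue(int fd, struct mmsghdr *vec, unsigned int vlen)
{
    return recvmmsg(fd, vec, vlen, MSG_WAITFORONE, NULL);
}
#endif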
3204 
3205 /* do_accept4() must return target values and target errnos. */
3206 static abi_long do_accept4(int fd, abi_ulong target_addr,
3207                            abi_ulong target_addrlen_addr, int flags)
3208 {
3209     socklen_t addrlen, ret_addrlen;
3210     void *addr;
3211     abi_long ret;
3212     int host_flags;
3213 
3214     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3215 
3216     if (target_addr == 0) {
3217         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3218     }
3219 
3220     /* linux returns EINVAL if addrlen pointer is invalid */
3221     if (get_user_u32(addrlen, target_addrlen_addr))
3222         return -TARGET_EINVAL;
3223 
3224     if ((int)addrlen < 0) {
3225         return -TARGET_EINVAL;
3226     }
3227 
3228     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3229         return -TARGET_EINVAL;
3230 
3231     addr = alloca(addrlen);
3232 
3233     ret_addrlen = addrlen;
3234     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3235     if (!is_error(ret)) {
3236         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3237         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3238             ret = -TARGET_EFAULT;
3239         }
3240     }
3241     return ret;
3242 }
3243 
3244 /* do_getpeername() must return target values and target errnos. */
3245 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3246                                abi_ulong target_addrlen_addr)
3247 {
3248     socklen_t addrlen, ret_addrlen;
3249     void *addr;
3250     abi_long ret;
3251 
3252     if (get_user_u32(addrlen, target_addrlen_addr))
3253         return -TARGET_EFAULT;
3254 
3255     if ((int)addrlen < 0) {
3256         return -TARGET_EINVAL;
3257     }
3258 
3259     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3260         return -TARGET_EFAULT;
3261 
3262     addr = alloca(addrlen);
3263 
3264     ret_addrlen = addrlen;
3265     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3266     if (!is_error(ret)) {
3267         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3268         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3269             ret = -TARGET_EFAULT;
3270         }
3271     }
3272     return ret;
3273 }
3274 
3275 /* do_getsockname() must return target values and target errnos. */
3276 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3277                                abi_ulong target_addrlen_addr)
3278 {
3279     socklen_t addrlen, ret_addrlen;
3280     void *addr;
3281     abi_long ret;
3282 
3283     if (get_user_u32(addrlen, target_addrlen_addr))
3284         return -TARGET_EFAULT;
3285 
3286     if ((int)addrlen < 0) {
3287         return -TARGET_EINVAL;
3288     }
3289 
3290     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3291         return -TARGET_EFAULT;
3292 
3293     addr = alloca(addrlen);
3294 
3295     ret_addrlen = addrlen;
3296     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3297     if (!is_error(ret)) {
3298         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3299         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3300             ret = -TARGET_EFAULT;
3301         }
3302     }
3303     return ret;
3304 }
3305 
3306 /* do_socketpair() must return target values and target errnos. */
3307 static abi_long do_socketpair(int domain, int type, int protocol,
3308                               abi_ulong target_tab_addr)
3309 {
3310     int tab[2];
3311     abi_long ret;
3312 
3313     target_to_host_sock_type(&type);
3314 
3315     ret = get_errno(socketpair(domain, type, protocol, tab));
3316     if (!is_error(ret)) {
3317         if (put_user_s32(tab[0], target_tab_addr)
3318             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3319             ret = -TARGET_EFAULT;
3320     }
3321     return ret;
3322 }
3323 
3324 /* do_sendto() must return target values and target errnos. */
3325 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3326                           abi_ulong target_addr, socklen_t addrlen)
3327 {
3328     void *addr;
3329     void *host_msg;
3330     void *copy_msg = NULL;
3331     abi_long ret;
3332 
3333     if ((int)addrlen < 0) {
3334         return -TARGET_EINVAL;
3335     }
3336 
3337     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3338     if (!host_msg)
3339         return -TARGET_EFAULT;
3340     if (fd_trans_target_to_host_data(fd)) {
3341         copy_msg = host_msg;
3342         host_msg = g_malloc(len);
3343         memcpy(host_msg, copy_msg, len);
3344         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3345         if (ret < 0) {
3346             goto fail;
3347         }
3348     }
3349     if (target_addr) {
3350         addr = alloca(addrlen+1);
3351         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3352         if (ret) {
3353             goto fail;
3354         }
3355         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3356     } else {
3357         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3358     }
3359 fail:
3360     if (copy_msg) {
3361         g_free(host_msg);
3362         host_msg = copy_msg;
3363     }
3364     unlock_user(host_msg, msg, 0);
3365     return ret;
3366 }
3367 
3368 /* do_recvfrom() must return target values and target errnos. */
3369 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3370                             abi_ulong target_addr,
3371                             abi_ulong target_addrlen)
3372 {
3373     socklen_t addrlen, ret_addrlen;
3374     void *addr;
3375     void *host_msg;
3376     abi_long ret;
3377 
3378     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3379     if (!host_msg)
3380         return -TARGET_EFAULT;
3381     if (target_addr) {
3382         if (get_user_u32(addrlen, target_addrlen)) {
3383             ret = -TARGET_EFAULT;
3384             goto fail;
3385         }
3386         if ((int)addrlen < 0) {
3387             ret = -TARGET_EINVAL;
3388             goto fail;
3389         }
3390         addr = alloca(addrlen);
3391         ret_addrlen = addrlen;
3392         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3393                                       addr, &ret_addrlen));
3394     } else {
3395         addr = NULL; /* To keep compiler quiet.  */
3396         addrlen = 0; /* To keep compiler quiet.  */
3397         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3398     }
3399     if (!is_error(ret)) {
3400         if (fd_trans_host_to_target_data(fd)) {
3401             abi_long trans;
3402             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3403             if (is_error(trans)) {
3404                 ret = trans;
3405                 goto fail;
3406             }
3407         }
3408         if (target_addr) {
3409             host_to_target_sockaddr(target_addr, addr,
3410                                     MIN(addrlen, ret_addrlen));
3411             if (put_user_u32(ret_addrlen, target_addrlen)) {
3412                 ret = -TARGET_EFAULT;
3413                 goto fail;
3414             }
3415         }
3416         unlock_user(host_msg, msg, len);
3417     } else {
3418 fail:
3419         unlock_user(host_msg, msg, 0);
3420     }
3421     return ret;
3422 }
3423 
3424 #ifdef TARGET_NR_socketcall
3425 /* do_socketcall() must return target values and target errnos. */
3426 static abi_long do_socketcall(int num, abi_ulong vptr)
3427 {
3428     static const unsigned nargs[] = { /* number of arguments per operation */
3429         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3430         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3431         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3432         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3433         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3434         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3435         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3436         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3437         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3438         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3439         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3440         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3441         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3442         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3443         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3444         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3445         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3446         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3447         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3448         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3449     };
3450     abi_long a[6]; /* max 6 args */
3451     unsigned i;
3452 
3453     /* check the range of the first argument num */
3454     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3455     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3456         return -TARGET_EINVAL;
3457     }
3458     /* ensure we have space for args */
3459     if (nargs[num] > ARRAY_SIZE(a)) {
3460         return -TARGET_EINVAL;
3461     }
3462     /* collect the arguments in a[] according to nargs[] */
3463     for (i = 0; i < nargs[num]; ++i) {
3464         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3465             return -TARGET_EFAULT;
3466         }
3467     }
3468     /* now when we have the args, invoke the appropriate underlying function */
3469     switch (num) {
3470     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3471         return do_socket(a[0], a[1], a[2]);
3472     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3473         return do_bind(a[0], a[1], a[2]);
3474     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3475         return do_connect(a[0], a[1], a[2]);
3476     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3477         return get_errno(listen(a[0], a[1]));
3478     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3479         return do_accept4(a[0], a[1], a[2], 0);
3480     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3481         return do_getsockname(a[0], a[1], a[2]);
3482     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3483         return do_getpeername(a[0], a[1], a[2]);
3484     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3485         return do_socketpair(a[0], a[1], a[2], a[3]);
3486     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3487         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3488     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3489         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3490     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3491         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3492     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3493         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3494     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3495         return get_errno(shutdown(a[0], a[1]));
3496     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3497         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3498     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3499         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3500     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3501         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3502     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3503         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3504     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3505         return do_accept4(a[0], a[1], a[2], a[3]);
3506     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3507         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3508     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3509         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3510     default:
3511         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3512         return -TARGET_EINVAL;
3513     }
3514 }
3515 #endif
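
/*
 * Sketch of the guest side of socketcall() (not part of QEMU, compiled out).
 * On targets that multiplex the socket API through a single syscall, libc
 * packs the arguments into an array; do_socketcall() above reads them back
 * with get_user_ual() and dispatches on the call number.  The wrapper below
 * is purely illustrative; 3 is SYS_CONNECT in <linux/net.h>.
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>

static long guest_connect(int fd, const void *addr, unsigned long addrlen)
{
    unsigned long args[3] = { (unsigned long)fd, (unsigned long)addr, addrlen };

    return syscall(SYS_socketcall, 3 /* SYS_CONNECT */, args);
}
#endif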
3516 
3517 #define N_SHM_REGIONS	32
3518 
3519 static struct shm_region {
3520     abi_ulong start;
3521     abi_ulong size;
3522     bool in_use;
3523 } shm_regions[N_SHM_REGIONS];
3524 
3525 #ifndef TARGET_SEMID64_DS
3526 /* asm-generic version of this struct */
3527 struct target_semid64_ds
3528 {
3529   struct target_ipc_perm sem_perm;
3530   abi_ulong sem_otime;
3531 #if TARGET_ABI_BITS == 32
3532   abi_ulong __unused1;
3533 #endif
3534   abi_ulong sem_ctime;
3535 #if TARGET_ABI_BITS == 32
3536   abi_ulong __unused2;
3537 #endif
3538   abi_ulong sem_nsems;
3539   abi_ulong __unused3;
3540   abi_ulong __unused4;
3541 };
3542 #endif
3543 
3544 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3545                                                abi_ulong target_addr)
3546 {
3547     struct target_ipc_perm *target_ip;
3548     struct target_semid64_ds *target_sd;
3549 
3550     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3551         return -TARGET_EFAULT;
3552     target_ip = &(target_sd->sem_perm);
3553     host_ip->__key = tswap32(target_ip->__key);
3554     host_ip->uid = tswap32(target_ip->uid);
3555     host_ip->gid = tswap32(target_ip->gid);
3556     host_ip->cuid = tswap32(target_ip->cuid);
3557     host_ip->cgid = tswap32(target_ip->cgid);
3558 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3559     host_ip->mode = tswap32(target_ip->mode);
3560 #else
3561     host_ip->mode = tswap16(target_ip->mode);
3562 #endif
3563 #if defined(TARGET_PPC)
3564     host_ip->__seq = tswap32(target_ip->__seq);
3565 #else
3566     host_ip->__seq = tswap16(target_ip->__seq);
3567 #endif
3568     unlock_user_struct(target_sd, target_addr, 0);
3569     return 0;
3570 }
3571 
3572 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3573                                                struct ipc_perm *host_ip)
3574 {
3575     struct target_ipc_perm *target_ip;
3576     struct target_semid64_ds *target_sd;
3577 
3578     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3579         return -TARGET_EFAULT;
3580     target_ip = &(target_sd->sem_perm);
3581     target_ip->__key = tswap32(host_ip->__key);
3582     target_ip->uid = tswap32(host_ip->uid);
3583     target_ip->gid = tswap32(host_ip->gid);
3584     target_ip->cuid = tswap32(host_ip->cuid);
3585     target_ip->cgid = tswap32(host_ip->cgid);
3586 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3587     target_ip->mode = tswap32(host_ip->mode);
3588 #else
3589     target_ip->mode = tswap16(host_ip->mode);
3590 #endif
3591 #if defined(TARGET_PPC)
3592     target_ip->__seq = tswap32(host_ip->__seq);
3593 #else
3594     target_ip->__seq = tswap16(host_ip->__seq);
3595 #endif
3596     unlock_user_struct(target_sd, target_addr, 1);
3597     return 0;
3598 }
3599 
3600 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3601                                                abi_ulong target_addr)
3602 {
3603     struct target_semid64_ds *target_sd;
3604 
3605     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3606         return -TARGET_EFAULT;
3607     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3608         return -TARGET_EFAULT;
3609     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3610     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3611     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3612     unlock_user_struct(target_sd, target_addr, 0);
3613     return 0;
3614 }
3615 
3616 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3617                                                struct semid_ds *host_sd)
3618 {
3619     struct target_semid64_ds *target_sd;
3620 
3621     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3622         return -TARGET_EFAULT;
3623     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3624         return -TARGET_EFAULT;
3625     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3626     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3627     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3628     unlock_user_struct(target_sd, target_addr, 1);
3629     return 0;
3630 }
3631 
3632 struct target_seminfo {
3633     int semmap;
3634     int semmni;
3635     int semmns;
3636     int semmnu;
3637     int semmsl;
3638     int semopm;
3639     int semume;
3640     int semusz;
3641     int semvmx;
3642     int semaem;
3643 };
3644 
3645 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3646                                               struct seminfo *host_seminfo)
3647 {
3648     struct target_seminfo *target_seminfo;
3649     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3650         return -TARGET_EFAULT;
3651     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3652     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3653     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3654     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3655     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3656     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3657     __put_user(host_seminfo->semume, &target_seminfo->semume);
3658     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3659     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3660     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3661     unlock_user_struct(target_seminfo, target_addr, 1);
3662     return 0;
3663 }
3664 
3665 union semun {
3666     int val;
3667     struct semid_ds *buf;
3668     unsigned short *array;
3669     struct seminfo *__buf;
3670 };
3671 
3672 union target_semun {
3673     int val;
3674     abi_ulong buf;
3675     abi_ulong array;
3676     abi_ulong __buf;
3677 };
3678 
3679 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3680                                                abi_ulong target_addr)
3681 {
3682     int nsems;
3683     unsigned short *array;
3684     union semun semun;
3685     struct semid_ds semid_ds;
3686     int i, ret;
3687 
3688     semun.buf = &semid_ds;
3689 
3690     ret = semctl(semid, 0, IPC_STAT, semun);
3691     if (ret == -1)
3692         return get_errno(ret);
3693 
3694     nsems = semid_ds.sem_nsems;
3695 
3696     *host_array = g_try_new(unsigned short, nsems);
3697     if (!*host_array) {
3698         return -TARGET_ENOMEM;
3699     }
3700     array = lock_user(VERIFY_READ, target_addr,
3701                       nsems*sizeof(unsigned short), 1);
3702     if (!array) {
3703         g_free(*host_array);
3704         return -TARGET_EFAULT;
3705     }
3706 
3707     for(i=0; i<nsems; i++) {
3708         __get_user((*host_array)[i], &array[i]);
3709     }
3710     unlock_user(array, target_addr, 0);
3711 
3712     return 0;
3713 }
3714 
3715 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3716                                                unsigned short **host_array)
3717 {
3718     int nsems;
3719     unsigned short *array;
3720     union semun semun;
3721     struct semid_ds semid_ds;
3722     int i, ret;
3723 
3724     semun.buf = &semid_ds;
3725 
3726     ret = semctl(semid, 0, IPC_STAT, semun);
3727     if (ret == -1)
3728         return get_errno(ret);
3729 
3730     nsems = semid_ds.sem_nsems;
3731 
3732     array = lock_user(VERIFY_WRITE, target_addr,
3733                       nsems*sizeof(unsigned short), 0);
3734     if (!array)
3735         return -TARGET_EFAULT;
3736 
3737     for(i=0; i<nsems; i++) {
3738         __put_user((*host_array)[i], &array[i]);
3739     }
3740     g_free(*host_array);
3741     unlock_user(array, target_addr, 1);
3742 
3743     return 0;
3744 }
3745 
3746 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3747                                  abi_ulong target_arg)
3748 {
3749     union target_semun target_su = { .buf = target_arg };
3750     union semun arg;
3751     struct semid_ds dsarg;
3752     unsigned short *array = NULL;
3753     struct seminfo seminfo;
3754     abi_long ret = -TARGET_EINVAL;
3755     abi_long err;
3756     cmd &= 0xff;
3757 
3758     switch (cmd) {
3759     case GETVAL:
3760     case SETVAL:
3761         /* In 64 bit cross-endian situations, we will erroneously pick up
3762          * the wrong half of the union for the "val" element.  To rectify
3763          * this, the entire 8-byte structure is byteswapped, followed by
3764          * a swap of the 4 byte val field. In other cases, the data is
3765          * already in proper host byte order. */
3766         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3767             target_su.buf = tswapal(target_su.buf);
3768             arg.val = tswap32(target_su.val);
3769         } else {
3770             arg.val = target_su.val;
3771         }
3772         ret = get_errno(semctl(semid, semnum, cmd, arg));
3773         break;
3774     case GETALL:
3775     case SETALL:
3776         err = target_to_host_semarray(semid, &array, target_su.array);
3777         if (err)
3778             return err;
3779         arg.array = array;
3780         ret = get_errno(semctl(semid, semnum, cmd, arg));
3781         err = host_to_target_semarray(semid, target_su.array, &array);
3782         if (err)
3783             return err;
3784         break;
3785     case IPC_STAT:
3786     case IPC_SET:
3787     case SEM_STAT:
3788         err = target_to_host_semid_ds(&dsarg, target_su.buf);
3789         if (err)
3790             return err;
3791         arg.buf = &dsarg;
3792         ret = get_errno(semctl(semid, semnum, cmd, arg));
3793         err = host_to_target_semid_ds(target_su.buf, &dsarg);
3794         if (err)
3795             return err;
3796         break;
3797     case IPC_INFO:
3798     case SEM_INFO:
3799         arg.__buf = &seminfo;
3800         ret = get_errno(semctl(semid, semnum, cmd, arg));
3801         err = host_to_target_seminfo(target_su.__buf, &seminfo);
3802         if (err)
3803             return err;
3804         break;
3805     case IPC_RMID:
3806     case GETPID:
3807     case GETNCNT:
3808     case GETZCNT:
3809         ret = get_errno(semctl(semid, semnum, cmd, NULL));
3810         break;
3811     }
3812 
3813     return ret;
3814 }
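
/*
 * Worked example for the GETVAL/SETVAL path above, assuming a 64-bit
 * big-endian guest on a little-endian host and a SETVAL with val = 5.
 * The guest lays the union out as 00 00 00 05 xx xx xx xx (val in the
 * first four bytes).  By the time it reaches here it is held as a
 * host-byte-order abi_ulong, i.e. bytes xx xx xx xx 05 00 00 00 in host
 * memory, so reading target_su.val directly would pick up the wrong half.
 * tswapal() restores the guest byte layout (00 00 00 05 xx xx xx xx) and
 * tswap32() then fixes the byte order of the 4-byte field, giving
 * arg.val == 5.
 */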
3815 
3816 struct target_sembuf {
3817     unsigned short sem_num;
3818     short sem_op;
3819     short sem_flg;
3820 };
3821 
3822 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3823                                              abi_ulong target_addr,
3824                                              unsigned nsops)
3825 {
3826     struct target_sembuf *target_sembuf;
3827     int i;
3828 
3829     target_sembuf = lock_user(VERIFY_READ, target_addr,
3830                               nsops*sizeof(struct target_sembuf), 1);
3831     if (!target_sembuf)
3832         return -TARGET_EFAULT;
3833 
3834     for(i=0; i<nsops; i++) {
3835         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3836         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3837         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3838     }
3839 
3840     unlock_user(target_sembuf, target_addr, 0);
3841 
3842     return 0;
3843 }
3844 
3845 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3846 {
3847     struct sembuf sops[nsops];
3848     abi_long ret;
3849 
3850     if (target_to_host_sembuf(sops, ptr, nsops))
3851         return -TARGET_EFAULT;
3852 
3853     ret = -TARGET_ENOSYS;
3854 #ifdef __NR_semtimedop
3855     ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3856 #endif
3857 #ifdef __NR_ipc
3858     if (ret == -TARGET_ENOSYS) {
3859         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3860     }
3861 #endif
3862     return ret;
3863 }
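
/*
 * Illustrative guest-side semop() call (not part of QEMU, compiled out).
 * Each element of the sops array is converted by target_to_host_sembuf()
 * above before the host semtimedop()/ipc() syscall is issued.
 */
#if 0
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

static int take_and_signal(int semid)
{
    struct sembuf ops[2] = {
        { .sem_num = 0, .sem_op = -1, .sem_flg = 0 },        /* P(sem 0) */
        { .sem_num = 1, .sem_op = +1, .sem_flg = SEM_UNDO }, /* V(sem 1) */
    };

    return semop(semid, ops, 2);
}
#endif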
3864 
3865 struct target_msqid_ds
3866 {
3867     struct target_ipc_perm msg_perm;
3868     abi_ulong msg_stime;
3869 #if TARGET_ABI_BITS == 32
3870     abi_ulong __unused1;
3871 #endif
3872     abi_ulong msg_rtime;
3873 #if TARGET_ABI_BITS == 32
3874     abi_ulong __unused2;
3875 #endif
3876     abi_ulong msg_ctime;
3877 #if TARGET_ABI_BITS == 32
3878     abi_ulong __unused3;
3879 #endif
3880     abi_ulong __msg_cbytes;
3881     abi_ulong msg_qnum;
3882     abi_ulong msg_qbytes;
3883     abi_ulong msg_lspid;
3884     abi_ulong msg_lrpid;
3885     abi_ulong __unused4;
3886     abi_ulong __unused5;
3887 };
3888 
3889 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3890                                                abi_ulong target_addr)
3891 {
3892     struct target_msqid_ds *target_md;
3893 
3894     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3895         return -TARGET_EFAULT;
3896     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3897         return -TARGET_EFAULT;
3898     host_md->msg_stime = tswapal(target_md->msg_stime);
3899     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3900     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3901     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3902     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3903     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3904     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3905     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3906     unlock_user_struct(target_md, target_addr, 0);
3907     return 0;
3908 }
3909 
3910 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3911                                                struct msqid_ds *host_md)
3912 {
3913     struct target_msqid_ds *target_md;
3914 
3915     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3916         return -TARGET_EFAULT;
3917     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3918         return -TARGET_EFAULT;
3919     target_md->msg_stime = tswapal(host_md->msg_stime);
3920     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3921     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3922     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3923     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3924     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3925     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3926     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3927     unlock_user_struct(target_md, target_addr, 1);
3928     return 0;
3929 }
3930 
3931 struct target_msginfo {
3932     int msgpool;
3933     int msgmap;
3934     int msgmax;
3935     int msgmnb;
3936     int msgmni;
3937     int msgssz;
3938     int msgtql;
3939     unsigned short int msgseg;
3940 };
3941 
3942 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3943                                               struct msginfo *host_msginfo)
3944 {
3945     struct target_msginfo *target_msginfo;
3946     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3947         return -TARGET_EFAULT;
3948     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3949     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3950     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3951     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3952     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3953     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3954     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3955     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3956     unlock_user_struct(target_msginfo, target_addr, 1);
3957     return 0;
3958 }
3959 
3960 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3961 {
3962     struct msqid_ds dsarg;
3963     struct msginfo msginfo;
3964     abi_long ret = -TARGET_EINVAL;
3965 
3966     cmd &= 0xff;
3967 
3968     switch (cmd) {
3969     case IPC_STAT:
3970     case IPC_SET:
3971     case MSG_STAT:
3972         if (target_to_host_msqid_ds(&dsarg,ptr))
3973             return -TARGET_EFAULT;
3974         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3975         if (host_to_target_msqid_ds(ptr,&dsarg))
3976             return -TARGET_EFAULT;
3977         break;
3978     case IPC_RMID:
3979         ret = get_errno(msgctl(msgid, cmd, NULL));
3980         break;
3981     case IPC_INFO:
3982     case MSG_INFO:
3983         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3984         if (host_to_target_msginfo(ptr, &msginfo))
3985             return -TARGET_EFAULT;
3986         break;
3987     }
3988 
3989     return ret;
3990 }
3991 
3992 struct target_msgbuf {
3993     abi_long mtype;
3994     char mtext[1];
3995 };
3996 
3997 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3998                                  ssize_t msgsz, int msgflg)
3999 {
4000     struct target_msgbuf *target_mb;
4001     struct msgbuf *host_mb;
4002     abi_long ret = 0;
4003 
4004     if (msgsz < 0) {
4005         return -TARGET_EINVAL;
4006     }
4007 
4008     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4009         return -TARGET_EFAULT;
4010     host_mb = g_try_malloc(msgsz + sizeof(long));
4011     if (!host_mb) {
4012         unlock_user_struct(target_mb, msgp, 0);
4013         return -TARGET_ENOMEM;
4014     }
4015     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4016     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4017     ret = -TARGET_ENOSYS;
4018 #ifdef __NR_msgsnd
4019     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4020 #endif
4021 #ifdef __NR_ipc
4022     if (ret == -TARGET_ENOSYS) {
4023         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4024                                  host_mb, 0));
4025     }
4026 #endif
4027     g_free(host_mb);
4028     unlock_user_struct(target_mb, msgp, 0);
4029 
4030     return ret;
4031 }
4032 
4033 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4034                                  ssize_t msgsz, abi_long msgtyp,
4035                                  int msgflg)
4036 {
4037     struct target_msgbuf *target_mb;
4038     char *target_mtext;
4039     struct msgbuf *host_mb;
4040     abi_long ret = 0;
4041 
4042     if (msgsz < 0) {
4043         return -TARGET_EINVAL;
4044     }
4045 
4046     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4047         return -TARGET_EFAULT;
4048 
4049     host_mb = g_try_malloc(msgsz + sizeof(long));
4050     if (!host_mb) {
4051         ret = -TARGET_ENOMEM;
4052         goto end;
4053     }
4054     ret = -TARGET_ENOSYS;
4055 #ifdef __NR_msgrcv
4056     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4057 #endif
4058 #ifdef __NR_ipc
4059     if (ret == -TARGET_ENOSYS) {
4060         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4061                         msgflg, host_mb, msgtyp));
4062     }
4063 #endif
4064 
4065     if (ret > 0) {
4066         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4067         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4068         if (!target_mtext) {
4069             ret = -TARGET_EFAULT;
4070             goto end;
4071         }
4072         memcpy(target_mb->mtext, host_mb->mtext, ret);
4073         unlock_user(target_mtext, target_mtext_addr, ret);
4074     }
4075 
4076     target_mb->mtype = tswapal(host_mb->mtype);
4077 
4078 end:
4079     if (target_mb)
4080         unlock_user_struct(target_mb, msgp, 1);
4081     g_free(host_mb);
4082     return ret;
4083 }
4084 
4085 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4086                                                abi_ulong target_addr)
4087 {
4088     struct target_shmid_ds *target_sd;
4089 
4090     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4091         return -TARGET_EFAULT;
4092     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4093         return -TARGET_EFAULT;
4094     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4095     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4096     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4097     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4098     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4099     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4100     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4101     unlock_user_struct(target_sd, target_addr, 0);
4102     return 0;
4103 }
4104 
4105 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4106                                                struct shmid_ds *host_sd)
4107 {
4108     struct target_shmid_ds *target_sd;
4109 
4110     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4111         return -TARGET_EFAULT;
4112     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4113         return -TARGET_EFAULT;
4114     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4115     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4116     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4117     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4118     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4119     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4120     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4121     unlock_user_struct(target_sd, target_addr, 1);
4122     return 0;
4123 }
4124 
4125 struct  target_shminfo {
4126     abi_ulong shmmax;
4127     abi_ulong shmmin;
4128     abi_ulong shmmni;
4129     abi_ulong shmseg;
4130     abi_ulong shmall;
4131 };
4132 
4133 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4134                                               struct shminfo *host_shminfo)
4135 {
4136     struct target_shminfo *target_shminfo;
4137     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4138         return -TARGET_EFAULT;
4139     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4140     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4141     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4142     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4143     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4144     unlock_user_struct(target_shminfo, target_addr, 1);
4145     return 0;
4146 }
4147 
4148 struct target_shm_info {
4149     int used_ids;
4150     abi_ulong shm_tot;
4151     abi_ulong shm_rss;
4152     abi_ulong shm_swp;
4153     abi_ulong swap_attempts;
4154     abi_ulong swap_successes;
4155 };
4156 
4157 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4158                                                struct shm_info *host_shm_info)
4159 {
4160     struct target_shm_info *target_shm_info;
4161     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4162         return -TARGET_EFAULT;
4163     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4164     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4165     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4166     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4167     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4168     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4169     unlock_user_struct(target_shm_info, target_addr, 1);
4170     return 0;
4171 }
4172 
4173 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4174 {
4175     struct shmid_ds dsarg;
4176     struct shminfo shminfo;
4177     struct shm_info shm_info;
4178     abi_long ret = -TARGET_EINVAL;
4179 
4180     cmd &= 0xff;
4181 
4182     switch(cmd) {
4183     case IPC_STAT:
4184     case IPC_SET:
4185     case SHM_STAT:
4186         if (target_to_host_shmid_ds(&dsarg, buf))
4187             return -TARGET_EFAULT;
4188         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4189         if (host_to_target_shmid_ds(buf, &dsarg))
4190             return -TARGET_EFAULT;
4191         break;
4192     case IPC_INFO:
4193         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4194         if (host_to_target_shminfo(buf, &shminfo))
4195             return -TARGET_EFAULT;
4196         break;
4197     case SHM_INFO:
4198         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4199         if (host_to_target_shm_info(buf, &shm_info))
4200             return -TARGET_EFAULT;
4201         break;
4202     case IPC_RMID:
4203     case SHM_LOCK:
4204     case SHM_UNLOCK:
4205         ret = get_errno(shmctl(shmid, cmd, NULL));
4206         break;
4207     }
4208 
4209     return ret;
4210 }
4211 
4212 #ifndef TARGET_FORCE_SHMLBA
4213 /* For most architectures, SHMLBA is the same as the page size;
4214  * some architectures have larger values, in which case they should
4215  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4216  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4217  * and defining its own value for SHMLBA.
4218  *
4219  * The kernel also permits SHMLBA to be set by the architecture to a
4220  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4221  * this means that addresses are rounded to the large size if
4222  * SHM_RND is set but addresses not aligned to that size are not rejected
4223  * as long as they are at least page-aligned. Since the only architecture
4224  * which uses this is ia64 this code doesn't provide for that oddity.
4225  */
4226 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4227 {
4228     return TARGET_PAGE_SIZE;
4229 }
4230 #endif
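
/*
 * Sketch only, with hypothetical values: an architecture whose SHMLBA is
 * larger than its page size would define TARGET_FORCE_SHMLBA in its target
 * headers and provide its own target_shmlba(), along these lines:
 */
#if 0
#define TARGET_FORCE_SHMLBA 1

static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return 4 * TARGET_PAGE_SIZE;   /* hypothetical 4-page alignment */
}
#endif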
4231 
4232 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4233                                  int shmid, abi_ulong shmaddr, int shmflg)
4234 {
4235     abi_long raddr;
4236     void *host_raddr;
4237     struct shmid_ds shm_info;
4238     int i,ret;
4239     abi_ulong shmlba;
4240 
4241     /* find out the length of the shared memory segment */
4242     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4243     if (is_error(ret)) {
4244         /* can't get length, bail out */
4245         return ret;
4246     }
4247 
4248     shmlba = target_shmlba(cpu_env);
4249 
4250     if (shmaddr & (shmlba - 1)) {
4251         if (shmflg & SHM_RND) {
4252             shmaddr &= ~(shmlba - 1);
4253         } else {
4254             return -TARGET_EINVAL;
4255         }
4256     }
4257     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4258         return -TARGET_EINVAL;
4259     }
4260 
4261     mmap_lock();
4262 
4263     if (shmaddr)
4264         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4265     else {
4266         abi_ulong mmap_start;
4267 
4268         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4269         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4270 
4271         if (mmap_start == -1) {
4272             errno = ENOMEM;
4273             host_raddr = (void *)-1;
4274         } else
4275             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4276     }
4277 
4278     if (host_raddr == (void *)-1) {
4279         mmap_unlock();
4280         return get_errno((long)host_raddr);
4281     }
4282     raddr=h2g((unsigned long)host_raddr);
4283 
4284     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4285                    PAGE_VALID | PAGE_READ |
4286                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4287 
4288     for (i = 0; i < N_SHM_REGIONS; i++) {
4289         if (!shm_regions[i].in_use) {
4290             shm_regions[i].in_use = true;
4291             shm_regions[i].start = raddr;
4292             shm_regions[i].size = shm_info.shm_segsz;
4293             break;
4294         }
4295     }
4296 
4297     mmap_unlock();
4298     return raddr;
4299 
4300 }
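
/*
 * Worked example for the attach-address handling above: with a 4 KiB
 * shmlba, shmaddr = 0x40001234 is rejected with -TARGET_EINVAL unless
 * SHM_RND is set, in which case it is rounded down to 0x40001000
 * (shmaddr &= ~(shmlba - 1)).  A shmaddr of 0 instead lets mmap_find_vma()
 * pick a guest range aligned to both the host SHMLBA and the target one.
 */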
4301 
4302 static inline abi_long do_shmdt(abi_ulong shmaddr)
4303 {
4304     int i;
4305     abi_long rv;
4306 
4307     mmap_lock();
4308 
4309     for (i = 0; i < N_SHM_REGIONS; ++i) {
4310         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4311             shm_regions[i].in_use = false;
4312             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4313             break;
4314         }
4315     }
4316     rv = get_errno(shmdt(g2h(shmaddr)));
4317 
4318     mmap_unlock();
4319 
4320     return rv;
4321 }
4322 
4323 #ifdef TARGET_NR_ipc
4324 /* ??? This only works with linear mappings.  */
4325 /* do_ipc() must return target values and target errnos. */
4326 static abi_long do_ipc(CPUArchState *cpu_env,
4327                        unsigned int call, abi_long first,
4328                        abi_long second, abi_long third,
4329                        abi_long ptr, abi_long fifth)
4330 {
4331     int version;
4332     abi_long ret = 0;
4333 
4334     version = call >> 16;
4335     call &= 0xffff;
4336 
4337     switch (call) {
4338     case IPCOP_semop:
4339         ret = do_semop(first, ptr, second);
4340         break;
4341 
4342     case IPCOP_semget:
4343         ret = get_errno(semget(first, second, third));
4344         break;
4345 
4346     case IPCOP_semctl: {
4347         /* The semun argument to semctl is passed by value, so dereference the
4348          * ptr argument. */
4349         abi_ulong atptr;
4350         get_user_ual(atptr, ptr);
4351         ret = do_semctl(first, second, third, atptr);
4352         break;
4353     }
4354 
4355     case IPCOP_msgget:
4356         ret = get_errno(msgget(first, second));
4357         break;
4358 
4359     case IPCOP_msgsnd:
4360         ret = do_msgsnd(first, ptr, second, third);
4361         break;
4362 
4363     case IPCOP_msgctl:
4364         ret = do_msgctl(first, second, ptr);
4365         break;
4366 
4367     case IPCOP_msgrcv:
4368         switch (version) {
4369         case 0:
4370             {
4371                 struct target_ipc_kludge {
4372                     abi_long msgp;
4373                     abi_long msgtyp;
4374                 } *tmp;
4375 
4376                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4377                     ret = -TARGET_EFAULT;
4378                     break;
4379                 }
4380 
4381                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4382 
4383                 unlock_user_struct(tmp, ptr, 0);
4384                 break;
4385             }
4386         default:
4387             ret = do_msgrcv(first, ptr, second, fifth, third);
4388         }
4389         break;
4390 
4391     case IPCOP_shmat:
4392         switch (version) {
4393         default:
4394         {
4395             abi_ulong raddr;
4396             raddr = do_shmat(cpu_env, first, ptr, second);
4397             if (is_error(raddr))
4398                 return get_errno(raddr);
4399             if (put_user_ual(raddr, third))
4400                 return -TARGET_EFAULT;
4401             break;
4402         }
4403         case 1:
4404             ret = -TARGET_EINVAL;
4405             break;
4406         }
4407         break;
4408     case IPCOP_shmdt:
4409         ret = do_shmdt(ptr);
4410         break;
4411 
4412     case IPCOP_shmget:
4413         /* IPC_* flag values are the same on all linux platforms */
4414         ret = get_errno(shmget(first, second, third));
4415         break;
4416 
4417     /* IPC_* and SHM_* command values are the same on all linux platforms */
4418     case IPCOP_shmctl:
4419         ret = do_shmctl(first, second, ptr);
4420         break;
4421     default:
4422         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4423                       call, version);
4424         ret = -TARGET_ENOSYS;
4425         break;
4426     }
4427     return ret;
4428 }
4429 #endif
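
/*
 * The ipc(2) multiplexer above splits its first argument into an operation
 * (low 16 bits) and a version (high 16 bits).  For example, a guest libc
 * issuing call = (1 << 16) | IPCOP_msgrcv takes the "default" branch and
 * passes msgp and msgtyp directly, whereas version 0 is the historical
 * interface where both are bundled in the target_ipc_kludge structure read
 * from ptr.
 */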
4430 
4431 /* kernel structure types definitions */
4432 
4433 #define STRUCT(name, ...) STRUCT_ ## name,
4434 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4435 enum {
4436 #include "syscall_types.h"
4437 STRUCT_MAX
4438 };
4439 #undef STRUCT
4440 #undef STRUCT_SPECIAL
4441 
4442 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4443 #define STRUCT_SPECIAL(name)
4444 #include "syscall_types.h"
4445 #undef STRUCT
4446 #undef STRUCT_SPECIAL
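
/*
 * Sketch of what the two-pass inclusion above produces for a hypothetical
 * STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT) entry in
 * syscall_types.h: the first pass adds STRUCT_winsize to the enum, the
 * second emits the thunk description (shown compiled out here).
 */
#if 0
static const argtype struct_winsize_def[] = {
    TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL
};
#endif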
4447 
4448 typedef struct IOCTLEntry IOCTLEntry;
4449 
4450 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4451                              int fd, int cmd, abi_long arg);
4452 
4453 struct IOCTLEntry {
4454     int target_cmd;
4455     unsigned int host_cmd;
4456     const char *name;
4457     int access;
4458     do_ioctl_fn *do_ioctl;
4459     const argtype arg_type[5];
4460 };
4461 
4462 #define IOC_R 0x0001
4463 #define IOC_W 0x0002
4464 #define IOC_RW (IOC_R | IOC_W)
4465 
4466 #define MAX_STRUCT_SIZE 4096
4467 
4468 #ifdef CONFIG_FIEMAP
4469 /* So fiemap access checks don't overflow on 32 bit systems.
4470  * This is very slightly smaller than the limit imposed by
4471  * the underlying kernel.
4472  */
4473 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4474                             / sizeof(struct fiemap_extent))
4475 
4476 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4477                                        int fd, int cmd, abi_long arg)
4478 {
4479     /* The parameter for this ioctl is a struct fiemap followed
4480      * by an array of struct fiemap_extent whose size is set
4481      * in fiemap->fm_extent_count. The array is filled in by the
4482      * ioctl.
4483      */
4484     int target_size_in, target_size_out;
4485     struct fiemap *fm;
4486     const argtype *arg_type = ie->arg_type;
4487     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4488     void *argptr, *p;
4489     abi_long ret;
4490     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4491     uint32_t outbufsz;
4492     int free_fm = 0;
4493 
4494     assert(arg_type[0] == TYPE_PTR);
4495     assert(ie->access == IOC_RW);
4496     arg_type++;
4497     target_size_in = thunk_type_size(arg_type, 0);
4498     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4499     if (!argptr) {
4500         return -TARGET_EFAULT;
4501     }
4502     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4503     unlock_user(argptr, arg, 0);
4504     fm = (struct fiemap *)buf_temp;
4505     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4506         return -TARGET_EINVAL;
4507     }
4508 
4509     outbufsz = sizeof (*fm) +
4510         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4511 
4512     if (outbufsz > MAX_STRUCT_SIZE) {
4513         /* We can't fit all the extents into the fixed size buffer.
4514          * Allocate one that is large enough and use it instead.
4515          */
4516         fm = g_try_malloc(outbufsz);
4517         if (!fm) {
4518             return -TARGET_ENOMEM;
4519         }
4520         memcpy(fm, buf_temp, sizeof(struct fiemap));
4521         free_fm = 1;
4522     }
4523     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4524     if (!is_error(ret)) {
4525         target_size_out = target_size_in;
4526         /* An extent_count of 0 means we were only counting the extents
4527          * so there are no structs to copy
4528          */
4529         if (fm->fm_extent_count != 0) {
4530             target_size_out += fm->fm_mapped_extents * extent_size;
4531         }
4532         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4533         if (!argptr) {
4534             ret = -TARGET_EFAULT;
4535         } else {
4536             /* Convert the struct fiemap */
4537             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4538             if (fm->fm_extent_count != 0) {
4539                 p = argptr + target_size_in;
4540                 /* ...and then all the struct fiemap_extents */
4541                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4542                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4543                                   THUNK_TARGET);
4544                     p += extent_size;
4545                 }
4546             }
4547             unlock_user(argptr, arg, target_size_out);
4548         }
4549     }
4550     if (free_fm) {
4551         g_free(fm);
4552     }
4553     return ret;
4554 }
4555 #endif
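
/*
 * Size example for the fiemap handling above, assuming the usual Linux
 * layouts of 32 bytes for struct fiemap and 56 bytes for struct
 * fiemap_extent: a request with fm_extent_count = 100 needs
 * outbufsz = 32 + 100 * 56 = 5632 bytes, which exceeds MAX_STRUCT_SIZE
 * (4096), so a temporary buffer is allocated and freed once the extents
 * have been converted back to the target.
 */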
4556 
4557 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4558                                 int fd, int cmd, abi_long arg)
4559 {
4560     const argtype *arg_type = ie->arg_type;
4561     int target_size;
4562     void *argptr;
4563     int ret;
4564     struct ifconf *host_ifconf;
4565     uint32_t outbufsz;
4566     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4567     int target_ifreq_size;
4568     int nb_ifreq;
4569     int free_buf = 0;
4570     int i;
4571     int target_ifc_len;
4572     abi_long target_ifc_buf;
4573     int host_ifc_len;
4574     char *host_ifc_buf;
4575 
4576     assert(arg_type[0] == TYPE_PTR);
4577     assert(ie->access == IOC_RW);
4578 
4579     arg_type++;
4580     target_size = thunk_type_size(arg_type, 0);
4581 
4582     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4583     if (!argptr)
4584         return -TARGET_EFAULT;
4585     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4586     unlock_user(argptr, arg, 0);
4587 
4588     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4589     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4590     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4591 
4592     if (target_ifc_buf != 0) {
4593         target_ifc_len = host_ifconf->ifc_len;
4594         nb_ifreq = target_ifc_len / target_ifreq_size;
4595         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4596 
4597         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4598         if (outbufsz > MAX_STRUCT_SIZE) {
4599             /*
4600              * We can't fit all the ifreq entries into the fixed size buffer.
4601              * Allocate one that is large enough and use it instead.
4602              */
4603             host_ifconf = malloc(outbufsz);
4604             if (!host_ifconf) {
4605                 return -TARGET_ENOMEM;
4606             }
4607             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4608             free_buf = 1;
4609         }
4610         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4611 
4612         host_ifconf->ifc_len = host_ifc_len;
4613     } else {
4614         host_ifc_buf = NULL;
4615     }
4616     host_ifconf->ifc_buf = host_ifc_buf;
4617 
4618     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4619     if (!is_error(ret)) {
4620         /* convert host ifc_len to target ifc_len */
4621 
4622         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4623         target_ifc_len = nb_ifreq * target_ifreq_size;
4624         host_ifconf->ifc_len = target_ifc_len;
4625 
4626         /* restore target ifc_buf */
4627 
4628         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4629 
4630         /* copy struct ifconf to target user */
4631 
4632         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4633         if (!argptr)
4634             return -TARGET_EFAULT;
4635         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4636         unlock_user(argptr, arg, target_size);
4637 
4638         if (target_ifc_buf != 0) {
4639             /* copy ifreq[] to target user */
4640             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4641             for (i = 0; i < nb_ifreq ; i++) {
4642                 thunk_convert(argptr + i * target_ifreq_size,
4643                               host_ifc_buf + i * sizeof(struct ifreq),
4644                               ifreq_arg_type, THUNK_TARGET);
4645             }
4646             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4647         }
4648     }
4649 
4650     if (free_buf) {
4651         free(host_ifconf);
4652     }
4653 
4654     return ret;
4655 }
4656 
4657 #if defined(CONFIG_USBFS)
4658 #if HOST_LONG_BITS > 64
4659 #error USBDEVFS thunks do not support >64 bit hosts yet.
4660 #endif
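/*
 * Book-keeping for one in-flight USB request: the guest URB and buffer
 * addresses plus the host copy of the URB that is handed to the kernel.
 * The reap path recovers this record from the returned host URB pointer
 * via offsetof(struct live_urb, host_urb).
 */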
4661 struct live_urb {
4662     uint64_t target_urb_adr;
4663     uint64_t target_buf_adr;
4664     char *target_buf_ptr;
4665     struct usbdevfs_urb host_urb;
4666 };
4667 
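/*
 * Lazily created table of live URBs, keyed on the guest URB address
 * (the first field of struct live_urb), so a guest pointer can be
 * mapped back to its host URB (e.g. for USBDEVFS_DISCARDURB).
 */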
4668 static GHashTable *usbdevfs_urb_hashtable(void)
4669 {
4670     static GHashTable *urb_hashtable;
4671 
4672     if (!urb_hashtable) {
4673         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4674     }
4675     return urb_hashtable;
4676 }
4677 
4678 static void urb_hashtable_insert(struct live_urb *urb)
4679 {
4680     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4681     g_hash_table_insert(urb_hashtable, urb, urb);
4682 }
4683 
4684 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4685 {
4686     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4687     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4688 }
4689 
4690 static void urb_hashtable_remove(struct live_urb *urb)
4691 {
4692     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4693     g_hash_table_remove(urb_hashtable, urb);
4694 }
4695 
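/*
 * Reap a completed URB: the kernel returns the host URB pointer that was
 * submitted, which is mapped back to its live_urb wrapper so the locked
 * guest buffer can be released and both the URB contents and the guest's
 * own URB pointer can be written back to guest memory.
 */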
4696 static abi_long
4697 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4698                           int fd, int cmd, abi_long arg)
4699 {
4700     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4701     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4702     struct live_urb *lurb;
4703     void *argptr;
4704     uint64_t hurb;
4705     int target_size;
4706     uintptr_t target_urb_adr;
4707     abi_long ret;
4708 
4709     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4710 
4711     memset(buf_temp, 0, sizeof(uint64_t));
4712     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4713     if (is_error(ret)) {
4714         return ret;
4715     }
4716 
4717     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4718     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4719     if (!lurb->target_urb_adr) {
4720         return -TARGET_EFAULT;
4721     }
4722     urb_hashtable_remove(lurb);
4723     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4724         lurb->host_urb.buffer_length);
4725     lurb->target_buf_ptr = NULL;
4726 
4727     /* restore the guest buffer pointer */
4728     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4729 
4730     /* update the guest urb struct */
4731     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4732     if (!argptr) {
4733         g_free(lurb);
4734         return -TARGET_EFAULT;
4735     }
4736     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4737     unlock_user(argptr, lurb->target_urb_adr, target_size);
4738 
4739     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4740     /* write back the urb handle */
4741     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4742     if (!argptr) {
4743         g_free(lurb);
4744         return -TARGET_EFAULT;
4745     }
4746 
4747     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4748     target_urb_adr = lurb->target_urb_adr;
4749     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4750     unlock_user(argptr, arg, target_size);
4751 
4752     g_free(lurb);
4753     return ret;
4754 }
4755 
4756 static abi_long
4757 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4758                              uint8_t *buf_temp __attribute__((unused)),
4759                              int fd, int cmd, abi_long arg)
4760 {
4761     struct live_urb *lurb;
4762 
4763     /* map target address back to host URB with metadata. */
4764     lurb = urb_hashtable_lookup(arg);
4765     if (!lurb) {
4766         return -TARGET_EFAULT;
4767     }
4768     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4769 }
4770 
4771 static abi_long
4772 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4773                             int fd, int cmd, abi_long arg)
4774 {
4775     const argtype *arg_type = ie->arg_type;
4776     int target_size;
4777     abi_long ret;
4778     void *argptr;
4779     int rw_dir;
4780     struct live_urb *lurb;
4781 
4782     /*
4783      * each submitted URB needs to map to a unique ID for the
4784      * kernel, and that unique ID needs to be a pointer to
4785      * host memory.  hence, we need to malloc for each URB.
4786      * isochronous transfers have a variable length struct.
4787      */
4788     arg_type++;
4789     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4790 
4791     /* construct host copy of urb and metadata */
4792     lurb = g_try_malloc0(sizeof(struct live_urb));
4793     if (!lurb) {
4794         return -TARGET_ENOMEM;
4795     }
4796 
4797     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4798     if (!argptr) {
4799         g_free(lurb);
4800         return -TARGET_EFAULT;
4801     }
4802     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4803     unlock_user(argptr, arg, 0);
4804 
4805     lurb->target_urb_adr = arg;
4806     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4807 
4808     /* buffer space used depends on endpoint type so lock the entire buffer */
4809     /* control type urbs should check the buffer contents for true direction */
4810     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4811     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4812         lurb->host_urb.buffer_length, 1);
4813     if (lurb->target_buf_ptr == NULL) {
4814         g_free(lurb);
4815         return -TARGET_EFAULT;
4816     }
4817 
4818     /* update buffer pointer in host copy */
4819     lurb->host_urb.buffer = lurb->target_buf_ptr;
4820 
4821     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4822     if (is_error(ret)) {
4823         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4824         g_free(lurb);
4825     } else {
4826         urb_hashtable_insert(lurb);
4827     }
4828 
4829     return ret;
4830 }
4831 #endif /* CONFIG_USBFS */
4832 
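/*
 * Device-mapper ioctls: struct dm_ioctl is followed by a variable-sized
 * payload whose layout depends on the command, so the input and output
 * data for each supported DM_* command is converted explicitly below.
 */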
4833 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4834                             int cmd, abi_long arg)
4835 {
4836     void *argptr;
4837     struct dm_ioctl *host_dm;
4838     abi_long guest_data;
4839     uint32_t guest_data_size;
4840     int target_size;
4841     const argtype *arg_type = ie->arg_type;
4842     abi_long ret;
4843     void *big_buf = NULL;
4844     char *host_data;
4845 
4846     arg_type++;
4847     target_size = thunk_type_size(arg_type, 0);
4848     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4849     if (!argptr) {
4850         ret = -TARGET_EFAULT;
4851         goto out;
4852     }
4853     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4854     unlock_user(argptr, arg, 0);
4855 
4856     /* buf_temp is too small, so fetch things into a bigger buffer */
4857     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4858     memcpy(big_buf, buf_temp, target_size);
4859     buf_temp = big_buf;
4860     host_dm = big_buf;
4861 
4862     guest_data = arg + host_dm->data_start;
4863     if ((guest_data - arg) < 0) {
4864         ret = -TARGET_EINVAL;
4865         goto out;
4866     }
4867     guest_data_size = host_dm->data_size - host_dm->data_start;
4868     host_data = (char*)host_dm + host_dm->data_start;
4869 
4870     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4871     if (!argptr) {
4872         ret = -TARGET_EFAULT;
4873         goto out;
4874     }
4875 
4876     switch (ie->host_cmd) {
4877     case DM_REMOVE_ALL:
4878     case DM_LIST_DEVICES:
4879     case DM_DEV_CREATE:
4880     case DM_DEV_REMOVE:
4881     case DM_DEV_SUSPEND:
4882     case DM_DEV_STATUS:
4883     case DM_DEV_WAIT:
4884     case DM_TABLE_STATUS:
4885     case DM_TABLE_CLEAR:
4886     case DM_TABLE_DEPS:
4887     case DM_LIST_VERSIONS:
4888         /* no input data */
4889         break;
4890     case DM_DEV_RENAME:
4891     case DM_DEV_SET_GEOMETRY:
4892         /* data contains only strings */
4893         memcpy(host_data, argptr, guest_data_size);
4894         break;
4895     case DM_TARGET_MSG:
4896         memcpy(host_data, argptr, guest_data_size);
4897         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4898         break;
4899     case DM_TABLE_LOAD:
4900     {
4901         void *gspec = argptr;
4902         void *cur_data = host_data;
4903         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4904         int spec_size = thunk_type_size(arg_type, 0);
4905         int i;
4906 
4907         for (i = 0; i < host_dm->target_count; i++) {
4908             struct dm_target_spec *spec = cur_data;
4909             uint32_t next;
4910             int slen;
4911 
4912             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4913             slen = strlen((char*)gspec + spec_size) + 1;
4914             next = spec->next;
4915             spec->next = sizeof(*spec) + slen;
4916             strcpy((char*)&spec[1], gspec + spec_size);
4917             gspec += next;
4918             cur_data += spec->next;
4919         }
4920         break;
4921     }
4922     default:
4923         ret = -TARGET_EINVAL;
4924         unlock_user(argptr, guest_data, 0);
4925         goto out;
4926     }
4927     unlock_user(argptr, guest_data, 0);
4928 
4929     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4930     if (!is_error(ret)) {
4931         guest_data = arg + host_dm->data_start;
4932         guest_data_size = host_dm->data_size - host_dm->data_start;
4933         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
4934         switch (ie->host_cmd) {
4935         case DM_REMOVE_ALL:
4936         case DM_DEV_CREATE:
4937         case DM_DEV_REMOVE:
4938         case DM_DEV_RENAME:
4939         case DM_DEV_SUSPEND:
4940         case DM_DEV_STATUS:
4941         case DM_TABLE_LOAD:
4942         case DM_TABLE_CLEAR:
4943         case DM_TARGET_MSG:
4944         case DM_DEV_SET_GEOMETRY:
4945             /* no return data */
4946             break;
4947         case DM_LIST_DEVICES:
4948         {
4949             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4950             uint32_t remaining_data = guest_data_size;
4951             void *cur_data = argptr;
4952             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4953             int nl_size = 12; /* can't use thunk_type_size due to alignment */
4954 
4955             while (1) {
4956                 uint32_t next = nl->next;
4957                 if (next) {
4958                     nl->next = nl_size + (strlen(nl->name) + 1);
4959                 }
4960                 if (remaining_data < nl->next) {
4961                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4962                     break;
4963                 }
4964                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4965                 strcpy(cur_data + nl_size, nl->name);
4966                 cur_data += nl->next;
4967                 remaining_data -= nl->next;
4968                 if (!next) {
4969                     break;
4970                 }
4971                 nl = (void*)nl + next;
4972             }
4973             break;
4974         }
4975         case DM_DEV_WAIT:
4976         case DM_TABLE_STATUS:
4977         {
4978             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4979             void *cur_data = argptr;
4980             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4981             int spec_size = thunk_type_size(arg_type, 0);
4982             int i;
4983 
4984             for (i = 0; i < host_dm->target_count; i++) {
4985                 uint32_t next = spec->next;
4986                 int slen = strlen((char*)&spec[1]) + 1;
4987                 spec->next = (cur_data - argptr) + spec_size + slen;
4988                 if (guest_data_size < spec->next) {
4989                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4990                     break;
4991                 }
4992                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4993                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4994                 cur_data = argptr + spec->next;
4995                 spec = (void*)host_dm + host_dm->data_start + next;
4996             }
4997             break;
4998         }
4999         case DM_TABLE_DEPS:
5000         {
5001             void *hdata = (void*)host_dm + host_dm->data_start;
5002             int count = *(uint32_t*)hdata;
5003             uint64_t *hdev = hdata + 8;
5004             uint64_t *gdev = argptr + 8;
5005             int i;
5006 
5007             *(uint32_t*)argptr = tswap32(count);
5008             for (i = 0; i < count; i++) {
5009                 *gdev = tswap64(*hdev);
5010                 gdev++;
5011                 hdev++;
5012             }
5013             break;
5014         }
5015         case DM_LIST_VERSIONS:
5016         {
5017             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5018             uint32_t remaining_data = guest_data_size;
5019             void *cur_data = argptr;
5020             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5021             int vers_size = thunk_type_size(arg_type, 0);
5022 
5023             while (1) {
5024                 uint32_t next = vers->next;
5025                 if (next) {
5026                     vers->next = vers_size + (strlen(vers->name) + 1);
5027                 }
5028                 if (remaining_data < vers->next) {
5029                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5030                     break;
5031                 }
5032                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5033                 strcpy(cur_data + vers_size, vers->name);
5034                 cur_data += vers->next;
5035                 remaining_data -= vers->next;
5036                 if (!next) {
5037                     break;
5038                 }
5039                 vers = (void*)vers + next;
5040             }
5041             break;
5042         }
5043         default:
5044             unlock_user(argptr, guest_data, 0);
5045             ret = -TARGET_EINVAL;
5046             goto out;
5047         }
5048         unlock_user(argptr, guest_data, guest_data_size);
5049 
5050         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5051         if (!argptr) {
5052             ret = -TARGET_EFAULT;
5053             goto out;
5054         }
5055         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5056         unlock_user(argptr, arg, target_size);
5057     }
5058 out:
5059     g_free(big_buf);
5060     return ret;
5061 }
5062 
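/*
 * BLKPG: struct blkpg_ioctl_arg embeds a pointer to a struct
 * blkpg_partition, which is converted into a local host copy before
 * issuing the host ioctl.
 */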
5063 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5064                                int cmd, abi_long arg)
5065 {
5066     void *argptr;
5067     int target_size;
5068     const argtype *arg_type = ie->arg_type;
5069     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5070     abi_long ret;
5071 
5072     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5073     struct blkpg_partition host_part;
5074 
5075     /* Read and convert blkpg */
5076     arg_type++;
5077     target_size = thunk_type_size(arg_type, 0);
5078     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5079     if (!argptr) {
5080         ret = -TARGET_EFAULT;
5081         goto out;
5082     }
5083     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5084     unlock_user(argptr, arg, 0);
5085 
5086     switch (host_blkpg->op) {
5087     case BLKPG_ADD_PARTITION:
5088     case BLKPG_DEL_PARTITION:
5089         /* payload is struct blkpg_partition */
5090         break;
5091     default:
5092         /* Unknown opcode */
5093         ret = -TARGET_EINVAL;
5094         goto out;
5095     }
5096 
5097     /* Read and convert blkpg->data */
5098     arg = (abi_long)(uintptr_t)host_blkpg->data;
5099     target_size = thunk_type_size(part_arg_type, 0);
5100     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5101     if (!argptr) {
5102         ret = -TARGET_EFAULT;
5103         goto out;
5104     }
5105     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5106     unlock_user(argptr, arg, 0);
5107 
5108     /* Swizzle the data pointer to our local copy and call! */
5109     host_blkpg->data = &host_part;
5110     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5111 
5112 out:
5113     return ret;
5114 }
5115 
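/*
 * Ioctls taking a struct rtentry (routing table entries): the rt_dev
 * field is a pointer to a device name string in guest memory, so the
 * struct is converted field by field in order to lock that string
 * separately.
 */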
5116 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5117                                 int fd, int cmd, abi_long arg)
5118 {
5119     const argtype *arg_type = ie->arg_type;
5120     const StructEntry *se;
5121     const argtype *field_types;
5122     const int *dst_offsets, *src_offsets;
5123     int target_size;
5124     void *argptr;
5125     abi_ulong *target_rt_dev_ptr = NULL;
5126     unsigned long *host_rt_dev_ptr = NULL;
5127     abi_long ret;
5128     int i;
5129 
5130     assert(ie->access == IOC_W);
5131     assert(*arg_type == TYPE_PTR);
5132     arg_type++;
5133     assert(*arg_type == TYPE_STRUCT);
5134     target_size = thunk_type_size(arg_type, 0);
5135     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5136     if (!argptr) {
5137         return -TARGET_EFAULT;
5138     }
5139     arg_type++;
5140     assert(*arg_type == (int)STRUCT_rtentry);
5141     se = struct_entries + *arg_type++;
5142     assert(se->convert[0] == NULL);
5143     /* convert struct here to be able to catch rt_dev string */
5144     field_types = se->field_types;
5145     dst_offsets = se->field_offsets[THUNK_HOST];
5146     src_offsets = se->field_offsets[THUNK_TARGET];
5147     for (i = 0; i < se->nb_fields; i++) {
5148         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5149             assert(*field_types == TYPE_PTRVOID);
5150             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5151             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5152             if (*target_rt_dev_ptr != 0) {
5153                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5154                                                   tswapal(*target_rt_dev_ptr));
5155                 if (!*host_rt_dev_ptr) {
5156                     unlock_user(argptr, arg, 0);
5157                     return -TARGET_EFAULT;
5158                 }
5159             } else {
5160                 *host_rt_dev_ptr = 0;
5161             }
5162             field_types++;
5163             continue;
5164         }
5165         field_types = thunk_convert(buf_temp + dst_offsets[i],
5166                                     argptr + src_offsets[i],
5167                                     field_types, THUNK_HOST);
5168     }
5169     unlock_user(argptr, arg, 0);
5170 
5171     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5172 
5173     assert(host_rt_dev_ptr != NULL);
5174     assert(target_rt_dev_ptr != NULL);
5175     if (*host_rt_dev_ptr != 0) {
5176         unlock_user((void *)*host_rt_dev_ptr,
5177                     *target_rt_dev_ptr, 0);
5178     }
5179     return ret;
5180 }
5181 
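/* KDSIGACCEPT takes a signal number, which must be translated for the host. */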
5182 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5183                                      int fd, int cmd, abi_long arg)
5184 {
5185     int sig = target_to_host_signal(arg);
5186     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5187 }
5188 
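/*
 * SIOCGSTAMP: always perform the host ioctl with a native struct timeval,
 * then copy it out in either the old or the 64-bit target layout,
 * depending on which command the guest used.
 */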
5189 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5190                                     int fd, int cmd, abi_long arg)
5191 {
5192     struct timeval tv;
5193     abi_long ret;
5194 
5195     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5196     if (is_error(ret)) {
5197         return ret;
5198     }
5199 
5200     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5201         if (copy_to_user_timeval(arg, &tv)) {
5202             return -TARGET_EFAULT;
5203         }
5204     } else {
5205         if (copy_to_user_timeval64(arg, &tv)) {
5206             return -TARGET_EFAULT;
5207         }
5208     }
5209 
5210     return ret;
5211 }
5212 
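/* SIOCGSTAMPNS: as for SIOCGSTAMP above, but with a struct timespec. */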
5213 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5214                                       int fd, int cmd, abi_long arg)
5215 {
5216     struct timespec ts;
5217     abi_long ret;
5218 
5219     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5220     if (is_error(ret)) {
5221         return ret;
5222     }
5223 
5224     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5225         if (host_to_target_timespec(arg, &ts)) {
5226             return -TARGET_EFAULT;
5227         }
5228     } else {
5229         if (host_to_target_timespec64(arg, &ts)) {
5230             return -TARGET_EFAULT;
5231         }
5232     }
5233 
5234     return ret;
5235 }
5236 
5237 #ifdef TIOCGPTPEER
5238 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5239                                      int fd, int cmd, abi_long arg)
5240 {
5241     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5242     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5243 }
5244 #endif
5245 
5246 static IOCTLEntry ioctl_entries[] = {
5247 #define IOCTL(cmd, access, ...) \
5248     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5249 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5250     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5251 #define IOCTL_IGNORE(cmd) \
5252     { TARGET_ ## cmd, 0, #cmd },
5253 #include "ioctls.h"
5254     { 0, 0, },
5255 };
5256 
5257 /* ??? Implement proper locking for ioctls.  */
5258 /* do_ioctl() must return target values and target errnos. */
5259 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5260 {
5261     const IOCTLEntry *ie;
5262     const argtype *arg_type;
5263     abi_long ret;
5264     uint8_t buf_temp[MAX_STRUCT_SIZE];
5265     int target_size;
5266     void *argptr;
5267 
5268     ie = ioctl_entries;
5269     for(;;) {
5270         if (ie->target_cmd == 0) {
5271             qemu_log_mask(
5272                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5273             return -TARGET_ENOSYS;
5274         }
5275         if (ie->target_cmd == cmd)
5276             break;
5277         ie++;
5278     }
5279     arg_type = ie->arg_type;
5280     if (ie->do_ioctl) {
5281         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5282     } else if (!ie->host_cmd) {
5283         /* Some architectures define BSD ioctls in their headers
5284            that are not implemented in Linux.  */
5285         return -TARGET_ENOSYS;
5286     }
5287 
5288     switch(arg_type[0]) {
5289     case TYPE_NULL:
5290         /* no argument */
5291         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5292         break;
5293     case TYPE_PTRVOID:
5294     case TYPE_INT:
5295     case TYPE_LONG:
5296     case TYPE_ULONG:
5297         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5298         break;
5299     case TYPE_PTR:
5300         arg_type++;
5301         target_size = thunk_type_size(arg_type, 0);
5302         switch(ie->access) {
5303         case IOC_R:
5304             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5305             if (!is_error(ret)) {
5306                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5307                 if (!argptr)
5308                     return -TARGET_EFAULT;
5309                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5310                 unlock_user(argptr, arg, target_size);
5311             }
5312             break;
5313         case IOC_W:
5314             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5315             if (!argptr)
5316                 return -TARGET_EFAULT;
5317             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5318             unlock_user(argptr, arg, 0);
5319             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5320             break;
5321         default:
5322         case IOC_RW:
5323             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5324             if (!argptr)
5325                 return -TARGET_EFAULT;
5326             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5327             unlock_user(argptr, arg, 0);
5328             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5329             if (!is_error(ret)) {
5330                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5331                 if (!argptr)
5332                     return -TARGET_EFAULT;
5333                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5334                 unlock_user(argptr, arg, target_size);
5335             }
5336             break;
5337         }
5338         break;
5339     default:
5340         qemu_log_mask(LOG_UNIMP,
5341                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5342                       (long)cmd, arg_type[0]);
5343         ret = -TARGET_ENOSYS;
5344         break;
5345     }
5346     return ret;
5347 }
5348 
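/*
 * Translation tables between target and host termios flag bits, used by
 * the struct termios converters below.
 */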
5349 static const bitmask_transtbl iflag_tbl[] = {
5350         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5351         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5352         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5353         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5354         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5355         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5356         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5357         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5358         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5359         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5360         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5361         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5362         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5363         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5364         { 0, 0, 0, 0 }
5365 };
5366 
5367 static const bitmask_transtbl oflag_tbl[] = {
5368 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5369 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5370 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5371 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5372 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5373 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5374 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5375 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5376 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5377 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5378 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5379 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5380 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5381 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5382 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5383 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5384 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5385 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5386 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5387 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5388 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5389 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5390 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5391 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5392 	{ 0, 0, 0, 0 }
5393 };
5394 
5395 static const bitmask_transtbl cflag_tbl[] = {
5396 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5397 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5398 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5399 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5400 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5401 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5402 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5403 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5404 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5405 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5406 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5407 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5408 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5409 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5410 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5411 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5412 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5413 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5414 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5415 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5416 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5417 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5418 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5419 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5420 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5421 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5422 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5423 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5424 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5425 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5426 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5427 	{ 0, 0, 0, 0 }
5428 };
5429 
5430 static const bitmask_transtbl lflag_tbl[] = {
5431 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5432 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5433 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5434 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5435 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5436 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5437 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5438 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5439 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5440 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5441 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5442 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5443 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5444 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5445 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5446 	{ 0, 0, 0, 0 }
5447 };
5448 
5449 static void target_to_host_termios (void *dst, const void *src)
5450 {
5451     struct host_termios *host = dst;
5452     const struct target_termios *target = src;
5453 
5454     host->c_iflag =
5455         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5456     host->c_oflag =
5457         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5458     host->c_cflag =
5459         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5460     host->c_lflag =
5461         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5462     host->c_line = target->c_line;
5463 
5464     memset(host->c_cc, 0, sizeof(host->c_cc));
5465     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5466     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5467     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5468     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5469     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5470     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5471     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5472     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5473     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5474     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5475     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5476     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5477     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5478     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5479     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5480     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5481     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5482 }
5483 
5484 static void host_to_target_termios (void *dst, const void *src)
5485 {
5486     struct target_termios *target = dst;
5487     const struct host_termios *host = src;
5488 
5489     target->c_iflag =
5490         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5491     target->c_oflag =
5492         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5493     target->c_cflag =
5494         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5495     target->c_lflag =
5496         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5497     target->c_line = host->c_line;
5498 
5499     memset(target->c_cc, 0, sizeof(target->c_cc));
5500     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5501     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5502     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5503     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5504     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5505     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5506     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5507     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5508     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5509     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5510     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5511     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5512     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5513     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5514     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5515     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5516     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5517 }
5518 
5519 static const StructEntry struct_termios_def = {
5520     .convert = { host_to_target_termios, target_to_host_termios },
5521     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5522     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5523 };
5524 
5525 static bitmask_transtbl mmap_flags_tbl[] = {
5526     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5527     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5528     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5529     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5530       MAP_ANONYMOUS, MAP_ANONYMOUS },
5531     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5532       MAP_GROWSDOWN, MAP_GROWSDOWN },
5533     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5534       MAP_DENYWRITE, MAP_DENYWRITE },
5535     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5536       MAP_EXECUTABLE, MAP_EXECUTABLE },
5537     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5538     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5539       MAP_NORESERVE, MAP_NORESERVE },
5540     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5541     /* MAP_STACK had been ignored by the kernel for quite some time.
5542        Recognize it for the target insofar as we do not want to pass
5543        it through to the host.  */
5544     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5545     { 0, 0, 0, 0 }
5546 };
5547 
5548 #if defined(TARGET_I386)
5549 
5550 /* NOTE: there is really one LDT for all the threads */
5551 static uint8_t *ldt_table;
5552 
5553 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5554 {
5555     int size;
5556     void *p;
5557 
5558     if (!ldt_table)
5559         return 0;
5560     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5561     if (size > bytecount)
5562         size = bytecount;
5563     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5564     if (!p)
5565         return -TARGET_EFAULT;
5566     /* ??? Should this be byteswapped?  */
5567     memcpy(p, ldt_table, size);
5568     unlock_user(p, ptr, size);
5569     return size;
5570 }
5571 
5572 /* XXX: add locking support */
5573 static abi_long write_ldt(CPUX86State *env,
5574                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5575 {
5576     struct target_modify_ldt_ldt_s ldt_info;
5577     struct target_modify_ldt_ldt_s *target_ldt_info;
5578     int seg_32bit, contents, read_exec_only, limit_in_pages;
5579     int seg_not_present, useable, lm;
5580     uint32_t *lp, entry_1, entry_2;
5581 
5582     if (bytecount != sizeof(ldt_info))
5583         return -TARGET_EINVAL;
5584     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5585         return -TARGET_EFAULT;
5586     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5587     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5588     ldt_info.limit = tswap32(target_ldt_info->limit);
5589     ldt_info.flags = tswap32(target_ldt_info->flags);
5590     unlock_user_struct(target_ldt_info, ptr, 0);
5591 
5592     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5593         return -TARGET_EINVAL;
5594     seg_32bit = ldt_info.flags & 1;
5595     contents = (ldt_info.flags >> 1) & 3;
5596     read_exec_only = (ldt_info.flags >> 3) & 1;
5597     limit_in_pages = (ldt_info.flags >> 4) & 1;
5598     seg_not_present = (ldt_info.flags >> 5) & 1;
5599     useable = (ldt_info.flags >> 6) & 1;
5600 #ifdef TARGET_ABI32
5601     lm = 0;
5602 #else
5603     lm = (ldt_info.flags >> 7) & 1;
5604 #endif
5605     if (contents == 3) {
5606         if (oldmode)
5607             return -TARGET_EINVAL;
5608         if (seg_not_present == 0)
5609             return -TARGET_EINVAL;
5610     }
5611     /* allocate the LDT */
5612     if (!ldt_table) {
5613         env->ldt.base = target_mmap(0,
5614                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5615                                     PROT_READ|PROT_WRITE,
5616                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5617         if (env->ldt.base == -1)
5618             return -TARGET_ENOMEM;
5619         memset(g2h(env->ldt.base), 0,
5620                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5621         env->ldt.limit = 0xffff;
5622         ldt_table = g2h(env->ldt.base);
5623     }
5624 
5625     /* NOTE: same code as Linux kernel */
5626     /* Allow LDTs to be cleared by the user. */
5627     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5628         if (oldmode ||
5629             (contents == 0		&&
5630              read_exec_only == 1	&&
5631              seg_32bit == 0		&&
5632              limit_in_pages == 0	&&
5633              seg_not_present == 1	&&
5634              useable == 0 )) {
5635             entry_1 = 0;
5636             entry_2 = 0;
5637             goto install;
5638         }
5639     }
5640 
5641     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5642         (ldt_info.limit & 0x0ffff);
5643     entry_2 = (ldt_info.base_addr & 0xff000000) |
5644         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5645         (ldt_info.limit & 0xf0000) |
5646         ((read_exec_only ^ 1) << 9) |
5647         (contents << 10) |
5648         ((seg_not_present ^ 1) << 15) |
5649         (seg_32bit << 22) |
5650         (limit_in_pages << 23) |
5651         (lm << 21) |
5652         0x7000;
5653     if (!oldmode)
5654         entry_2 |= (useable << 20);
5655 
5656     /* Install the new entry ...  */
5657 install:
5658     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5659     lp[0] = tswap32(entry_1);
5660     lp[1] = tswap32(entry_2);
5661     return 0;
5662 }
5663 
5664 /* specific and weird i386 syscalls */
5665 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5666                               unsigned long bytecount)
5667 {
5668     abi_long ret;
5669 
5670     switch (func) {
5671     case 0:
5672         ret = read_ldt(ptr, bytecount);
5673         break;
5674     case 1:
5675         ret = write_ldt(env, ptr, bytecount, 1);
5676         break;
5677     case 0x11:
5678         ret = write_ldt(env, ptr, bytecount, 0);
5679         break;
5680     default:
5681         ret = -TARGET_ENOSYS;
5682         break;
5683     }
5684     return ret;
5685 }
5686 
5687 #if defined(TARGET_I386) && defined(TARGET_ABI32)
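/*
 * set_thread_area/get_thread_area: install or read back a TLS descriptor
 * in the guest GDT, using the same descriptor encoding as write_ldt().
 */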
5688 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5689 {
5690     uint64_t *gdt_table = g2h(env->gdt.base);
5691     struct target_modify_ldt_ldt_s ldt_info;
5692     struct target_modify_ldt_ldt_s *target_ldt_info;
5693     int seg_32bit, contents, read_exec_only, limit_in_pages;
5694     int seg_not_present, useable, lm;
5695     uint32_t *lp, entry_1, entry_2;
5696     int i;
5697 
5698     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5699     if (!target_ldt_info)
5700         return -TARGET_EFAULT;
5701     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5702     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5703     ldt_info.limit = tswap32(target_ldt_info->limit);
5704     ldt_info.flags = tswap32(target_ldt_info->flags);
5705     if (ldt_info.entry_number == -1) {
5706         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5707             if (gdt_table[i] == 0) {
5708                 ldt_info.entry_number = i;
5709                 target_ldt_info->entry_number = tswap32(i);
5710                 break;
5711             }
5712         }
5713     }
5714     unlock_user_struct(target_ldt_info, ptr, 1);
5715 
5716     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5717         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5718            return -TARGET_EINVAL;
5719     seg_32bit = ldt_info.flags & 1;
5720     contents = (ldt_info.flags >> 1) & 3;
5721     read_exec_only = (ldt_info.flags >> 3) & 1;
5722     limit_in_pages = (ldt_info.flags >> 4) & 1;
5723     seg_not_present = (ldt_info.flags >> 5) & 1;
5724     useable = (ldt_info.flags >> 6) & 1;
5725 #ifdef TARGET_ABI32
5726     lm = 0;
5727 #else
5728     lm = (ldt_info.flags >> 7) & 1;
5729 #endif
5730 
5731     if (contents == 3) {
5732         if (seg_not_present == 0)
5733             return -TARGET_EINVAL;
5734     }
5735 
5736     /* NOTE: same code as Linux kernel */
5737     /* Allow LDTs to be cleared by the user. */
5738     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5739         if ((contents == 0             &&
5740              read_exec_only == 1       &&
5741              seg_32bit == 0            &&
5742              limit_in_pages == 0       &&
5743              seg_not_present == 1      &&
5744              useable == 0 )) {
5745             entry_1 = 0;
5746             entry_2 = 0;
5747             goto install;
5748         }
5749     }
5750 
5751     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5752         (ldt_info.limit & 0x0ffff);
5753     entry_2 = (ldt_info.base_addr & 0xff000000) |
5754         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5755         (ldt_info.limit & 0xf0000) |
5756         ((read_exec_only ^ 1) << 9) |
5757         (contents << 10) |
5758         ((seg_not_present ^ 1) << 15) |
5759         (seg_32bit << 22) |
5760         (limit_in_pages << 23) |
5761         (useable << 20) |
5762         (lm << 21) |
5763         0x7000;
5764 
5765     /* Install the new entry ...  */
5766 install:
5767     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5768     lp[0] = tswap32(entry_1);
5769     lp[1] = tswap32(entry_2);
5770     return 0;
5771 }
5772 
5773 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5774 {
5775     struct target_modify_ldt_ldt_s *target_ldt_info;
5776     uint64_t *gdt_table = g2h(env->gdt.base);
5777     uint32_t base_addr, limit, flags;
5778     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5779     int seg_not_present, useable, lm;
5780     uint32_t *lp, entry_1, entry_2;
5781 
5782     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5783     if (!target_ldt_info)
5784         return -TARGET_EFAULT;
5785     idx = tswap32(target_ldt_info->entry_number);
5786     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5787         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5788         unlock_user_struct(target_ldt_info, ptr, 1);
5789         return -TARGET_EINVAL;
5790     }
5791     lp = (uint32_t *)(gdt_table + idx);
5792     entry_1 = tswap32(lp[0]);
5793     entry_2 = tswap32(lp[1]);
5794 
5795     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5796     contents = (entry_2 >> 10) & 3;
5797     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5798     seg_32bit = (entry_2 >> 22) & 1;
5799     limit_in_pages = (entry_2 >> 23) & 1;
5800     useable = (entry_2 >> 20) & 1;
5801 #ifdef TARGET_ABI32
5802     lm = 0;
5803 #else
5804     lm = (entry_2 >> 21) & 1;
5805 #endif
5806     flags = (seg_32bit << 0) | (contents << 1) |
5807         (read_exec_only << 3) | (limit_in_pages << 4) |
5808         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5809     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5810     base_addr = (entry_1 >> 16) |
5811         (entry_2 & 0xff000000) |
5812         ((entry_2 & 0xff) << 16);
5813     target_ldt_info->base_addr = tswapal(base_addr);
5814     target_ldt_info->limit = tswap32(limit);
5815     target_ldt_info->flags = tswap32(flags);
5816     unlock_user_struct(target_ldt_info, ptr, 1);
5817     return 0;
5818 }
5819 #endif /* TARGET_I386 && TARGET_ABI32 */
5820 
5821 #ifndef TARGET_ABI32
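/* arch_prctl: get or set the FS/GS segment base for 64-bit guests. */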
5822 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5823 {
5824     abi_long ret = 0;
5825     abi_ulong val;
5826     int idx;
5827 
5828     switch(code) {
5829     case TARGET_ARCH_SET_GS:
5830     case TARGET_ARCH_SET_FS:
5831         if (code == TARGET_ARCH_SET_GS)
5832             idx = R_GS;
5833         else
5834             idx = R_FS;
5835         cpu_x86_load_seg(env, idx, 0);
5836         env->segs[idx].base = addr;
5837         break;
5838     case TARGET_ARCH_GET_GS:
5839     case TARGET_ARCH_GET_FS:
5840         if (code == TARGET_ARCH_GET_GS)
5841             idx = R_GS;
5842         else
5843             idx = R_FS;
5844         val = env->segs[idx].base;
5845         if (put_user(val, addr, abi_ulong))
5846             ret = -TARGET_EFAULT;
5847         break;
5848     default:
5849         ret = -TARGET_EINVAL;
5850         break;
5851     }
5852     return ret;
5853 }
5854 #endif
5855 
5856 #endif /* defined(TARGET_I386) */
5857 
5858 #define NEW_STACK_SIZE 0x40000
5859 
5860 
5861 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
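/*
 * Arguments handed from do_fork() to clone_func(); the mutex/cond pair
 * lets the parent wait until the child has published its TID.
 */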
5862 typedef struct {
5863     CPUArchState *env;
5864     pthread_mutex_t mutex;
5865     pthread_cond_t cond;
5866     pthread_t thread;
5867     uint32_t tid;
5868     abi_ulong child_tidptr;
5869     abi_ulong parent_tidptr;
5870     sigset_t sigmask;
5871 } new_thread_info;
5872 
5873 static void *clone_func(void *arg)
5874 {
5875     new_thread_info *info = arg;
5876     CPUArchState *env;
5877     CPUState *cpu;
5878     TaskState *ts;
5879 
5880     rcu_register_thread();
5881     tcg_register_thread();
5882     env = info->env;
5883     cpu = env_cpu(env);
5884     thread_cpu = cpu;
5885     ts = (TaskState *)cpu->opaque;
5886     info->tid = sys_gettid();
5887     task_settid(ts);
5888     if (info->child_tidptr)
5889         put_user_u32(info->tid, info->child_tidptr);
5890     if (info->parent_tidptr)
5891         put_user_u32(info->tid, info->parent_tidptr);
5892     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5893     /* Enable signals.  */
5894     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5895     /* Signal to the parent that we're ready.  */
5896     pthread_mutex_lock(&info->mutex);
5897     pthread_cond_broadcast(&info->cond);
5898     pthread_mutex_unlock(&info->mutex);
5899     /* Wait until the parent has finished initializing the tls state.  */
5900     pthread_mutex_lock(&clone_lock);
5901     pthread_mutex_unlock(&clone_lock);
5902     cpu_loop(env);
5903     /* never exits */
5904     return NULL;
5905 }
5906 
5907 /* do_fork() must return host values and target errnos (unlike most
5908    do_*() functions). */
5909 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5910                    abi_ulong parent_tidptr, target_ulong newtls,
5911                    abi_ulong child_tidptr)
5912 {
5913     CPUState *cpu = env_cpu(env);
5914     int ret;
5915     TaskState *ts;
5916     CPUState *new_cpu;
5917     CPUArchState *new_env;
5918     sigset_t sigmask;
5919 
5920     flags &= ~CLONE_IGNORED_FLAGS;
5921 
5922     /* Emulate vfork() with fork() */
5923     if (flags & CLONE_VFORK)
5924         flags &= ~(CLONE_VFORK | CLONE_VM);
5925 
5926     if (flags & CLONE_VM) {
5927         TaskState *parent_ts = (TaskState *)cpu->opaque;
5928         new_thread_info info;
5929         pthread_attr_t attr;
5930 
5931         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5932             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5933             return -TARGET_EINVAL;
5934         }
5935 
5936         ts = g_new0(TaskState, 1);
5937         init_task_state(ts);
5938 
5939         /* Grab a mutex so that thread setup appears atomic.  */
5940         pthread_mutex_lock(&clone_lock);
5941 
5942         /* we create a new CPU instance. */
5943         new_env = cpu_copy(env);
5944         /* Init regs that differ from the parent.  */
5945         cpu_clone_regs_child(new_env, newsp, flags);
5946         cpu_clone_regs_parent(env, flags);
5947         new_cpu = env_cpu(new_env);
5948         new_cpu->opaque = ts;
5949         ts->bprm = parent_ts->bprm;
5950         ts->info = parent_ts->info;
5951         ts->signal_mask = parent_ts->signal_mask;
5952 
5953         if (flags & CLONE_CHILD_CLEARTID) {
5954             ts->child_tidptr = child_tidptr;
5955         }
5956 
5957         if (flags & CLONE_SETTLS) {
5958             cpu_set_tls (new_env, newtls);
5959         }
5960 
5961         memset(&info, 0, sizeof(info));
5962         pthread_mutex_init(&info.mutex, NULL);
5963         pthread_mutex_lock(&info.mutex);
5964         pthread_cond_init(&info.cond, NULL);
5965         info.env = new_env;
5966         if (flags & CLONE_CHILD_SETTID) {
5967             info.child_tidptr = child_tidptr;
5968         }
5969         if (flags & CLONE_PARENT_SETTID) {
5970             info.parent_tidptr = parent_tidptr;
5971         }
5972 
5973         ret = pthread_attr_init(&attr);
5974         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5975         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5976         /* It is not safe to deliver signals until the child has finished
5977            initializing, so temporarily block all signals.  */
5978         sigfillset(&sigmask);
5979         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5980         cpu->random_seed = qemu_guest_random_seed_thread_part1();
5981 
5982         /* If this is our first additional thread, we need to ensure we
5983          * generate code for parallel execution and flush old translations.
5984          */
5985         if (!parallel_cpus) {
5986             parallel_cpus = true;
5987             tb_flush(cpu);
5988         }
5989 
5990         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5991         /* TODO: Free new CPU state if thread creation failed.  */
5992 
5993         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5994         pthread_attr_destroy(&attr);
5995         if (ret == 0) {
5996             /* Wait for the child to initialize.  */
5997             pthread_cond_wait(&info.cond, &info.mutex);
5998             ret = info.tid;
5999         } else {
6000             ret = -1;
6001         }
6002         pthread_mutex_unlock(&info.mutex);
6003         pthread_cond_destroy(&info.cond);
6004         pthread_mutex_destroy(&info.mutex);
6005         pthread_mutex_unlock(&clone_lock);
6006     } else {
6007         /* if no CLONE_VM, we consider it a fork */
6008         if (flags & CLONE_INVALID_FORK_FLAGS) {
6009             return -TARGET_EINVAL;
6010         }
6011 
6012         /* We can't support custom termination signals */
6013         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6014             return -TARGET_EINVAL;
6015         }
6016 
6017         if (block_signals()) {
6018             return -TARGET_ERESTARTSYS;
6019         }
6020 
6021         fork_start();
6022         ret = fork();
6023         if (ret == 0) {
6024             /* Child Process.  */
6025             cpu_clone_regs_child(env, newsp, flags);
6026             fork_end(1);
6027             /* There is a race condition here.  The parent process could
6028                theoretically read the TID in the child process before the child
6029                tid is set.  This would require using either ptrace
6030                (not implemented) or having *_tidptr to point at a shared memory
6031                mapping.  We can't repeat the spinlock hack used above because
6032                the child process gets its own copy of the lock.  */
6033             if (flags & CLONE_CHILD_SETTID)
6034                 put_user_u32(sys_gettid(), child_tidptr);
6035             if (flags & CLONE_PARENT_SETTID)
6036                 put_user_u32(sys_gettid(), parent_tidptr);
6037             ts = (TaskState *)cpu->opaque;
6038             if (flags & CLONE_SETTLS)
6039                 cpu_set_tls (env, newtls);
6040             if (flags & CLONE_CHILD_CLEARTID)
6041                 ts->child_tidptr = child_tidptr;
6042         } else {
6043             cpu_clone_regs_parent(env, flags);
6044             fork_end(0);
6045         }
6046     }
6047     return ret;
6048 }
6049 
6050 /* warning: doesn't handle Linux-specific flags... */
6051 static int target_to_host_fcntl_cmd(int cmd)
6052 {
6053     int ret;
6054 
6055     switch(cmd) {
6056     case TARGET_F_DUPFD:
6057     case TARGET_F_GETFD:
6058     case TARGET_F_SETFD:
6059     case TARGET_F_GETFL:
6060     case TARGET_F_SETFL:
6061         ret = cmd;
6062         break;
6063     case TARGET_F_GETLK:
6064         ret = F_GETLK64;
6065         break;
6066     case TARGET_F_SETLK:
6067         ret = F_SETLK64;
6068         break;
6069     case TARGET_F_SETLKW:
6070         ret = F_SETLKW64;
6071         break;
6072     case TARGET_F_GETOWN:
6073         ret = F_GETOWN;
6074         break;
6075     case TARGET_F_SETOWN:
6076         ret = F_SETOWN;
6077         break;
6078     case TARGET_F_GETSIG:
6079         ret = F_GETSIG;
6080         break;
6081     case TARGET_F_SETSIG:
6082         ret = F_SETSIG;
6083         break;
6084 #if TARGET_ABI_BITS == 32
6085     case TARGET_F_GETLK64:
6086         ret = F_GETLK64;
6087         break;
6088     case TARGET_F_SETLK64:
6089         ret = F_SETLK64;
6090         break;
6091     case TARGET_F_SETLKW64:
6092         ret = F_SETLKW64;
6093         break;
6094 #endif
6095     case TARGET_F_SETLEASE:
6096         ret = F_SETLEASE;
6097         break;
6098     case TARGET_F_GETLEASE:
6099         ret = F_GETLEASE;
6100         break;
6101 #ifdef F_DUPFD_CLOEXEC
6102     case TARGET_F_DUPFD_CLOEXEC:
6103         ret = F_DUPFD_CLOEXEC;
6104         break;
6105 #endif
6106     case TARGET_F_NOTIFY:
6107         ret = F_NOTIFY;
6108         break;
6109 #ifdef F_GETOWN_EX
6110     case TARGET_F_GETOWN_EX:
6111         ret = F_GETOWN_EX;
6112         break;
6113 #endif
6114 #ifdef F_SETOWN_EX
6115     case TARGET_F_SETOWN_EX:
6116         ret = F_SETOWN_EX;
6117         break;
6118 #endif
6119 #ifdef F_SETPIPE_SZ
6120     case TARGET_F_SETPIPE_SZ:
6121         ret = F_SETPIPE_SZ;
6122         break;
6123     case TARGET_F_GETPIPE_SZ:
6124         ret = F_GETPIPE_SZ;
6125         break;
6126 #endif
6127     default:
6128         ret = -TARGET_EINVAL;
6129         break;
6130     }
6131 
6132 #if defined(__powerpc64__)
6133     /* On PPC64, glibc headers define the F_*LK* constants as 12, 13 and 14,
6134      * which the kernel does not support. The glibc fcntl call actually adjusts
6135      * them to 5, 6 and 7 before making the syscall(). Since we make the
6136      * syscall directly, adjust to what is supported by the kernel.
6137      */
6138     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6139         ret -= F_GETLK64 - 5;
6140     }
6141 #endif
6142 
6143     return ret;
6144 }
6145 
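/*
 * The flock lock types are translated in each direction by expanding this
 * table with a direction-specific TRANSTBL_CONVERT macro.
 */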
6146 #define FLOCK_TRANSTBL \
6147     switch (type) { \
6148     TRANSTBL_CONVERT(F_RDLCK); \
6149     TRANSTBL_CONVERT(F_WRLCK); \
6150     TRANSTBL_CONVERT(F_UNLCK); \
6151     TRANSTBL_CONVERT(F_EXLCK); \
6152     TRANSTBL_CONVERT(F_SHLCK); \
6153     }
6154 
6155 static int target_to_host_flock(int type)
6156 {
6157 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6158     FLOCK_TRANSTBL
6159 #undef  TRANSTBL_CONVERT
6160     return -TARGET_EINVAL;
6161 }
6162 
6163 static int host_to_target_flock(int type)
6164 {
6165 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6166     FLOCK_TRANSTBL
6167 #undef  TRANSTBL_CONVERT
6168     /* if we don't know how to convert the value coming
6169      * from the host, we copy it to the target field as-is
6170      */
6171     return type;
6172 }
6173 
6174 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6175                                             abi_ulong target_flock_addr)
6176 {
6177     struct target_flock *target_fl;
6178     int l_type;
6179 
6180     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6181         return -TARGET_EFAULT;
6182     }
6183 
6184     __get_user(l_type, &target_fl->l_type);
6185     l_type = target_to_host_flock(l_type);
6186     if (l_type < 0) {
6187         return l_type;
6188     }
6189     fl->l_type = l_type;
6190     __get_user(fl->l_whence, &target_fl->l_whence);
6191     __get_user(fl->l_start, &target_fl->l_start);
6192     __get_user(fl->l_len, &target_fl->l_len);
6193     __get_user(fl->l_pid, &target_fl->l_pid);
6194     unlock_user_struct(target_fl, target_flock_addr, 0);
6195     return 0;
6196 }
6197 
6198 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6199                                           const struct flock64 *fl)
6200 {
6201     struct target_flock *target_fl;
6202     short l_type;
6203 
6204     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6205         return -TARGET_EFAULT;
6206     }
6207 
6208     l_type = host_to_target_flock(fl->l_type);
6209     __put_user(l_type, &target_fl->l_type);
6210     __put_user(fl->l_whence, &target_fl->l_whence);
6211     __put_user(fl->l_start, &target_fl->l_start);
6212     __put_user(fl->l_len, &target_fl->l_len);
6213     __put_user(fl->l_pid, &target_fl->l_pid);
6214     unlock_user_struct(target_fl, target_flock_addr, 1);
6215     return 0;
6216 }
6217 
6218 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6219 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6220 
6221 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6222 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6223                                                    abi_ulong target_flock_addr)
6224 {
6225     struct target_oabi_flock64 *target_fl;
6226     int l_type;
6227 
6228     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6229         return -TARGET_EFAULT;
6230     }
6231 
6232     __get_user(l_type, &target_fl->l_type);
6233     l_type = target_to_host_flock(l_type);
6234     if (l_type < 0) {
6235         return l_type;
6236     }
6237     fl->l_type = l_type;
6238     __get_user(fl->l_whence, &target_fl->l_whence);
6239     __get_user(fl->l_start, &target_fl->l_start);
6240     __get_user(fl->l_len, &target_fl->l_len);
6241     __get_user(fl->l_pid, &target_fl->l_pid);
6242     unlock_user_struct(target_fl, target_flock_addr, 0);
6243     return 0;
6244 }
6245 
6246 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6247                                                  const struct flock64 *fl)
6248 {
6249     struct target_oabi_flock64 *target_fl;
6250     short l_type;
6251 
6252     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6253         return -TARGET_EFAULT;
6254     }
6255 
6256     l_type = host_to_target_flock(fl->l_type);
6257     __put_user(l_type, &target_fl->l_type);
6258     __put_user(fl->l_whence, &target_fl->l_whence);
6259     __put_user(fl->l_start, &target_fl->l_start);
6260     __put_user(fl->l_len, &target_fl->l_len);
6261     __put_user(fl->l_pid, &target_fl->l_pid);
6262     unlock_user_struct(target_fl, target_flock_addr, 1);
6263     return 0;
6264 }
6265 #endif
6266 
6267 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6268                                               abi_ulong target_flock_addr)
6269 {
6270     struct target_flock64 *target_fl;
6271     int l_type;
6272 
6273     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6274         return -TARGET_EFAULT;
6275     }
6276 
6277     __get_user(l_type, &target_fl->l_type);
6278     l_type = target_to_host_flock(l_type);
6279     if (l_type < 0) {
6280         return l_type;
6281     }
6282     fl->l_type = l_type;
6283     __get_user(fl->l_whence, &target_fl->l_whence);
6284     __get_user(fl->l_start, &target_fl->l_start);
6285     __get_user(fl->l_len, &target_fl->l_len);
6286     __get_user(fl->l_pid, &target_fl->l_pid);
6287     unlock_user_struct(target_fl, target_flock_addr, 0);
6288     return 0;
6289 }
6290 
6291 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6292                                             const struct flock64 *fl)
6293 {
6294     struct target_flock64 *target_fl;
6295     short l_type;
6296 
6297     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6298         return -TARGET_EFAULT;
6299     }
6300 
6301     l_type = host_to_target_flock(fl->l_type);
6302     __put_user(l_type, &target_fl->l_type);
6303     __put_user(fl->l_whence, &target_fl->l_whence);
6304     __put_user(fl->l_start, &target_fl->l_start);
6305     __put_user(fl->l_len, &target_fl->l_len);
6306     __put_user(fl->l_pid, &target_fl->l_pid);
6307     unlock_user_struct(target_fl, target_flock_addr, 1);
6308     return 0;
6309 }
6310 
6311 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6312 {
6313     struct flock64 fl64;
6314 #ifdef F_GETOWN_EX
6315     struct f_owner_ex fox;
6316     struct target_f_owner_ex *target_fox;
6317 #endif
6318     abi_long ret;
6319     int host_cmd = target_to_host_fcntl_cmd(cmd);
6320 
6321     if (host_cmd == -TARGET_EINVAL)
6322         return host_cmd;
6323 
6324     switch(cmd) {
6325     case TARGET_F_GETLK:
6326         ret = copy_from_user_flock(&fl64, arg);
6327         if (ret) {
6328             return ret;
6329         }
6330         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6331         if (ret == 0) {
6332             ret = copy_to_user_flock(arg, &fl64);
6333         }
6334         break;
6335 
6336     case TARGET_F_SETLK:
6337     case TARGET_F_SETLKW:
6338         ret = copy_from_user_flock(&fl64, arg);
6339         if (ret) {
6340             return ret;
6341         }
6342         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6343         break;
6344 
6345     case TARGET_F_GETLK64:
6346         ret = copy_from_user_flock64(&fl64, arg);
6347         if (ret) {
6348             return ret;
6349         }
6350         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6351         if (ret == 0) {
6352             ret = copy_to_user_flock64(arg, &fl64);
6353         }
6354         break;
6355     case TARGET_F_SETLK64:
6356     case TARGET_F_SETLKW64:
6357         ret = copy_from_user_flock64(&fl64, arg);
6358         if (ret) {
6359             return ret;
6360         }
6361         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6362         break;
6363 
6364     case TARGET_F_GETFL:
6365         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6366         if (ret >= 0) {
6367             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6368         }
6369         break;
6370 
6371     case TARGET_F_SETFL:
6372         ret = get_errno(safe_fcntl(fd, host_cmd,
6373                                    target_to_host_bitmask(arg,
6374                                                           fcntl_flags_tbl)));
6375         break;
6376 
6377 #ifdef F_GETOWN_EX
6378     case TARGET_F_GETOWN_EX:
6379         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6380         if (ret >= 0) {
6381             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6382                 return -TARGET_EFAULT;
6383             target_fox->type = tswap32(fox.type);
6384             target_fox->pid = tswap32(fox.pid);
6385             unlock_user_struct(target_fox, arg, 1);
6386         }
6387         break;
6388 #endif
6389 
6390 #ifdef F_SETOWN_EX
6391     case TARGET_F_SETOWN_EX:
6392         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6393             return -TARGET_EFAULT;
6394         fox.type = tswap32(target_fox->type);
6395         fox.pid = tswap32(target_fox->pid);
6396         unlock_user_struct(target_fox, arg, 0);
6397         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6398         break;
6399 #endif
6400 
6401     case TARGET_F_SETOWN:
6402     case TARGET_F_GETOWN:
6403     case TARGET_F_SETSIG:
6404     case TARGET_F_GETSIG:
6405     case TARGET_F_SETLEASE:
6406     case TARGET_F_GETLEASE:
6407     case TARGET_F_SETPIPE_SZ:
6408     case TARGET_F_GETPIPE_SZ:
6409         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6410         break;
6411 
6412     default:
6413         ret = get_errno(safe_fcntl(fd, cmd, arg));
6414         break;
6415     }
6416     return ret;
6417 }
6418 
6419 #ifdef USE_UID16
6420 
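     /*
      * With the 16-bit UID syscalls, IDs that do not fit in 16 bits are
      * reported to the guest as 65534 (the traditional overflow ID), while
      * the 16-bit -1 used to mean "no change" is widened back to a 32-bit -1.
      */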
6421 static inline int high2lowuid(int uid)
6422 {
6423     if (uid > 65535)
6424         return 65534;
6425     else
6426         return uid;
6427 }
6428 
6429 static inline int high2lowgid(int gid)
6430 {
6431     if (gid > 65535)
6432         return 65534;
6433     else
6434         return gid;
6435 }
6436 
6437 static inline int low2highuid(int uid)
6438 {
6439     if ((int16_t)uid == -1)
6440         return -1;
6441     else
6442         return uid;
6443 }
6444 
6445 static inline int low2highgid(int gid)
6446 {
6447     if ((int16_t)gid == -1)
6448         return -1;
6449     else
6450         return gid;
6451 }
6452 static inline int tswapid(int id)
6453 {
6454     return tswap16(id);
6455 }
6456 
6457 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6458 
6459 #else /* !USE_UID16 */
6460 static inline int high2lowuid(int uid)
6461 {
6462     return uid;
6463 }
6464 static inline int high2lowgid(int gid)
6465 {
6466     return gid;
6467 }
6468 static inline int low2highuid(int uid)
6469 {
6470     return uid;
6471 }
6472 static inline int low2highgid(int gid)
6473 {
6474     return gid;
6475 }
6476 static inline int tswapid(int id)
6477 {
6478     return tswap32(id);
6479 }
6480 
6481 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6482 
6483 #endif /* USE_UID16 */
6484 
6485 /* We must do direct syscalls for setting UID/GID, because we want to
6486  * implement the Linux system call semantics of "change only for this thread",
6487  * not the libc/POSIX semantics of "change for all threads in process".
6488  * (See http://ewontfix.com/17/ for more details.)
6489  * We use the 32-bit version of the syscalls if present; if it is not
6490  * then either the host architecture supports 32-bit UIDs natively with
6491  * the standard syscall, or the 16-bit UID is the best we can do.
6492  */
6493 #ifdef __NR_setuid32
6494 #define __NR_sys_setuid __NR_setuid32
6495 #else
6496 #define __NR_sys_setuid __NR_setuid
6497 #endif
6498 #ifdef __NR_setgid32
6499 #define __NR_sys_setgid __NR_setgid32
6500 #else
6501 #define __NR_sys_setgid __NR_setgid
6502 #endif
6503 #ifdef __NR_setresuid32
6504 #define __NR_sys_setresuid __NR_setresuid32
6505 #else
6506 #define __NR_sys_setresuid __NR_setresuid
6507 #endif
6508 #ifdef __NR_setresgid32
6509 #define __NR_sys_setresgid __NR_setresgid32
6510 #else
6511 #define __NR_sys_setresgid __NR_setresgid
6512 #endif
6513 
6514 _syscall1(int, sys_setuid, uid_t, uid)
6515 _syscall1(int, sys_setgid, gid_t, gid)
6516 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6517 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6518 
6519 void syscall_init(void)
6520 {
6521     IOCTLEntry *ie;
6522     const argtype *arg_type;
6523     int size;
6524     int i;
6525 
6526     thunk_init(STRUCT_MAX);
6527 
6528 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6529 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6530 #include "syscall_types.h"
6531 #undef STRUCT
6532 #undef STRUCT_SPECIAL
6533 
6534     /* Build the target_to_host_errno_table[] from
6535      * host_to_target_errno_table[]. */
6536     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6537         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6538     }
6539 
6540     /* We patch the ioctl size if necessary. We rely on the fact that
6541        no ioctl has all bits set to '1' in the size field. */
6542     ie = ioctl_entries;
6543     while (ie->target_cmd != 0) {
6544         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6545             TARGET_IOC_SIZEMASK) {
6546             arg_type = ie->arg_type;
6547             if (arg_type[0] != TYPE_PTR) {
6548                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6549                         ie->target_cmd);
6550                 exit(1);
6551             }
6552             arg_type++;
6553             size = thunk_type_size(arg_type, 0);
6554             ie->target_cmd = (ie->target_cmd &
6555                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6556                 (size << TARGET_IOC_SIZESHIFT);
6557         }
6558 
6559         /* automatic consistency check if same arch */
6560 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6561     (defined(__x86_64__) && defined(TARGET_X86_64))
6562         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6563             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6564                     ie->name, ie->target_cmd, ie->host_cmd);
6565         }
6566 #endif
6567         ie++;
6568     }
6569 }
6570 
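     /*
      * Reassemble a 64-bit offset from the two guest registers used by
      * 32-bit ABIs; which word holds the high half depends on the guest
      * endianness.
      */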
6571 #if TARGET_ABI_BITS == 32
6572 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6573 {
6574 #ifdef TARGET_WORDS_BIGENDIAN
6575     return ((uint64_t)word0 << 32) | word1;
6576 #else
6577     return ((uint64_t)word1 << 32) | word0;
6578 #endif
6579 }
6580 #else /* TARGET_ABI_BITS == 32 */
6581 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6582 {
6583     return word0;
6584 }
6585 #endif /* TARGET_ABI_BITS != 32 */
6586 
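     /*
      * Some 32-bit ABIs require 64-bit register pairs to start at an even
      * register; regpairs_aligned() reports this, in which case the pair is
      * shifted up by one argument slot in the helpers below.
      */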
6587 #ifdef TARGET_NR_truncate64
6588 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6589                                          abi_long arg2,
6590                                          abi_long arg3,
6591                                          abi_long arg4)
6592 {
6593     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6594         arg2 = arg3;
6595         arg3 = arg4;
6596     }
6597     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6598 }
6599 #endif
6600 
6601 #ifdef TARGET_NR_ftruncate64
6602 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6603                                           abi_long arg2,
6604                                           abi_long arg3,
6605                                           abi_long arg4)
6606 {
6607     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6608         arg2 = arg3;
6609         arg3 = arg4;
6610     }
6611     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6612 }
6613 #endif
6614 
6615 #if defined(TARGET_NR_timer_settime) || \
6616     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
6617 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6618                                                  abi_ulong target_addr)
6619 {
6620     struct target_itimerspec *target_itspec;
6621 
6622     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6623         return -TARGET_EFAULT;
6624     }
6625 
6626     host_itspec->it_interval.tv_sec =
6627                             tswapal(target_itspec->it_interval.tv_sec);
6628     host_itspec->it_interval.tv_nsec =
6629                             tswapal(target_itspec->it_interval.tv_nsec);
6630     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6631     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6632 
6633     unlock_user_struct(target_itspec, target_addr, 1);
6634     return 0;
6635 }
6636 #endif
6637 
6638 #if ((defined(TARGET_NR_timerfd_gettime) || \
6639       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
6640     defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
6641 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6642                                                struct itimerspec *host_its)
6643 {
6644     struct target_itimerspec *target_itspec;
6645 
6646     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6647         return -TARGET_EFAULT;
6648     }
6649 
6650     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6651     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6652 
6653     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6654     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6655 
6656     unlock_user_struct(target_itspec, target_addr, 0);
6657     return 0;
6658 }
6659 #endif
6660 
6661 #if defined(TARGET_NR_adjtimex) || \
6662     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6663 static inline abi_long target_to_host_timex(struct timex *host_tx,
6664                                             abi_long target_addr)
6665 {
6666     struct target_timex *target_tx;
6667 
6668     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6669         return -TARGET_EFAULT;
6670     }
6671 
6672     __get_user(host_tx->modes, &target_tx->modes);
6673     __get_user(host_tx->offset, &target_tx->offset);
6674     __get_user(host_tx->freq, &target_tx->freq);
6675     __get_user(host_tx->maxerror, &target_tx->maxerror);
6676     __get_user(host_tx->esterror, &target_tx->esterror);
6677     __get_user(host_tx->status, &target_tx->status);
6678     __get_user(host_tx->constant, &target_tx->constant);
6679     __get_user(host_tx->precision, &target_tx->precision);
6680     __get_user(host_tx->tolerance, &target_tx->tolerance);
6681     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6682     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6683     __get_user(host_tx->tick, &target_tx->tick);
6684     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6685     __get_user(host_tx->jitter, &target_tx->jitter);
6686     __get_user(host_tx->shift, &target_tx->shift);
6687     __get_user(host_tx->stabil, &target_tx->stabil);
6688     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6689     __get_user(host_tx->calcnt, &target_tx->calcnt);
6690     __get_user(host_tx->errcnt, &target_tx->errcnt);
6691     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6692     __get_user(host_tx->tai, &target_tx->tai);
6693 
6694     unlock_user_struct(target_tx, target_addr, 0);
6695     return 0;
6696 }
6697 
6698 static inline abi_long host_to_target_timex(abi_long target_addr,
6699                                             struct timex *host_tx)
6700 {
6701     struct target_timex *target_tx;
6702 
6703     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6704         return -TARGET_EFAULT;
6705     }
6706 
6707     __put_user(host_tx->modes, &target_tx->modes);
6708     __put_user(host_tx->offset, &target_tx->offset);
6709     __put_user(host_tx->freq, &target_tx->freq);
6710     __put_user(host_tx->maxerror, &target_tx->maxerror);
6711     __put_user(host_tx->esterror, &target_tx->esterror);
6712     __put_user(host_tx->status, &target_tx->status);
6713     __put_user(host_tx->constant, &target_tx->constant);
6714     __put_user(host_tx->precision, &target_tx->precision);
6715     __put_user(host_tx->tolerance, &target_tx->tolerance);
6716     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6717     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6718     __put_user(host_tx->tick, &target_tx->tick);
6719     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6720     __put_user(host_tx->jitter, &target_tx->jitter);
6721     __put_user(host_tx->shift, &target_tx->shift);
6722     __put_user(host_tx->stabil, &target_tx->stabil);
6723     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6724     __put_user(host_tx->calcnt, &target_tx->calcnt);
6725     __put_user(host_tx->errcnt, &target_tx->errcnt);
6726     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6727     __put_user(host_tx->tai, &target_tx->tai);
6728 
6729     unlock_user_struct(target_tx, target_addr, 1);
6730     return 0;
6731 }
6732 #endif
6733 
6734 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6735                                                abi_ulong target_addr)
6736 {
6737     struct target_sigevent *target_sevp;
6738 
6739     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6740         return -TARGET_EFAULT;
6741     }
6742 
6743     /* This union is awkward on 64 bit systems because it has a 32 bit
6744      * integer and a pointer in it; we follow the conversion approach
6745      * used for handling sigval types in signal.c so the guest should get
6746      * the correct value back even if we did a 64 bit byteswap and it's
6747      * using the 32 bit integer.
6748      */
6749     host_sevp->sigev_value.sival_ptr =
6750         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6751     host_sevp->sigev_signo =
6752         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6753     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6754     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6755 
6756     unlock_user_struct(target_sevp, target_addr, 1);
6757     return 0;
6758 }
6759 
6760 #if defined(TARGET_NR_mlockall)
6761 static inline int target_to_host_mlockall_arg(int arg)
6762 {
6763     int result = 0;
6764 
6765     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6766         result |= MCL_CURRENT;
6767     }
6768     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6769         result |= MCL_FUTURE;
6770     }
6771     return result;
6772 }
6773 #endif
6774 
6775 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6776      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6777      defined(TARGET_NR_newfstatat))
6778 static inline abi_long host_to_target_stat64(void *cpu_env,
6779                                              abi_ulong target_addr,
6780                                              struct stat *host_st)
6781 {
6782 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6783     if (((CPUARMState *)cpu_env)->eabi) {
6784         struct target_eabi_stat64 *target_st;
6785 
6786         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6787             return -TARGET_EFAULT;
6788         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6789         __put_user(host_st->st_dev, &target_st->st_dev);
6790         __put_user(host_st->st_ino, &target_st->st_ino);
6791 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6792         __put_user(host_st->st_ino, &target_st->__st_ino);
6793 #endif
6794         __put_user(host_st->st_mode, &target_st->st_mode);
6795         __put_user(host_st->st_nlink, &target_st->st_nlink);
6796         __put_user(host_st->st_uid, &target_st->st_uid);
6797         __put_user(host_st->st_gid, &target_st->st_gid);
6798         __put_user(host_st->st_rdev, &target_st->st_rdev);
6799         __put_user(host_st->st_size, &target_st->st_size);
6800         __put_user(host_st->st_blksize, &target_st->st_blksize);
6801         __put_user(host_st->st_blocks, &target_st->st_blocks);
6802         __put_user(host_st->st_atime, &target_st->target_st_atime);
6803         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6804         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6805 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6806         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6807         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6808         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6809 #endif
6810         unlock_user_struct(target_st, target_addr, 1);
6811     } else
6812 #endif
6813     {
6814 #if defined(TARGET_HAS_STRUCT_STAT64)
6815         struct target_stat64 *target_st;
6816 #else
6817         struct target_stat *target_st;
6818 #endif
6819 
6820         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6821             return -TARGET_EFAULT;
6822         memset(target_st, 0, sizeof(*target_st));
6823         __put_user(host_st->st_dev, &target_st->st_dev);
6824         __put_user(host_st->st_ino, &target_st->st_ino);
6825 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6826         __put_user(host_st->st_ino, &target_st->__st_ino);
6827 #endif
6828         __put_user(host_st->st_mode, &target_st->st_mode);
6829         __put_user(host_st->st_nlink, &target_st->st_nlink);
6830         __put_user(host_st->st_uid, &target_st->st_uid);
6831         __put_user(host_st->st_gid, &target_st->st_gid);
6832         __put_user(host_st->st_rdev, &target_st->st_rdev);
6833         /* XXX: better use of kernel struct */
6834         __put_user(host_st->st_size, &target_st->st_size);
6835         __put_user(host_st->st_blksize, &target_st->st_blksize);
6836         __put_user(host_st->st_blocks, &target_st->st_blocks);
6837         __put_user(host_st->st_atime, &target_st->target_st_atime);
6838         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6839         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6840 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6841         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6842         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6843         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6844 #endif
6845         unlock_user_struct(target_st, target_addr, 1);
6846     }
6847 
6848     return 0;
6849 }
6850 #endif
6851 
6852 #if defined(TARGET_NR_statx) && defined(__NR_statx)
6853 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
6854                                             abi_ulong target_addr)
6855 {
6856     struct target_statx *target_stx;
6857 
6858     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
6859         return -TARGET_EFAULT;
6860     }
6861     memset(target_stx, 0, sizeof(*target_stx));
6862 
6863     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
6864     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
6865     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
6866     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
6867     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
6868     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
6869     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
6870     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
6871     __put_user(host_stx->stx_size, &target_stx->stx_size);
6872     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
6873     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
6874     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
6875     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6876     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
6877     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
6878     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
6879     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
6880     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
6881     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
6882     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
6883     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
6884     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
6885     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
6886 
6887     unlock_user_struct(target_stx, target_addr, 1);
6888 
6889     return 0;
6890 }
6891 #endif
6892 
6893 
6894 /* ??? Using host futex calls even when target atomic operations
6895    are not really atomic probably breaks things.  However, implementing
6896    futexes locally would make futexes shared between multiple processes
6897    tricky.  They are probably useless in that case anyway, because guest
6898    atomic operations won't work either.  */
6899 #if defined(TARGET_NR_futex)
6900 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6901                     target_ulong uaddr2, int val3)
6902 {
6903     struct timespec ts, *pts;
6904     int base_op;
6905 
6906     /* ??? We assume FUTEX_* constants are the same on both host
6907        and target.  */
6908 #ifdef FUTEX_CMD_MASK
6909     base_op = op & FUTEX_CMD_MASK;
6910 #else
6911     base_op = op;
6912 #endif
6913     switch (base_op) {
6914     case FUTEX_WAIT:
6915     case FUTEX_WAIT_BITSET:
6916         if (timeout) {
6917             pts = &ts;
6918             if (target_to_host_timespec(pts, timeout)) {
                     return -TARGET_EFAULT;
                 }
6919         } else {
6920             pts = NULL;
6921         }
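             /* The kernel compares the 32-bit word at uaddr, which lives in
                guest memory, against val, so val must be byte-swapped to the
                guest's order.  */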
6922         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6923                          pts, NULL, val3));
6924     case FUTEX_WAKE:
6925         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6926     case FUTEX_FD:
6927         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6928     case FUTEX_REQUEUE:
6929     case FUTEX_CMP_REQUEUE:
6930     case FUTEX_WAKE_OP:
6931         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6932            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6933            But the prototype takes a `struct timespec *'; insert casts
6934            to satisfy the compiler.  We do not need to tswap TIMEOUT
6935            since it's not compared to guest memory.  */
6936         pts = (struct timespec *)(uintptr_t) timeout;
6937         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6938                                     g2h(uaddr2),
6939                                     (base_op == FUTEX_CMP_REQUEUE
6940                                      ? tswap32(val3)
6941                                      : val3)));
6942     default:
6943         return -TARGET_ENOSYS;
6944     }
6945 }
6946 #endif
6947 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6948 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6949                                      abi_long handle, abi_long mount_id,
6950                                      abi_long flags)
6951 {
6952     struct file_handle *target_fh;
6953     struct file_handle *fh;
6954     int mid = 0;
6955     abi_long ret;
6956     char *name;
6957     unsigned int size, total_size;
6958 
6959     if (get_user_s32(size, handle)) {
6960         return -TARGET_EFAULT;
6961     }
6962 
6963     name = lock_user_string(pathname);
6964     if (!name) {
6965         return -TARGET_EFAULT;
6966     }
6967 
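         /* struct file_handle ends in a flexible array member, so allocate
            room for the fixed header plus the guest-supplied handle_bytes.  */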
6968     total_size = sizeof(struct file_handle) + size;
6969     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6970     if (!target_fh) {
6971         unlock_user(name, pathname, 0);
6972         return -TARGET_EFAULT;
6973     }
6974 
6975     fh = g_malloc0(total_size);
6976     fh->handle_bytes = size;
6977 
6978     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6979     unlock_user(name, pathname, 0);
6980 
6981     /* man name_to_handle_at(2):
6982      * Other than the use of the handle_bytes field, the caller should treat
6983      * the file_handle structure as an opaque data type
6984      */
6985 
6986     memcpy(target_fh, fh, total_size);
6987     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6988     target_fh->handle_type = tswap32(fh->handle_type);
6989     g_free(fh);
6990     unlock_user(target_fh, handle, total_size);
6991 
6992     if (put_user_s32(mid, mount_id)) {
6993         return -TARGET_EFAULT;
6994     }
6995 
6996     return ret;
6997 
6998 }
6999 #endif
7000 
7001 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7002 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7003                                      abi_long flags)
7004 {
7005     struct file_handle *target_fh;
7006     struct file_handle *fh;
7007     unsigned int size, total_size;
7008     abi_long ret;
7009 
7010     if (get_user_s32(size, handle)) {
7011         return -TARGET_EFAULT;
7012     }
7013 
7014     total_size = sizeof(struct file_handle) + size;
7015     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7016     if (!target_fh) {
7017         return -TARGET_EFAULT;
7018     }
7019 
7020     fh = g_memdup(target_fh, total_size);
7021     fh->handle_bytes = size;
7022     fh->handle_type = tswap32(target_fh->handle_type);
7023 
7024     ret = get_errno(open_by_handle_at(mount_fd, fh,
7025                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7026 
7027     g_free(fh);
7028 
7029     unlock_user(target_fh, handle, total_size);
7030 
7031     return ret;
7032 }
7033 #endif
7034 
7035 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7036 
7037 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7038 {
7039     int host_flags;
7040     target_sigset_t *target_mask;
7041     sigset_t host_mask;
7042     abi_long ret;
7043 
7044     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7045         return -TARGET_EINVAL;
7046     }
7047     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7048         return -TARGET_EFAULT;
7049     }
7050 
7051     target_to_host_sigset(&host_mask, target_mask);
7052 
7053     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7054 
7055     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7056     if (ret >= 0) {
7057         fd_trans_register(ret, &target_signalfd_trans);
7058     }
7059 
7060     unlock_user_struct(target_mask, mask, 0);
7061 
7062     return ret;
7063 }
7064 #endif
7065 
7066 /* Map host to target signal numbers for the wait family of syscalls.
7067    Assume all other status bits are the same.  */
7068 int host_to_target_waitstatus(int status)
7069 {
7070     if (WIFSIGNALED(status)) {
7071         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7072     }
7073     if (WIFSTOPPED(status)) {
7074         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7075                | (status & 0xff);
7076     }
7077     return status;
7078 }
7079 
7080 static int open_self_cmdline(void *cpu_env, int fd)
7081 {
7082     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7083     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7084     int i;
7085 
7086     for (i = 0; i < bprm->argc; i++) {
7087         size_t len = strlen(bprm->argv[i]) + 1;
7088 
7089         if (write(fd, bprm->argv[i], len) != len) {
7090             return -1;
7091         }
7092     }
7093 
7094     return 0;
7095 }
7096 
7097 static int open_self_maps(void *cpu_env, int fd)
7098 {
7099     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7100     TaskState *ts = cpu->opaque;
7101     FILE *fp;
7102     char *line = NULL;
7103     size_t len = 0;
7104     ssize_t read;
7105 
7106     fp = fopen("/proc/self/maps", "r");
7107     if (fp == NULL) {
7108         return -1;
7109     }
7110 
7111     while ((read = getline(&line, &len, fp)) != -1) {
7112         int fields, dev_maj, dev_min, inode;
7113         uint64_t min, max, offset;
7114         char flag_r, flag_w, flag_x, flag_p;
7115         char path[513] = "";  /* the %512s scan below writes up to 513 bytes */
7116         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7117                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7118                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7119 
7120         if ((fields < 10) || (fields > 11)) {
7121             continue;
7122         }
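             /* Report only host mappings that fall inside the guest address
                space, rewriting the addresses into guest terms.  */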
7123         if (h2g_valid(min)) {
7124             int flags = page_get_flags(h2g(min));
7125             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7126             if (page_check_range(h2g(min), max - min, flags) == -1) {
7127                 continue;
7128             }
7129             if (h2g(min) == ts->info->stack_limit) {
7130                 pstrcpy(path, sizeof(path), "      [stack]");
7131             }
7132             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7133                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7134                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7135                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
7136                     path[0] ? "         " : "", path);
7137         }
7138     }
7139 
7140     free(line);
7141     fclose(fp);
7142 
7143     return 0;
7144 }
7145 
7146 static int open_self_stat(void *cpu_env, int fd)
7147 {
7148     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7149     TaskState *ts = cpu->opaque;
7150     abi_ulong start_stack = ts->info->start_stack;
7151     int i;
7152 
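     /* Synthesize the 44 fields of the /proc/self/stat line written here;
        only the pid, comm and start-of-stack fields carry real values.  */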
7153     for (i = 0; i < 44; i++) {
7154       char buf[128];
7155       int len;
7156       uint64_t val = 0;
7157 
7158       if (i == 0) {
7159         /* pid */
7160         val = getpid();
7161         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7162       } else if (i == 1) {
7163         /* app name */
7164         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7165       } else if (i == 27) {
7166         /* stack bottom */
7167         val = start_stack;
7168         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7169       } else {
7170         /* for the rest, there is MasterCard */
7171         /* every remaining field is simply reported as 0 */
7172       }
7173 
7174       len = strlen(buf);
7175       if (write(fd, buf, len) != len) {
7176           return -1;
7177       }
7178     }
7179 
7180     return 0;
7181 }
7182 
7183 static int open_self_auxv(void *cpu_env, int fd)
7184 {
7185     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7186     TaskState *ts = cpu->opaque;
7187     abi_ulong auxv = ts->info->saved_auxv;
7188     abi_ulong len = ts->info->auxv_len;
7189     char *ptr;
7190 
7191     /*
7192      * The auxiliary vector is stored on the target process stack.
7193      * Read the whole auxv vector and copy it to the file.
7194      */
7195     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7196     if (ptr != NULL) {
7197         while (len > 0) {
7198             ssize_t r;
7199             r = write(fd, ptr, len);
7200             if (r <= 0) {
7201                 break;
7202             }
7203             len -= r;
7204             ptr += r;
7205         }
7206         lseek(fd, 0, SEEK_SET);
7207         unlock_user(ptr, auxv, len);
7208     }
7209 
7210     return 0;
7211 }
7212 
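     /* Return nonzero if filename refers to /proc/self/<entry> or to
        /proc/<pid>/<entry> for our own pid.  */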
7213 static int is_proc_myself(const char *filename, const char *entry)
7214 {
7215     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7216         filename += strlen("/proc/");
7217         if (!strncmp(filename, "self/", strlen("self/"))) {
7218             filename += strlen("self/");
7219         } else if (*filename >= '1' && *filename <= '9') {
7220             char myself[80];
7221             snprintf(myself, sizeof(myself), "%d/", getpid());
7222             if (!strncmp(filename, myself, strlen(myself))) {
7223                 filename += strlen(myself);
7224             } else {
7225                 return 0;
7226             }
7227         } else {
7228             return 0;
7229         }
7230         if (!strcmp(filename, entry)) {
7231             return 1;
7232         }
7233     }
7234     return 0;
7235 }
7236 
7237 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7238     defined(TARGET_SPARC) || defined(TARGET_M68K)
7239 static int is_proc(const char *filename, const char *entry)
7240 {
7241     return strcmp(filename, entry) == 0;
7242 }
7243 #endif
7244 
7245 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7246 static int open_net_route(void *cpu_env, int fd)
7247 {
7248     FILE *fp;
7249     char *line = NULL;
7250     size_t len = 0;
7251     ssize_t read;
7252 
7253     fp = fopen("/proc/net/route", "r");
7254     if (fp == NULL) {
7255         return -1;
7256     }
7257 
7258     /* read header */
7259 
7260     read = getline(&line, &len, fp);
7261     dprintf(fd, "%s", line);
7262 
7263     /* read routes */
7264 
7265     while ((read = getline(&line, &len, fp)) != -1) {
7266         char iface[16];
7267         uint32_t dest, gw, mask;
7268         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7269         int fields;
7270 
7271         fields = sscanf(line,
7272                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7273                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7274                         &mask, &mtu, &window, &irtt);
7275         if (fields != 11) {
7276             continue;
7277         }
7278         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7279                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7280                 metric, tswap32(mask), mtu, window, irtt);
7281     }
7282 
7283     free(line);
7284     fclose(fp);
7285 
7286     return 0;
7287 }
7288 #endif
7289 
7290 #if defined(TARGET_SPARC)
7291 static int open_cpuinfo(void *cpu_env, int fd)
7292 {
7293     dprintf(fd, "type\t\t: sun4u\n");
7294     return 0;
7295 }
7296 #endif
7297 
7298 #if defined(TARGET_M68K)
7299 static int open_hardware(void *cpu_env, int fd)
7300 {
7301     dprintf(fd, "Model:\t\tqemu-m68k\n");
7302     return 0;
7303 }
7304 #endif
7305 
7306 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7307 {
7308     struct fake_open {
7309         const char *filename;
7310         int (*fill)(void *cpu_env, int fd);
7311         int (*cmp)(const char *s1, const char *s2);
7312     };
7313     const struct fake_open *fake_open;
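     /* /proc entries whose contents must be synthesized for the guest
        rather than passed through from the host.  */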
7314     static const struct fake_open fakes[] = {
7315         { "maps", open_self_maps, is_proc_myself },
7316         { "stat", open_self_stat, is_proc_myself },
7317         { "auxv", open_self_auxv, is_proc_myself },
7318         { "cmdline", open_self_cmdline, is_proc_myself },
7319 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7320         { "/proc/net/route", open_net_route, is_proc },
7321 #endif
7322 #if defined(TARGET_SPARC)
7323         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7324 #endif
7325 #if defined(TARGET_M68K)
7326         { "/proc/hardware", open_hardware, is_proc },
7327 #endif
7328         { NULL, NULL, NULL }
7329     };
7330 
7331     if (is_proc_myself(pathname, "exe")) {
7332         int execfd = qemu_getauxval(AT_EXECFD);
7333         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7334     }
7335 
7336     for (fake_open = fakes; fake_open->filename; fake_open++) {
7337         if (fake_open->cmp(pathname, fake_open->filename)) {
7338             break;
7339         }
7340     }
7341 
7342     if (fake_open->filename) {
7343         const char *tmpdir;
7344         char filename[PATH_MAX];
7345         int fd, r;
7346 
7347         /* create a temporary file to hold the synthesized contents */
7348         tmpdir = getenv("TMPDIR");
7349         if (!tmpdir)
7350             tmpdir = "/tmp";
7351         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7352         fd = mkstemp(filename);
7353         if (fd < 0) {
7354             return fd;
7355         }
7356         unlink(filename);
7357 
7358         if ((r = fake_open->fill(cpu_env, fd))) {
7359             int e = errno;
7360             close(fd);
7361             errno = e;
7362             return r;
7363         }
7364         lseek(fd, 0, SEEK_SET);
7365 
7366         return fd;
7367     }
7368 
7369     return safe_openat(dirfd, path(pathname), flags, mode);
7370 }
7371 
7372 #define TIMER_MAGIC 0x0caf0000
7373 #define TIMER_MAGIC_MASK 0xffff0000
7374 
7375 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7376 static target_timer_t get_timer_id(abi_long arg)
7377 {
7378     target_timer_t timerid = arg;
7379 
7380     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7381         return -TARGET_EINVAL;
7382     }
7383 
7384     timerid &= 0xffff;
7385 
7386     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7387         return -TARGET_EINVAL;
7388     }
7389 
7390     return timerid;
7391 }
7392 
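     /*
      * CPU affinity masks are long-sized bitmaps on both sides, but
      * abi_ulong and the host unsigned long may differ in width and byte
      * order, so convert the masks bit by bit.
      */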
7393 static int target_to_host_cpu_mask(unsigned long *host_mask,
7394                                    size_t host_size,
7395                                    abi_ulong target_addr,
7396                                    size_t target_size)
7397 {
7398     unsigned target_bits = sizeof(abi_ulong) * 8;
7399     unsigned host_bits = sizeof(*host_mask) * 8;
7400     abi_ulong *target_mask;
7401     unsigned i, j;
7402 
7403     assert(host_size >= target_size);
7404 
7405     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7406     if (!target_mask) {
7407         return -TARGET_EFAULT;
7408     }
7409     memset(host_mask, 0, host_size);
7410 
7411     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7412         unsigned bit = i * target_bits;
7413         abi_ulong val;
7414 
7415         __get_user(val, &target_mask[i]);
7416         for (j = 0; j < target_bits; j++, bit++) {
7417             if (val & (1UL << j)) {
7418                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7419             }
7420         }
7421     }
7422 
7423     unlock_user(target_mask, target_addr, 0);
7424     return 0;
7425 }
7426 
7427 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7428                                    size_t host_size,
7429                                    abi_ulong target_addr,
7430                                    size_t target_size)
7431 {
7432     unsigned target_bits = sizeof(abi_ulong) * 8;
7433     unsigned host_bits = sizeof(*host_mask) * 8;
7434     abi_ulong *target_mask;
7435     unsigned i, j;
7436 
7437     assert(host_size >= target_size);
7438 
7439     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7440     if (!target_mask) {
7441         return -TARGET_EFAULT;
7442     }
7443 
7444     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7445         unsigned bit = i * target_bits;
7446         abi_ulong val = 0;
7447 
7448         for (j = 0; j < target_bits; j++, bit++) {
7449             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7450                 val |= 1UL << j;
7451             }
7452         }
7453         __put_user(val, &target_mask[i]);
7454     }
7455 
7456     unlock_user(target_mask, target_addr, target_size);
7457     return 0;
7458 }
7459 
7460 /* This is an internal helper for do_syscall so that it is easier
7461  * to have a single return point, allowing actions such as logging
7462  * of syscall results to be performed.
7463  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7464  */
7465 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7466                             abi_long arg2, abi_long arg3, abi_long arg4,
7467                             abi_long arg5, abi_long arg6, abi_long arg7,
7468                             abi_long arg8)
7469 {
7470     CPUState *cpu = env_cpu(cpu_env);
7471     abi_long ret;
7472 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7473     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7474     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7475     || defined(TARGET_NR_statx)
7476     struct stat st;
7477 #endif
7478 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7479     || defined(TARGET_NR_fstatfs)
7480     struct statfs stfs;
7481 #endif
7482     void *p;
7483 
7484     switch(num) {
7485     case TARGET_NR_exit:
7486         /* In old applications this may be used to implement _exit(2).
7487            However, in threaded applications it is used for thread termination,
7488            and _exit_group is used for application termination.
7489            Do thread termination if we have more than one thread.  */
7490 
7491         if (block_signals()) {
7492             return -TARGET_ERESTARTSYS;
7493         }
7494 
7495         cpu_list_lock();
7496 
7497         if (CPU_NEXT(first_cpu)) {
7498             TaskState *ts;
7499 
7500             /* Remove the CPU from the list.  */
7501             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7502 
7503             cpu_list_unlock();
7504 
7505             ts = cpu->opaque;
7506             if (ts->child_tidptr) {
7507                 put_user_u32(0, ts->child_tidptr);
7508                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7509                           NULL, NULL, 0);
7510             }
7511             thread_cpu = NULL;
7512             object_unref(OBJECT(cpu));
7513             g_free(ts);
7514             rcu_unregister_thread();
7515             pthread_exit(NULL);
7516         }
7517 
7518         cpu_list_unlock();
7519         preexit_cleanup(cpu_env, arg1);
7520         _exit(arg1);
7521         return 0; /* avoid warning */
7522     case TARGET_NR_read:
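         /* read(fd, NULL, 0) must not fault on the unmapped buffer but must
            still report errors such as EBADF, so pass it straight through.  */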
7523         if (arg2 == 0 && arg3 == 0) {
7524             return get_errno(safe_read(arg1, 0, 0));
7525         } else {
7526             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7527                 return -TARGET_EFAULT;
7528             ret = get_errno(safe_read(arg1, p, arg3));
7529             if (ret >= 0 &&
7530                 fd_trans_host_to_target_data(arg1)) {
7531                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7532             }
7533             unlock_user(p, arg2, ret);
7534         }
7535         return ret;
7536     case TARGET_NR_write:
7537         if (arg2 == 0 && arg3 == 0) {
7538             return get_errno(safe_write(arg1, 0, 0));
7539         }
7540         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7541             return -TARGET_EFAULT;
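         /* An fd translator may rewrite the data being written, so give it a
            private copy rather than the locked guest buffer.  */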
7542         if (fd_trans_target_to_host_data(arg1)) {
7543             void *copy = g_malloc(arg3);
7544             memcpy(copy, p, arg3);
7545             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7546             if (ret >= 0) {
7547                 ret = get_errno(safe_write(arg1, copy, ret));
7548             }
7549             g_free(copy);
7550         } else {
7551             ret = get_errno(safe_write(arg1, p, arg3));
7552         }
7553         unlock_user(p, arg2, 0);
7554         return ret;
7555 
7556 #ifdef TARGET_NR_open
7557     case TARGET_NR_open:
7558         if (!(p = lock_user_string(arg1)))
7559             return -TARGET_EFAULT;
7560         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7561                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7562                                   arg3));
7563         fd_trans_unregister(ret);
7564         unlock_user(p, arg1, 0);
7565         return ret;
7566 #endif
7567     case TARGET_NR_openat:
7568         if (!(p = lock_user_string(arg2)))
7569             return -TARGET_EFAULT;
7570         ret = get_errno(do_openat(cpu_env, arg1, p,
7571                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7572                                   arg4));
7573         fd_trans_unregister(ret);
7574         unlock_user(p, arg2, 0);
7575         return ret;
7576 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7577     case TARGET_NR_name_to_handle_at:
7578         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7579         return ret;
7580 #endif
7581 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7582     case TARGET_NR_open_by_handle_at:
7583         ret = do_open_by_handle_at(arg1, arg2, arg3);
7584         fd_trans_unregister(ret);
7585         return ret;
7586 #endif
7587     case TARGET_NR_close:
7588         fd_trans_unregister(arg1);
7589         return get_errno(close(arg1));
7590 
7591     case TARGET_NR_brk:
7592         return do_brk(arg1);
7593 #ifdef TARGET_NR_fork
7594     case TARGET_NR_fork:
7595         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7596 #endif
7597 #ifdef TARGET_NR_waitpid
7598     case TARGET_NR_waitpid:
7599         {
7600             int status;
7601             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7602             if (!is_error(ret) && arg2 && ret
7603                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7604                 return -TARGET_EFAULT;
7605         }
7606         return ret;
7607 #endif
7608 #ifdef TARGET_NR_waitid
7609     case TARGET_NR_waitid:
7610         {
7611             siginfo_t info;
7612             info.si_pid = 0;
7613             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7614             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7615                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7616                     return -TARGET_EFAULT;
7617                 host_to_target_siginfo(p, &info);
7618                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7619             }
7620         }
7621         return ret;
7622 #endif
7623 #ifdef TARGET_NR_creat /* not on alpha */
7624     case TARGET_NR_creat:
7625         if (!(p = lock_user_string(arg1)))
7626             return -TARGET_EFAULT;
7627         ret = get_errno(creat(p, arg2));
7628         fd_trans_unregister(ret);
7629         unlock_user(p, arg1, 0);
7630         return ret;
7631 #endif
7632 #ifdef TARGET_NR_link
7633     case TARGET_NR_link:
7634         {
7635             void * p2;
7636             p = lock_user_string(arg1);
7637             p2 = lock_user_string(arg2);
7638             if (!p || !p2)
7639                 ret = -TARGET_EFAULT;
7640             else
7641                 ret = get_errno(link(p, p2));
7642             unlock_user(p2, arg2, 0);
7643             unlock_user(p, arg1, 0);
7644         }
7645         return ret;
7646 #endif
7647 #if defined(TARGET_NR_linkat)
7648     case TARGET_NR_linkat:
7649         {
7650             void * p2 = NULL;
7651             if (!arg2 || !arg4)
7652                 return -TARGET_EFAULT;
7653             p  = lock_user_string(arg2);
7654             p2 = lock_user_string(arg4);
7655             if (!p || !p2)
7656                 ret = -TARGET_EFAULT;
7657             else
7658                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7659             unlock_user(p, arg2, 0);
7660             unlock_user(p2, arg4, 0);
7661         }
7662         return ret;
7663 #endif
7664 #ifdef TARGET_NR_unlink
7665     case TARGET_NR_unlink:
7666         if (!(p = lock_user_string(arg1)))
7667             return -TARGET_EFAULT;
7668         ret = get_errno(unlink(p));
7669         unlock_user(p, arg1, 0);
7670         return ret;
7671 #endif
7672 #if defined(TARGET_NR_unlinkat)
7673     case TARGET_NR_unlinkat:
7674         if (!(p = lock_user_string(arg2)))
7675             return -TARGET_EFAULT;
7676         ret = get_errno(unlinkat(arg1, p, arg3));
7677         unlock_user(p, arg2, 0);
7678         return ret;
7679 #endif
7680     case TARGET_NR_execve:
7681         {
7682             char **argp, **envp;
7683             int argc, envc;
7684             abi_ulong gp;
7685             abi_ulong guest_argp;
7686             abi_ulong guest_envp;
7687             abi_ulong addr;
7688             char **q;
7689             int total_size = 0;
7690 
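             /* Count the guest argv and envp entries first so that host-side
                arrays of the right size can be allocated.  */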
7691             argc = 0;
7692             guest_argp = arg2;
7693             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7694                 if (get_user_ual(addr, gp))
7695                     return -TARGET_EFAULT;
7696                 if (!addr)
7697                     break;
7698                 argc++;
7699             }
7700             envc = 0;
7701             guest_envp = arg3;
7702             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7703                 if (get_user_ual(addr, gp))
7704                     return -TARGET_EFAULT;
7705                 if (!addr)
7706                     break;
7707                 envc++;
7708             }
7709 
7710             argp = g_new0(char *, argc + 1);
7711             envp = g_new0(char *, envc + 1);
7712 
7713             for (gp = guest_argp, q = argp; gp;
7714                   gp += sizeof(abi_ulong), q++) {
7715                 if (get_user_ual(addr, gp))
7716                     goto execve_efault;
7717                 if (!addr)
7718                     break;
7719                 if (!(*q = lock_user_string(addr)))
7720                     goto execve_efault;
7721                 total_size += strlen(*q) + 1;
7722             }
7723             *q = NULL;
7724 
7725             for (gp = guest_envp, q = envp; gp;
7726                   gp += sizeof(abi_ulong), q++) {
7727                 if (get_user_ual(addr, gp))
7728                     goto execve_efault;
7729                 if (!addr)
7730                     break;
7731                 if (!(*q = lock_user_string(addr)))
7732                     goto execve_efault;
7733                 total_size += strlen(*q) + 1;
7734             }
7735             *q = NULL;
7736 
7737             if (!(p = lock_user_string(arg1)))
7738                 goto execve_efault;
7739             /* Although execve() is not an interruptible syscall, it is
7740              * a special case where we must use the safe_syscall wrapper:
7741              * if we allow a signal to happen before we make the host
7742              * syscall then we will 'lose' it, because at the point of
7743              * execve the process leaves QEMU's control. So we use the
7744              * safe syscall wrapper to ensure that we either take the
7745              * signal as a guest signal, or else it does not happen
7746              * before the execve completes and makes it the other
7747              * program's problem.
7748              */
7749             ret = get_errno(safe_execve(p, argp, envp));
7750             unlock_user(p, arg1, 0);
7751 
7752             goto execve_end;
7753 
7754         execve_efault:
7755             ret = -TARGET_EFAULT;
7756 
7757         execve_end:
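            /*
             * Unlock every guest string we locked above; the guest
             * vectors are walked again to recover the guest address
             * matching each locked host pointer.
             */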
7758             for (gp = guest_argp, q = argp; *q;
7759                   gp += sizeof(abi_ulong), q++) {
7760                 if (get_user_ual(addr, gp)
7761                     || !addr)
7762                     break;
7763                 unlock_user(*q, addr, 0);
7764             }
7765             for (gp = guest_envp, q = envp; *q;
7766                   gp += sizeof(abi_ulong), q++) {
7767                 if (get_user_ual(addr, gp)
7768                     || !addr)
7769                     break;
7770                 unlock_user(*q, addr, 0);
7771             }
7772 
7773             g_free(argp);
7774             g_free(envp);
7775         }
7776         return ret;
7777     case TARGET_NR_chdir:
7778         if (!(p = lock_user_string(arg1)))
7779             return -TARGET_EFAULT;
7780         ret = get_errno(chdir(p));
7781         unlock_user(p, arg1, 0);
7782         return ret;
7783 #ifdef TARGET_NR_time
7784     case TARGET_NR_time:
7785         {
7786             time_t host_time;
7787             ret = get_errno(time(&host_time));
7788             if (!is_error(ret)
7789                 && arg1
7790                 && put_user_sal(host_time, arg1))
7791                 return -TARGET_EFAULT;
7792         }
7793         return ret;
7794 #endif
7795 #ifdef TARGET_NR_mknod
7796     case TARGET_NR_mknod:
7797         if (!(p = lock_user_string(arg1)))
7798             return -TARGET_EFAULT;
7799         ret = get_errno(mknod(p, arg2, arg3));
7800         unlock_user(p, arg1, 0);
7801         return ret;
7802 #endif
7803 #if defined(TARGET_NR_mknodat)
7804     case TARGET_NR_mknodat:
7805         if (!(p = lock_user_string(arg2)))
7806             return -TARGET_EFAULT;
7807         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7808         unlock_user(p, arg2, 0);
7809         return ret;
7810 #endif
7811 #ifdef TARGET_NR_chmod
7812     case TARGET_NR_chmod:
7813         if (!(p = lock_user_string(arg1)))
7814             return -TARGET_EFAULT;
7815         ret = get_errno(chmod(p, arg2));
7816         unlock_user(p, arg1, 0);
7817         return ret;
7818 #endif
7819 #ifdef TARGET_NR_lseek
7820     case TARGET_NR_lseek:
7821         return get_errno(lseek(arg1, arg2, arg3));
7822 #endif
7823 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7824     /* Alpha specific */
7825     case TARGET_NR_getxpid:
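        /*
         * Alpha's getxpid returns the pid as the normal syscall result
         * and the parent pid in register a4, which is set here.
         */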
7826         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7827         return get_errno(getpid());
7828 #endif
7829 #ifdef TARGET_NR_getpid
7830     case TARGET_NR_getpid:
7831         return get_errno(getpid());
7832 #endif
7833     case TARGET_NR_mount:
7834         {
7835             /* need to look at the data field */
7836             void *p2, *p3;
7837 
7838             if (arg1) {
7839                 p = lock_user_string(arg1);
7840                 if (!p) {
7841                     return -TARGET_EFAULT;
7842                 }
7843             } else {
7844                 p = NULL;
7845             }
7846 
7847             p2 = lock_user_string(arg2);
7848             if (!p2) {
7849                 if (arg1) {
7850                     unlock_user(p, arg1, 0);
7851                 }
7852                 return -TARGET_EFAULT;
7853             }
7854 
7855             if (arg3) {
7856                 p3 = lock_user_string(arg3);
7857                 if (!p3) {
7858                     if (arg1) {
7859                         unlock_user(p, arg1, 0);
7860                     }
7861                     unlock_user(p2, arg2, 0);
7862                     return -TARGET_EFAULT;
7863                 }
7864             } else {
7865                 p3 = NULL;
7866             }
7867 
7868             /* FIXME - arg5 should be locked, but it isn't clear how to
7869              * do that since it's not guaranteed to be a NULL-terminated
7870              * string.
7871              */
7872             if (!arg5) {
7873                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7874             } else {
7875                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7876             }
7877             ret = get_errno(ret);
7878 
7879             if (arg1) {
7880                 unlock_user(p, arg1, 0);
7881             }
7882             unlock_user(p2, arg2, 0);
7883             if (arg3) {
7884                 unlock_user(p3, arg3, 0);
7885             }
7886         }
7887         return ret;
7888 #ifdef TARGET_NR_umount
7889     case TARGET_NR_umount:
7890         if (!(p = lock_user_string(arg1)))
7891             return -TARGET_EFAULT;
7892         ret = get_errno(umount(p));
7893         unlock_user(p, arg1, 0);
7894         return ret;
7895 #endif
7896 #ifdef TARGET_NR_stime /* not on alpha */
7897     case TARGET_NR_stime:
7898         {
7899             struct timespec ts;
7900             ts.tv_nsec = 0;
7901             if (get_user_sal(ts.tv_sec, arg1)) {
7902                 return -TARGET_EFAULT;
7903             }
7904             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
7905         }
7906 #endif
7907 #ifdef TARGET_NR_alarm /* not on alpha */
7908     case TARGET_NR_alarm:
7909         return alarm(arg1);
7910 #endif
7911 #ifdef TARGET_NR_pause /* not on alpha */
7912     case TARGET_NR_pause:
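        /*
         * Emulate pause() by suspending on the task's current signal
         * mask; the syscall always reports -EINTR once a signal has
         * been taken.
         */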
7913         if (!block_signals()) {
7914             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7915         }
7916         return -TARGET_EINTR;
7917 #endif
7918 #ifdef TARGET_NR_utime
7919     case TARGET_NR_utime:
7920         {
7921             struct utimbuf tbuf, *host_tbuf;
7922             struct target_utimbuf *target_tbuf;
7923             if (arg2) {
7924                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7925                     return -TARGET_EFAULT;
7926                 tbuf.actime = tswapal(target_tbuf->actime);
7927                 tbuf.modtime = tswapal(target_tbuf->modtime);
7928                 unlock_user_struct(target_tbuf, arg2, 0);
7929                 host_tbuf = &tbuf;
7930             } else {
7931                 host_tbuf = NULL;
7932             }
7933             if (!(p = lock_user_string(arg1)))
7934                 return -TARGET_EFAULT;
7935             ret = get_errno(utime(p, host_tbuf));
7936             unlock_user(p, arg1, 0);
7937         }
7938         return ret;
7939 #endif
7940 #ifdef TARGET_NR_utimes
7941     case TARGET_NR_utimes:
7942         {
7943             struct timeval *tvp, tv[2];
7944             if (arg2) {
7945                 if (copy_from_user_timeval(&tv[0], arg2)
7946                     || copy_from_user_timeval(&tv[1],
7947                                               arg2 + sizeof(struct target_timeval)))
7948                     return -TARGET_EFAULT;
7949                 tvp = tv;
7950             } else {
7951                 tvp = NULL;
7952             }
7953             if (!(p = lock_user_string(arg1)))
7954                 return -TARGET_EFAULT;
7955             ret = get_errno(utimes(p, tvp));
7956             unlock_user(p, arg1, 0);
7957         }
7958         return ret;
7959 #endif
7960 #if defined(TARGET_NR_futimesat)
7961     case TARGET_NR_futimesat:
7962         {
7963             struct timeval *tvp, tv[2];
7964             if (arg3) {
7965                 if (copy_from_user_timeval(&tv[0], arg3)
7966                     || copy_from_user_timeval(&tv[1],
7967                                               arg3 + sizeof(struct target_timeval)))
7968                     return -TARGET_EFAULT;
7969                 tvp = tv;
7970             } else {
7971                 tvp = NULL;
7972             }
7973             if (!(p = lock_user_string(arg2))) {
7974                 return -TARGET_EFAULT;
7975             }
7976             ret = get_errno(futimesat(arg1, path(p), tvp));
7977             unlock_user(p, arg2, 0);
7978         }
7979         return ret;
7980 #endif
7981 #ifdef TARGET_NR_access
7982     case TARGET_NR_access:
7983         if (!(p = lock_user_string(arg1))) {
7984             return -TARGET_EFAULT;
7985         }
7986         ret = get_errno(access(path(p), arg2));
7987         unlock_user(p, arg1, 0);
7988         return ret;
7989 #endif
7990 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7991     case TARGET_NR_faccessat:
7992         if (!(p = lock_user_string(arg2))) {
7993             return -TARGET_EFAULT;
7994         }
7995         ret = get_errno(faccessat(arg1, p, arg3, 0));
7996         unlock_user(p, arg2, 0);
7997         return ret;
7998 #endif
7999 #ifdef TARGET_NR_nice /* not on alpha */
8000     case TARGET_NR_nice:
8001         return get_errno(nice(arg1));
8002 #endif
8003     case TARGET_NR_sync:
8004         sync();
8005         return 0;
8006 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8007     case TARGET_NR_syncfs:
8008         return get_errno(syncfs(arg1));
8009 #endif
8010     case TARGET_NR_kill:
8011         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8012 #ifdef TARGET_NR_rename
8013     case TARGET_NR_rename:
8014         {
8015             void *p2;
8016             p = lock_user_string(arg1);
8017             p2 = lock_user_string(arg2);
8018             if (!p || !p2)
8019                 ret = -TARGET_EFAULT;
8020             else
8021                 ret = get_errno(rename(p, p2));
8022             unlock_user(p2, arg2, 0);
8023             unlock_user(p, arg1, 0);
8024         }
8025         return ret;
8026 #endif
8027 #if defined(TARGET_NR_renameat)
8028     case TARGET_NR_renameat:
8029         {
8030             void *p2;
8031             p  = lock_user_string(arg2);
8032             p2 = lock_user_string(arg4);
8033             if (!p || !p2)
8034                 ret = -TARGET_EFAULT;
8035             else
8036                 ret = get_errno(renameat(arg1, p, arg3, p2));
8037             unlock_user(p2, arg4, 0);
8038             unlock_user(p, arg2, 0);
8039         }
8040         return ret;
8041 #endif
8042 #if defined(TARGET_NR_renameat2)
8043     case TARGET_NR_renameat2:
8044         {
8045             void *p2;
8046             p  = lock_user_string(arg2);
8047             p2 = lock_user_string(arg4);
8048             if (!p || !p2) {
8049                 ret = -TARGET_EFAULT;
8050             } else {
8051                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8052             }
8053             unlock_user(p2, arg4, 0);
8054             unlock_user(p, arg2, 0);
8055         }
8056         return ret;
8057 #endif
8058 #ifdef TARGET_NR_mkdir
8059     case TARGET_NR_mkdir:
8060         if (!(p = lock_user_string(arg1)))
8061             return -TARGET_EFAULT;
8062         ret = get_errno(mkdir(p, arg2));
8063         unlock_user(p, arg1, 0);
8064         return ret;
8065 #endif
8066 #if defined(TARGET_NR_mkdirat)
8067     case TARGET_NR_mkdirat:
8068         if (!(p = lock_user_string(arg2)))
8069             return -TARGET_EFAULT;
8070         ret = get_errno(mkdirat(arg1, p, arg3));
8071         unlock_user(p, arg2, 0);
8072         return ret;
8073 #endif
8074 #ifdef TARGET_NR_rmdir
8075     case TARGET_NR_rmdir:
8076         if (!(p = lock_user_string(arg1)))
8077             return -TARGET_EFAULT;
8078         ret = get_errno(rmdir(p));
8079         unlock_user(p, arg1, 0);
8080         return ret;
8081 #endif
8082     case TARGET_NR_dup:
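        /*
         * On success, tell the fd translation layer about the new
         * descriptor so any translator attached to arg1 also covers
         * the duplicate.
         */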
8083         ret = get_errno(dup(arg1));
8084         if (ret >= 0) {
8085             fd_trans_dup(arg1, ret);
8086         }
8087         return ret;
8088 #ifdef TARGET_NR_pipe
8089     case TARGET_NR_pipe:
8090         return do_pipe(cpu_env, arg1, 0, 0);
8091 #endif
8092 #ifdef TARGET_NR_pipe2
8093     case TARGET_NR_pipe2:
8094         return do_pipe(cpu_env, arg1,
8095                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8096 #endif
8097     case TARGET_NR_times:
8098         {
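            /*
             * Both the four struct tms fields and the return value are
             * clock ticks and need host_to_target_clock_t conversion.
             */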
8099             struct target_tms *tmsp;
8100             struct tms tms;
8101             ret = get_errno(times(&tms));
8102             if (arg1) {
8103                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8104                 if (!tmsp)
8105                     return -TARGET_EFAULT;
8106                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8107                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8108                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8109                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8110             }
8111             if (!is_error(ret))
8112                 ret = host_to_target_clock_t(ret);
8113         }
8114         return ret;
8115     case TARGET_NR_acct:
8116         if (arg1 == 0) {
8117             ret = get_errno(acct(NULL));
8118         } else {
8119             if (!(p = lock_user_string(arg1))) {
8120                 return -TARGET_EFAULT;
8121             }
8122             ret = get_errno(acct(path(p)));
8123             unlock_user(p, arg1, 0);
8124         }
8125         return ret;
8126 #ifdef TARGET_NR_umount2
8127     case TARGET_NR_umount2:
8128         if (!(p = lock_user_string(arg1)))
8129             return -TARGET_EFAULT;
8130         ret = get_errno(umount2(p, arg2));
8131         unlock_user(p, arg1, 0);
8132         return ret;
8133 #endif
8134     case TARGET_NR_ioctl:
8135         return do_ioctl(arg1, arg2, arg3);
8136 #ifdef TARGET_NR_fcntl
8137     case TARGET_NR_fcntl:
8138         return do_fcntl(arg1, arg2, arg3);
8139 #endif
8140     case TARGET_NR_setpgid:
8141         return get_errno(setpgid(arg1, arg2));
8142     case TARGET_NR_umask:
8143         return get_errno(umask(arg1));
8144     case TARGET_NR_chroot:
8145         if (!(p = lock_user_string(arg1)))
8146             return -TARGET_EFAULT;
8147         ret = get_errno(chroot(p));
8148         unlock_user(p, arg1, 0);
8149         return ret;
8150 #ifdef TARGET_NR_dup2
8151     case TARGET_NR_dup2:
8152         ret = get_errno(dup2(arg1, arg2));
8153         if (ret >= 0) {
8154             fd_trans_dup(arg1, arg2);
8155         }
8156         return ret;
8157 #endif
8158 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8159     case TARGET_NR_dup3:
8160     {
8161         int host_flags;
8162 
8163         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8164             return -TARGET_EINVAL;
8165         }
8166         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8167         ret = get_errno(dup3(arg1, arg2, host_flags));
8168         if (ret >= 0) {
8169             fd_trans_dup(arg1, arg2);
8170         }
8171         return ret;
8172     }
8173 #endif
8174 #ifdef TARGET_NR_getppid /* not on alpha */
8175     case TARGET_NR_getppid:
8176         return get_errno(getppid());
8177 #endif
8178 #ifdef TARGET_NR_getpgrp
8179     case TARGET_NR_getpgrp:
8180         return get_errno(getpgrp());
8181 #endif
8182     case TARGET_NR_setsid:
8183         return get_errno(setsid());
8184 #ifdef TARGET_NR_sigaction
8185     case TARGET_NR_sigaction:
8186         {
8187 #if defined(TARGET_ALPHA)
8188             struct target_sigaction act, oact, *pact = 0;
8189             struct target_old_sigaction *old_act;
8190             if (arg2) {
8191                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8192                     return -TARGET_EFAULT;
8193                 act._sa_handler = old_act->_sa_handler;
8194                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8195                 act.sa_flags = old_act->sa_flags;
8196                 act.sa_restorer = 0;
8197                 unlock_user_struct(old_act, arg2, 0);
8198                 pact = &act;
8199             }
8200             ret = get_errno(do_sigaction(arg1, pact, &oact));
8201             if (!is_error(ret) && arg3) {
8202                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8203                     return -TARGET_EFAULT;
8204                 old_act->_sa_handler = oact._sa_handler;
8205                 old_act->sa_mask = oact.sa_mask.sig[0];
8206                 old_act->sa_flags = oact.sa_flags;
8207                 unlock_user_struct(old_act, arg3, 1);
8208             }
8209 #elif defined(TARGET_MIPS)
8210             struct target_sigaction act, oact, *pact, *old_act;
8211 
8212             if (arg2) {
8213                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8214                     return -TARGET_EFAULT;
8215                 act._sa_handler = old_act->_sa_handler;
8216                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8217                 act.sa_flags = old_act->sa_flags;
8218                 unlock_user_struct(old_act, arg2, 0);
8219                 pact = &act;
8220             } else {
8221                 pact = NULL;
8222             }
8223 
8224             ret = get_errno(do_sigaction(arg1, pact, &oact));
8225 
8226             if (!is_error(ret) && arg3) {
8227                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8228                     return -TARGET_EFAULT;
8229                 old_act->_sa_handler = oact._sa_handler;
8230                 old_act->sa_flags = oact.sa_flags;
8231                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8232                 old_act->sa_mask.sig[1] = 0;
8233                 old_act->sa_mask.sig[2] = 0;
8234                 old_act->sa_mask.sig[3] = 0;
8235                 unlock_user_struct(old_act, arg3, 1);
8236             }
8237 #else
8238             struct target_old_sigaction *old_act;
8239             struct target_sigaction act, oact, *pact;
8240             if (arg2) {
8241                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8242                     return -TARGET_EFAULT;
8243                 act._sa_handler = old_act->_sa_handler;
8244                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8245                 act.sa_flags = old_act->sa_flags;
8246                 act.sa_restorer = old_act->sa_restorer;
8247 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8248                 act.ka_restorer = 0;
8249 #endif
8250                 unlock_user_struct(old_act, arg2, 0);
8251                 pact = &act;
8252             } else {
8253                 pact = NULL;
8254             }
8255             ret = get_errno(do_sigaction(arg1, pact, &oact));
8256             if (!is_error(ret) && arg3) {
8257                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8258                     return -TARGET_EFAULT;
8259                 old_act->_sa_handler = oact._sa_handler;
8260                 old_act->sa_mask = oact.sa_mask.sig[0];
8261                 old_act->sa_flags = oact.sa_flags;
8262                 old_act->sa_restorer = oact.sa_restorer;
8263                 unlock_user_struct(old_act, arg3, 1);
8264             }
8265 #endif
8266         }
8267         return ret;
8268 #endif
8269     case TARGET_NR_rt_sigaction:
8270         {
8271 #if defined(TARGET_ALPHA)
8272             /* For Alpha and SPARC this is a 5 argument syscall, with
8273              * a 'restorer' parameter which must be copied into the
8274              * sa_restorer field of the sigaction struct.
8275              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8276              * and arg5 is the sigsetsize.
8277              * Alpha also has a separate rt_sigaction struct that it uses
8278              * here; SPARC uses the usual sigaction struct.
8279              */
8280             struct target_rt_sigaction *rt_act;
8281             struct target_sigaction act, oact, *pact = 0;
8282 
8283             if (arg4 != sizeof(target_sigset_t)) {
8284                 return -TARGET_EINVAL;
8285             }
8286             if (arg2) {
8287                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8288                     return -TARGET_EFAULT;
8289                 act._sa_handler = rt_act->_sa_handler;
8290                 act.sa_mask = rt_act->sa_mask;
8291                 act.sa_flags = rt_act->sa_flags;
8292                 act.sa_restorer = arg5;
8293                 unlock_user_struct(rt_act, arg2, 0);
8294                 pact = &act;
8295             }
8296             ret = get_errno(do_sigaction(arg1, pact, &oact));
8297             if (!is_error(ret) && arg3) {
8298                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8299                     return -TARGET_EFAULT;
8300                 rt_act->_sa_handler = oact._sa_handler;
8301                 rt_act->sa_mask = oact.sa_mask;
8302                 rt_act->sa_flags = oact.sa_flags;
8303                 unlock_user_struct(rt_act, arg3, 1);
8304             }
8305 #else
8306 #ifdef TARGET_SPARC
8307             target_ulong restorer = arg4;
8308             target_ulong sigsetsize = arg5;
8309 #else
8310             target_ulong sigsetsize = arg4;
8311 #endif
8312             struct target_sigaction *act;
8313             struct target_sigaction *oact;
8314 
8315             if (sigsetsize != sizeof(target_sigset_t)) {
8316                 return -TARGET_EINVAL;
8317             }
8318             if (arg2) {
8319                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8320                     return -TARGET_EFAULT;
8321                 }
8322 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8323                 act->ka_restorer = restorer;
8324 #endif
8325             } else {
8326                 act = NULL;
8327             }
8328             if (arg3) {
8329                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8330                     ret = -TARGET_EFAULT;
8331                     goto rt_sigaction_fail;
8332                 }
8333             } else
8334                 oact = NULL;
8335             ret = get_errno(do_sigaction(arg1, act, oact));
8336         rt_sigaction_fail:
8337             if (act)
8338                 unlock_user_struct(act, arg2, 0);
8339             if (oact)
8340                 unlock_user_struct(oact, arg3, 1);
8341 #endif
8342         }
8343         return ret;
8344 #ifdef TARGET_NR_sgetmask /* not on alpha */
8345     case TARGET_NR_sgetmask:
8346         {
8347             sigset_t cur_set;
8348             abi_ulong target_set;
8349             ret = do_sigprocmask(0, NULL, &cur_set);
8350             if (!ret) {
8351                 host_to_target_old_sigset(&target_set, &cur_set);
8352                 ret = target_set;
8353             }
8354         }
8355         return ret;
8356 #endif
8357 #ifdef TARGET_NR_ssetmask /* not on alpha */
8358     case TARGET_NR_ssetmask:
8359         {
8360             sigset_t set, oset;
8361             abi_ulong target_set = arg1;
8362             target_to_host_old_sigset(&set, &target_set);
8363             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8364             if (!ret) {
8365                 host_to_target_old_sigset(&target_set, &oset);
8366                 ret = target_set;
8367             }
8368         }
8369         return ret;
8370 #endif
8371 #ifdef TARGET_NR_sigprocmask
8372     case TARGET_NR_sigprocmask:
8373         {
8374 #if defined(TARGET_ALPHA)
8375             sigset_t set, oldset;
8376             abi_ulong mask;
8377             int how;
8378 
8379             switch (arg1) {
8380             case TARGET_SIG_BLOCK:
8381                 how = SIG_BLOCK;
8382                 break;
8383             case TARGET_SIG_UNBLOCK:
8384                 how = SIG_UNBLOCK;
8385                 break;
8386             case TARGET_SIG_SETMASK:
8387                 how = SIG_SETMASK;
8388                 break;
8389             default:
8390                 return -TARGET_EINVAL;
8391             }
8392             mask = arg2;
8393             target_to_host_old_sigset(&set, &mask);
8394 
8395             ret = do_sigprocmask(how, &set, &oldset);
8396             if (!is_error(ret)) {
8397                 host_to_target_old_sigset(&mask, &oldset);
8398                 ret = mask;
8399                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8400             }
8401 #else
8402             sigset_t set, oldset, *set_ptr;
8403             int how;
8404 
8405             if (arg2) {
8406                 switch (arg1) {
8407                 case TARGET_SIG_BLOCK:
8408                     how = SIG_BLOCK;
8409                     break;
8410                 case TARGET_SIG_UNBLOCK:
8411                     how = SIG_UNBLOCK;
8412                     break;
8413                 case TARGET_SIG_SETMASK:
8414                     how = SIG_SETMASK;
8415                     break;
8416                 default:
8417                     return -TARGET_EINVAL;
8418                 }
8419                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8420                     return -TARGET_EFAULT;
8421                 target_to_host_old_sigset(&set, p);
8422                 unlock_user(p, arg2, 0);
8423                 set_ptr = &set;
8424             } else {
8425                 how = 0;
8426                 set_ptr = NULL;
8427             }
8428             ret = do_sigprocmask(how, set_ptr, &oldset);
8429             if (!is_error(ret) && arg3) {
8430                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8431                     return -TARGET_EFAULT;
8432                 host_to_target_old_sigset(p, &oldset);
8433                 unlock_user(p, arg3, sizeof(target_sigset_t));
8434             }
8435 #endif
8436         }
8437         return ret;
8438 #endif
8439     case TARGET_NR_rt_sigprocmask:
8440         {
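            /*
             * Translate the guest SIG_BLOCK/UNBLOCK/SETMASK constant and
             * the new signal set, then copy the old set back out if the
             * caller asked for it.
             */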
8441             int how = arg1;
8442             sigset_t set, oldset, *set_ptr;
8443 
8444             if (arg4 != sizeof(target_sigset_t)) {
8445                 return -TARGET_EINVAL;
8446             }
8447 
8448             if (arg2) {
8449                 switch(how) {
8450                 case TARGET_SIG_BLOCK:
8451                     how = SIG_BLOCK;
8452                     break;
8453                 case TARGET_SIG_UNBLOCK:
8454                     how = SIG_UNBLOCK;
8455                     break;
8456                 case TARGET_SIG_SETMASK:
8457                     how = SIG_SETMASK;
8458                     break;
8459                 default:
8460                     return -TARGET_EINVAL;
8461                 }
8462                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8463                     return -TARGET_EFAULT;
8464                 target_to_host_sigset(&set, p);
8465                 unlock_user(p, arg2, 0);
8466                 set_ptr = &set;
8467             } else {
8468                 how = 0;
8469                 set_ptr = NULL;
8470             }
8471             ret = do_sigprocmask(how, set_ptr, &oldset);
8472             if (!is_error(ret) && arg3) {
8473                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8474                     return -TARGET_EFAULT;
8475                 host_to_target_sigset(p, &oldset);
8476                 unlock_user(p, arg3, sizeof(target_sigset_t));
8477             }
8478         }
8479         return ret;
8480 #ifdef TARGET_NR_sigpending
8481     case TARGET_NR_sigpending:
8482         {
8483             sigset_t set;
8484             ret = get_errno(sigpending(&set));
8485             if (!is_error(ret)) {
8486                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8487                     return -TARGET_EFAULT;
8488                 host_to_target_old_sigset(p, &set);
8489                 unlock_user(p, arg1, sizeof(target_sigset_t));
8490             }
8491         }
8492         return ret;
8493 #endif
8494     case TARGET_NR_rt_sigpending:
8495         {
8496             sigset_t set;
8497 
8498             /* Yes, this check is >, not != like most. We follow the kernel's
8499              * logic and it does it like this because it implements
8500              * NR_sigpending through the same code path, and in that case
8501              * the old_sigset_t is smaller in size.
8502              */
8503             if (arg2 > sizeof(target_sigset_t)) {
8504                 return -TARGET_EINVAL;
8505             }
8506 
8507             ret = get_errno(sigpending(&set));
8508             if (!is_error(ret)) {
8509                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8510                     return -TARGET_EFAULT;
8511                 host_to_target_sigset(p, &set);
8512                 unlock_user(p, arg1, sizeof(target_sigset_t));
8513             }
8514         }
8515         return ret;
8516 #ifdef TARGET_NR_sigsuspend
8517     case TARGET_NR_sigsuspend:
8518         {
8519             TaskState *ts = cpu->opaque;
8520 #if defined(TARGET_ALPHA)
8521             abi_ulong mask = arg1;
8522             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8523 #else
8524             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8525                 return -TARGET_EFAULT;
8526             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8527             unlock_user(p, arg1, 0);
8528 #endif
8529             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8530                                                SIGSET_T_SIZE));
8531             if (ret != -TARGET_ERESTARTSYS) {
8532                 ts->in_sigsuspend = 1;
8533             }
8534         }
8535         return ret;
8536 #endif
8537     case TARGET_NR_rt_sigsuspend:
8538         {
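            /*
             * Stash the requested mask in the TaskState and flag that we
             * are inside a sigsuspend; the signal handling code uses this
             * to restore the original mask after delivery.
             */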
8539             TaskState *ts = cpu->opaque;
8540 
8541             if (arg2 != sizeof(target_sigset_t)) {
8542                 return -TARGET_EINVAL;
8543             }
8544             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8545                 return -TARGET_EFAULT;
8546             target_to_host_sigset(&ts->sigsuspend_mask, p);
8547             unlock_user(p, arg1, 0);
8548             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8549                                                SIGSET_T_SIZE));
8550             if (ret != -TARGET_ERESTARTSYS) {
8551                 ts->in_sigsuspend = 1;
8552             }
8553         }
8554         return ret;
8555 #ifdef TARGET_NR_rt_sigtimedwait
8556     case TARGET_NR_rt_sigtimedwait:
8557         {
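            /*
             * On success both the host signal number and the siginfo
             * need converting back into the guest's representation.
             */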
8558             sigset_t set;
8559             struct timespec uts, *puts;
8560             siginfo_t uinfo;
8561 
8562             if (arg4 != sizeof(target_sigset_t)) {
8563                 return -TARGET_EINVAL;
8564             }
8565 
8566             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8567                 return -TARGET_EFAULT;
8568             target_to_host_sigset(&set, p);
8569             unlock_user(p, arg1, 0);
8570             if (arg3) {
8571                 puts = &uts;
8572                 if (target_to_host_timespec(puts, arg3)) {
                        return -TARGET_EFAULT;
                    }
8573             } else {
8574                 puts = NULL;
8575             }
8576             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8577                                                  SIGSET_T_SIZE));
8578             if (!is_error(ret)) {
8579                 if (arg2) {
8580                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8581                                   0);
8582                     if (!p) {
8583                         return -TARGET_EFAULT;
8584                     }
8585                     host_to_target_siginfo(p, &uinfo);
8586                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8587                 }
8588                 ret = host_to_target_signal(ret);
8589             }
8590         }
8591         return ret;
8592 #endif
8593     case TARGET_NR_rt_sigqueueinfo:
8594         {
8595             siginfo_t uinfo;
8596 
8597             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8598             if (!p) {
8599                 return -TARGET_EFAULT;
8600             }
8601             target_to_host_siginfo(&uinfo, p);
8602             unlock_user(p, arg3, 0);
8603             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8604         }
8605         return ret;
8606     case TARGET_NR_rt_tgsigqueueinfo:
8607         {
8608             siginfo_t uinfo;
8609 
8610             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8611             if (!p) {
8612                 return -TARGET_EFAULT;
8613             }
8614             target_to_host_siginfo(&uinfo, p);
8615             unlock_user(p, arg4, 0);
8616             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8617         }
8618         return ret;
8619 #ifdef TARGET_NR_sigreturn
8620     case TARGET_NR_sigreturn:
8621         if (block_signals()) {
8622             return -TARGET_ERESTARTSYS;
8623         }
8624         return do_sigreturn(cpu_env);
8625 #endif
8626     case TARGET_NR_rt_sigreturn:
8627         if (block_signals()) {
8628             return -TARGET_ERESTARTSYS;
8629         }
8630         return do_rt_sigreturn(cpu_env);
8631     case TARGET_NR_sethostname:
8632         if (!(p = lock_user_string(arg1)))
8633             return -TARGET_EFAULT;
8634         ret = get_errno(sethostname(p, arg2));
8635         unlock_user(p, arg1, 0);
8636         return ret;
8637 #ifdef TARGET_NR_setrlimit
8638     case TARGET_NR_setrlimit:
8639         {
8640             int resource = target_to_host_resource(arg1);
8641             struct target_rlimit *target_rlim;
8642             struct rlimit rlim;
8643             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8644                 return -TARGET_EFAULT;
8645             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8646             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8647             unlock_user_struct(target_rlim, arg2, 0);
8648             /*
8649              * If we just passed through resource limit settings for memory then
8650              * they would also apply to QEMU's own allocations, and QEMU will
8651              * crash or hang or die if its allocations fail. Ideally we would
8652              * track the guest allocations in QEMU and apply the limits ourselves.
8653              * For now, just tell the guest the call succeeded but don't actually
8654              * limit anything.
8655              */
8656             if (resource != RLIMIT_AS &&
8657                 resource != RLIMIT_DATA &&
8658                 resource != RLIMIT_STACK) {
8659                 return get_errno(setrlimit(resource, &rlim));
8660             } else {
8661                 return 0;
8662             }
8663         }
8664 #endif
8665 #ifdef TARGET_NR_getrlimit
8666     case TARGET_NR_getrlimit:
8667         {
8668             int resource = target_to_host_resource(arg1);
8669             struct target_rlimit *target_rlim;
8670             struct rlimit rlim;
8671 
8672             ret = get_errno(getrlimit(resource, &rlim));
8673             if (!is_error(ret)) {
8674                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8675                     return -TARGET_EFAULT;
8676                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8677                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8678                 unlock_user_struct(target_rlim, arg2, 1);
8679             }
8680         }
8681         return ret;
8682 #endif
8683     case TARGET_NR_getrusage:
8684         {
8685             struct rusage rusage;
8686             ret = get_errno(getrusage(arg1, &rusage));
8687             if (!is_error(ret)) {
8688                 ret = host_to_target_rusage(arg2, &rusage);
8689             }
8690         }
8691         return ret;
8692 #if defined(TARGET_NR_gettimeofday)
8693     case TARGET_NR_gettimeofday:
8694         {
8695             struct timeval tv;
8696             ret = get_errno(gettimeofday(&tv, NULL));
8697             if (!is_error(ret)) {
8698                 if (copy_to_user_timeval(arg1, &tv))
8699                     return -TARGET_EFAULT;
8700             }
8701         }
8702         return ret;
8703 #endif
8704 #if defined(TARGET_NR_settimeofday)
8705     case TARGET_NR_settimeofday:
8706         {
8707             struct timeval tv, *ptv = NULL;
8708             struct timezone tz, *ptz = NULL;
8709 
8710             if (arg1) {
8711                 if (copy_from_user_timeval(&tv, arg1)) {
8712                     return -TARGET_EFAULT;
8713                 }
8714                 ptv = &tv;
8715             }
8716 
8717             if (arg2) {
8718                 if (copy_from_user_timezone(&tz, arg2)) {
8719                     return -TARGET_EFAULT;
8720                 }
8721                 ptz = &tz;
8722             }
8723 
8724             return get_errno(settimeofday(ptv, ptz));
8725         }
8726 #endif
8727 #if defined(TARGET_NR_select)
8728     case TARGET_NR_select:
8729 #if defined(TARGET_WANT_NI_OLD_SELECT)
8730         /* some architectures used to have old_select here
8731          * but now return ENOSYS for it.
8732          */
8733         ret = -TARGET_ENOSYS;
8734 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8735         ret = do_old_select(arg1);
8736 #else
8737         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8738 #endif
8739         return ret;
8740 #endif
8741 #ifdef TARGET_NR_pselect6
8742     case TARGET_NR_pselect6:
8743         {
8744             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8745             fd_set rfds, wfds, efds;
8746             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8747             struct timespec ts, *ts_ptr;
8748 
8749             /*
8750              * The 6th arg is actually two args smashed together,
8751              * so we cannot use the C library.
8752              */
8753             sigset_t set;
8754             struct {
8755                 sigset_t *set;
8756                 size_t size;
8757             } sig, *sig_ptr;
8758 
8759             abi_ulong arg_sigset, arg_sigsize, *arg7;
8760             target_sigset_t *target_sigset;
8761 
8762             n = arg1;
8763             rfd_addr = arg2;
8764             wfd_addr = arg3;
8765             efd_addr = arg4;
8766             ts_addr = arg5;
8767 
8768             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8769             if (ret) {
8770                 return ret;
8771             }
8772             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8773             if (ret) {
8774                 return ret;
8775             }
8776             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8777             if (ret) {
8778                 return ret;
8779             }
8780 
8781             /*
8782              * This takes a timespec, and not a timeval, so we cannot
8783              * use the do_select() helper ...
8784              */
8785             if (ts_addr) {
8786                 if (target_to_host_timespec(&ts, ts_addr)) {
8787                     return -TARGET_EFAULT;
8788                 }
8789                 ts_ptr = &ts;
8790             } else {
8791                 ts_ptr = NULL;
8792             }
8793 
8794             /* Extract the two packed args for the sigset */
8795             if (arg6) {
8796                 sig_ptr = &sig;
8797                 sig.size = SIGSET_T_SIZE;
8798 
8799                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8800                 if (!arg7) {
8801                     return -TARGET_EFAULT;
8802                 }
8803                 arg_sigset = tswapal(arg7[0]);
8804                 arg_sigsize = tswapal(arg7[1]);
8805                 unlock_user(arg7, arg6, 0);
8806 
8807                 if (arg_sigset) {
8808                     sig.set = &set;
8809                     if (arg_sigsize != sizeof(*target_sigset)) {
8810                         /* Like the kernel, we enforce correct size sigsets */
8811                         return -TARGET_EINVAL;
8812                     }
8813                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8814                                               sizeof(*target_sigset), 1);
8815                     if (!target_sigset) {
8816                         return -TARGET_EFAULT;
8817                     }
8818                     target_to_host_sigset(&set, target_sigset);
8819                     unlock_user(target_sigset, arg_sigset, 0);
8820                 } else {
8821                     sig.set = NULL;
8822                 }
8823             } else {
8824                 sig_ptr = NULL;
8825             }
8826 
8827             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8828                                           ts_ptr, sig_ptr));
8829 
8830             if (!is_error(ret)) {
8831                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8832                     return -TARGET_EFAULT;
8833                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8834                     return -TARGET_EFAULT;
8835                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8836                     return -TARGET_EFAULT;
8837 
8838                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8839                     return -TARGET_EFAULT;
8840             }
8841         }
8842         return ret;
8843 #endif
8844 #ifdef TARGET_NR_symlink
8845     case TARGET_NR_symlink:
8846         {
8847             void *p2;
8848             p = lock_user_string(arg1);
8849             p2 = lock_user_string(arg2);
8850             if (!p || !p2)
8851                 ret = -TARGET_EFAULT;
8852             else
8853                 ret = get_errno(symlink(p, p2));
8854             unlock_user(p2, arg2, 0);
8855             unlock_user(p, arg1, 0);
8856         }
8857         return ret;
8858 #endif
8859 #if defined(TARGET_NR_symlinkat)
8860     case TARGET_NR_symlinkat:
8861         {
8862             void *p2;
8863             p  = lock_user_string(arg1);
8864             p2 = lock_user_string(arg3);
8865             if (!p || !p2)
8866                 ret = -TARGET_EFAULT;
8867             else
8868                 ret = get_errno(symlinkat(p, arg2, p2));
8869             unlock_user(p2, arg3, 0);
8870             unlock_user(p, arg1, 0);
8871         }
8872         return ret;
8873 #endif
8874 #ifdef TARGET_NR_readlink
8875     case TARGET_NR_readlink:
8876         {
8877             void *p2;
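            /*
             * readlink of /proc/self/exe (and the pid-specific variants)
             * is intercepted so the guest sees the path of the guest
             * binary rather than the path of QEMU itself.
             */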
8878             p = lock_user_string(arg1);
8879             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8880             if (!p || !p2) {
8881                 ret = -TARGET_EFAULT;
8882             } else if (!arg3) {
8883                 /* Short circuit this for the magic exe check. */
8884                 ret = -TARGET_EINVAL;
8885             } else if (is_proc_myself((const char *)p, "exe")) {
8886                 char real[PATH_MAX], *temp;
8887                 temp = realpath(exec_path, real);
8888                 /* Return value is # of bytes that we wrote to the buffer. */
8889                 if (temp == NULL) {
8890                     ret = get_errno(-1);
8891                 } else {
8892                     /* Don't worry about sign mismatch as earlier mapping
8893                      * logic would have thrown a bad address error. */
8894                     ret = MIN(strlen(real), arg3);
8895                     /* We cannot NUL terminate the string. */
8896                     memcpy(p2, real, ret);
8897                 }
8898             } else {
8899                 ret = get_errno(readlink(path(p), p2, arg3));
8900             }
8901             unlock_user(p2, arg2, ret);
8902             unlock_user(p, arg1, 0);
8903         }
8904         return ret;
8905 #endif
8906 #if defined(TARGET_NR_readlinkat)
8907     case TARGET_NR_readlinkat:
8908         {
8909             void *p2;
8910             p  = lock_user_string(arg2);
8911             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8912             if (!p || !p2) {
8913                 ret = -TARGET_EFAULT;
8914             } else if (is_proc_myself((const char *)p, "exe")) {
8915                 char real[PATH_MAX], *temp;
8916                 temp = realpath(exec_path, real);
8917                 ret = temp == NULL ? get_errno(-1) : strlen(real);
8918                 snprintf((char *)p2, arg4, "%s", real);
8919             } else {
8920                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8921             }
8922             unlock_user(p2, arg3, ret);
8923             unlock_user(p, arg2, 0);
8924         }
8925         return ret;
8926 #endif
8927 #ifdef TARGET_NR_swapon
8928     case TARGET_NR_swapon:
8929         if (!(p = lock_user_string(arg1)))
8930             return -TARGET_EFAULT;
8931         ret = get_errno(swapon(p, arg2));
8932         unlock_user(p, arg1, 0);
8933         return ret;
8934 #endif
8935     case TARGET_NR_reboot:
8936         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8937             /* arg4 is only used by LINUX_REBOOT_CMD_RESTART2; ignore it otherwise */
8938             p = lock_user_string(arg4);
8939             if (!p) {
8940                 return -TARGET_EFAULT;
8941             }
8942             ret = get_errno(reboot(arg1, arg2, arg3, p));
8943             unlock_user(p, arg4, 0);
8944         } else {
8945             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8946         }
8947         return ret;
8948 #ifdef TARGET_NR_mmap
8949     case TARGET_NR_mmap:
8950 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8951     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8952     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8953     || defined(TARGET_S390X)
8954         {
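            /*
             * On these targets the old mmap syscall takes a single guest
             * pointer to an array of six arguments rather than passing
             * them in registers.
             */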
8955             abi_ulong *v;
8956             abi_ulong v1, v2, v3, v4, v5, v6;
8957             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8958                 return -TARGET_EFAULT;
8959             v1 = tswapal(v[0]);
8960             v2 = tswapal(v[1]);
8961             v3 = tswapal(v[2]);
8962             v4 = tswapal(v[3]);
8963             v5 = tswapal(v[4]);
8964             v6 = tswapal(v[5]);
8965             unlock_user(v, arg1, 0);
8966             ret = get_errno(target_mmap(v1, v2, v3,
8967                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8968                                         v5, v6));
8969         }
8970 #else
8971         ret = get_errno(target_mmap(arg1, arg2, arg3,
8972                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8973                                     arg5,
8974                                     arg6));
8975 #endif
8976         return ret;
8977 #endif
8978 #ifdef TARGET_NR_mmap2
8979     case TARGET_NR_mmap2:
8980 #ifndef MMAP_SHIFT
8981 #define MMAP_SHIFT 12
8982 #endif
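        /*
         * mmap2 passes the file offset in units of (1 << MMAP_SHIFT)
         * bytes rather than in bytes.
         */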
8983         ret = target_mmap(arg1, arg2, arg3,
8984                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8985                           arg5, arg6 << MMAP_SHIFT);
8986         return get_errno(ret);
8987 #endif
8988     case TARGET_NR_munmap:
8989         return get_errno(target_munmap(arg1, arg2));
8990     case TARGET_NR_mprotect:
8991         {
8992             TaskState *ts = cpu->opaque;
8993             /* Special hack to detect libc making the stack executable.  */
8994             if ((arg3 & PROT_GROWSDOWN)
8995                 && arg1 >= ts->info->stack_limit
8996                 && arg1 <= ts->info->start_stack) {
8997                 arg3 &= ~PROT_GROWSDOWN;
8998                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8999                 arg1 = ts->info->stack_limit;
9000             }
9001         }
9002         return get_errno(target_mprotect(arg1, arg2, arg3));
9003 #ifdef TARGET_NR_mremap
9004     case TARGET_NR_mremap:
9005         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9006 #endif
9007         /* ??? msync/mlock/munlock are broken for softmmu.  */
9008 #ifdef TARGET_NR_msync
9009     case TARGET_NR_msync:
9010         return get_errno(msync(g2h(arg1), arg2, arg3));
9011 #endif
9012 #ifdef TARGET_NR_mlock
9013     case TARGET_NR_mlock:
9014         return get_errno(mlock(g2h(arg1), arg2));
9015 #endif
9016 #ifdef TARGET_NR_munlock
9017     case TARGET_NR_munlock:
9018         return get_errno(munlock(g2h(arg1), arg2));
9019 #endif
9020 #ifdef TARGET_NR_mlockall
9021     case TARGET_NR_mlockall:
9022         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9023 #endif
9024 #ifdef TARGET_NR_munlockall
9025     case TARGET_NR_munlockall:
9026         return get_errno(munlockall());
9027 #endif
9028 #ifdef TARGET_NR_truncate
9029     case TARGET_NR_truncate:
9030         if (!(p = lock_user_string(arg1)))
9031             return -TARGET_EFAULT;
9032         ret = get_errno(truncate(p, arg2));
9033         unlock_user(p, arg1, 0);
9034         return ret;
9035 #endif
9036 #ifdef TARGET_NR_ftruncate
9037     case TARGET_NR_ftruncate:
9038         return get_errno(ftruncate(arg1, arg2));
9039 #endif
9040     case TARGET_NR_fchmod:
9041         return get_errno(fchmod(arg1, arg2));
9042 #if defined(TARGET_NR_fchmodat)
9043     case TARGET_NR_fchmodat:
9044         if (!(p = lock_user_string(arg2)))
9045             return -TARGET_EFAULT;
9046         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9047         unlock_user(p, arg2, 0);
9048         return ret;
9049 #endif
9050     case TARGET_NR_getpriority:
9051         /* Note that negative values are valid for getpriority, so we must
9052            differentiate based on errno settings.  */
9053         errno = 0;
9054         ret = getpriority(arg1, arg2);
9055         if (ret == -1 && errno != 0) {
9056             return -host_to_target_errno(errno);
9057         }
9058 #ifdef TARGET_ALPHA
9059         /* Return value is the unbiased priority.  Signal no error.  */
9060         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9061 #else
9062         /* Return value is a biased priority to avoid negative numbers.  */
9063         ret = 20 - ret;
9064 #endif
9065         return ret;
9066     case TARGET_NR_setpriority:
9067         return get_errno(setpriority(arg1, arg2, arg3));
9068 #ifdef TARGET_NR_statfs
9069     case TARGET_NR_statfs:
9070         if (!(p = lock_user_string(arg1))) {
9071             return -TARGET_EFAULT;
9072         }
9073         ret = get_errno(statfs(path(p), &stfs));
9074         unlock_user(p, arg1, 0);
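    /*
     * TARGET_NR_fstatfs below jumps back to convert_statfs to share
     * the conversion of the host statfs result into the guest layout.
     */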
9075     convert_statfs:
9076         if (!is_error(ret)) {
9077             struct target_statfs *target_stfs;
9078 
9079             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9080                 return -TARGET_EFAULT;
9081             __put_user(stfs.f_type, &target_stfs->f_type);
9082             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9083             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9084             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9085             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9086             __put_user(stfs.f_files, &target_stfs->f_files);
9087             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9088             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9089             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9090             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9091             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9092 #ifdef _STATFS_F_FLAGS
9093             __put_user(stfs.f_flags, &target_stfs->f_flags);
9094 #else
9095             __put_user(0, &target_stfs->f_flags);
9096 #endif
9097             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9098             unlock_user_struct(target_stfs, arg2, 1);
9099         }
9100         return ret;
9101 #endif
9102 #ifdef TARGET_NR_fstatfs
9103     case TARGET_NR_fstatfs:
9104         ret = get_errno(fstatfs(arg1, &stfs));
9105         goto convert_statfs;
9106 #endif
9107 #ifdef TARGET_NR_statfs64
9108     case TARGET_NR_statfs64:
9109         if (!(p = lock_user_string(arg1))) {
9110             return -TARGET_EFAULT;
9111         }
9112         ret = get_errno(statfs(path(p), &stfs));
9113         unlock_user(p, arg1, 0);
9114     convert_statfs64:
9115         if (!is_error(ret)) {
9116             struct target_statfs64 *target_stfs;
9117 
9118             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9119                 return -TARGET_EFAULT;
9120             __put_user(stfs.f_type, &target_stfs->f_type);
9121             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9122             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9123             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9124             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9125             __put_user(stfs.f_files, &target_stfs->f_files);
9126             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9127             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9128             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9129             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9130             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9131             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9132             unlock_user_struct(target_stfs, arg3, 1);
9133         }
9134         return ret;
9135     case TARGET_NR_fstatfs64:
9136         ret = get_errno(fstatfs(arg1, &stfs));
9137         goto convert_statfs64;
9138 #endif
9139 #ifdef TARGET_NR_socketcall
9140     case TARGET_NR_socketcall:
9141         return do_socketcall(arg1, arg2);
9142 #endif
9143 #ifdef TARGET_NR_accept
9144     case TARGET_NR_accept:
9145         return do_accept4(arg1, arg2, arg3, 0);
9146 #endif
9147 #ifdef TARGET_NR_accept4
9148     case TARGET_NR_accept4:
9149         return do_accept4(arg1, arg2, arg3, arg4);
9150 #endif
9151 #ifdef TARGET_NR_bind
9152     case TARGET_NR_bind:
9153         return do_bind(arg1, arg2, arg3);
9154 #endif
9155 #ifdef TARGET_NR_connect
9156     case TARGET_NR_connect:
9157         return do_connect(arg1, arg2, arg3);
9158 #endif
9159 #ifdef TARGET_NR_getpeername
9160     case TARGET_NR_getpeername:
9161         return do_getpeername(arg1, arg2, arg3);
9162 #endif
9163 #ifdef TARGET_NR_getsockname
9164     case TARGET_NR_getsockname:
9165         return do_getsockname(arg1, arg2, arg3);
9166 #endif
9167 #ifdef TARGET_NR_getsockopt
9168     case TARGET_NR_getsockopt:
9169         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9170 #endif
9171 #ifdef TARGET_NR_listen
9172     case TARGET_NR_listen:
9173         return get_errno(listen(arg1, arg2));
9174 #endif
9175 #ifdef TARGET_NR_recv
9176     case TARGET_NR_recv:
9177         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9178 #endif
9179 #ifdef TARGET_NR_recvfrom
9180     case TARGET_NR_recvfrom:
9181         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9182 #endif
9183 #ifdef TARGET_NR_recvmsg
9184     case TARGET_NR_recvmsg:
9185         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9186 #endif
9187 #ifdef TARGET_NR_send
9188     case TARGET_NR_send:
9189         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9190 #endif
9191 #ifdef TARGET_NR_sendmsg
9192     case TARGET_NR_sendmsg:
9193         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9194 #endif
9195 #ifdef TARGET_NR_sendmmsg
9196     case TARGET_NR_sendmmsg:
9197         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9198 #endif
9199 #ifdef TARGET_NR_recvmmsg
9200     case TARGET_NR_recvmmsg:
9201         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9202 #endif
9203 #ifdef TARGET_NR_sendto
9204     case TARGET_NR_sendto:
9205         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9206 #endif
9207 #ifdef TARGET_NR_shutdown
9208     case TARGET_NR_shutdown:
9209         return get_errno(shutdown(arg1, arg2));
9210 #endif
9211 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9212     case TARGET_NR_getrandom:
9213         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9214         if (!p) {
9215             return -TARGET_EFAULT;
9216         }
9217         ret = get_errno(getrandom(p, arg2, arg3));
9218         unlock_user(p, arg1, ret);
9219         return ret;
9220 #endif
9221 #ifdef TARGET_NR_socket
9222     case TARGET_NR_socket:
9223         return do_socket(arg1, arg2, arg3);
9224 #endif
9225 #ifdef TARGET_NR_socketpair
9226     case TARGET_NR_socketpair:
9227         return do_socketpair(arg1, arg2, arg3, arg4);
9228 #endif
9229 #ifdef TARGET_NR_setsockopt
9230     case TARGET_NR_setsockopt:
9231         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9232 #endif
9233 #if defined(TARGET_NR_syslog)
9234     case TARGET_NR_syslog:
9235         {
9236             int len = arg2;
9237 
9238             switch (arg1) {
9239             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9240             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9241             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9242             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9243             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9244             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9245             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9246             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9247                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9248             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9249             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9250             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9251                 {
9252                     if (len < 0) {
9253                         return -TARGET_EINVAL;
9254                     }
9255                     if (len == 0) {
9256                         return 0;
9257                     }
9258                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9259                     if (!p) {
9260                         return -TARGET_EFAULT;
9261                     }
9262                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9263                     unlock_user(p, arg2, arg3);
9264                 }
9265                 return ret;
9266             default:
9267                 return -TARGET_EINVAL;
9268             }
9269         }
9270         break;
9271 #endif
9272     case TARGET_NR_setitimer:
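             /*
              * The guest's struct itimerval is handled as two consecutive
              * target_timevals: it_interval first, then it_value.
              */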
9273         {
9274             struct itimerval value, ovalue, *pvalue;
9275 
9276             if (arg2) {
9277                 pvalue = &value;
9278                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9279                     || copy_from_user_timeval(&pvalue->it_value,
9280                                               arg2 + sizeof(struct target_timeval)))
9281                     return -TARGET_EFAULT;
9282             } else {
9283                 pvalue = NULL;
9284             }
9285             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9286             if (!is_error(ret) && arg3) {
9287                 if (copy_to_user_timeval(arg3,
9288                                          &ovalue.it_interval)
9289                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9290                                             &ovalue.it_value))
9291                     return -TARGET_EFAULT;
9292             }
9293         }
9294         return ret;
9295     case TARGET_NR_getitimer:
9296         {
9297             struct itimerval value;
9298 
9299             ret = get_errno(getitimer(arg1, &value));
9300             if (!is_error(ret) && arg2) {
9301                 if (copy_to_user_timeval(arg2,
9302                                          &value.it_interval)
9303                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9304                                             &value.it_value))
9305                     return -TARGET_EFAULT;
9306             }
9307         }
9308         return ret;
9309 #ifdef TARGET_NR_stat
9310     case TARGET_NR_stat:
9311         if (!(p = lock_user_string(arg1))) {
9312             return -TARGET_EFAULT;
9313         }
9314         ret = get_errno(stat(path(p), &st));
9315         unlock_user(p, arg1, 0);
9316         goto do_stat;
9317 #endif
9318 #ifdef TARGET_NR_lstat
9319     case TARGET_NR_lstat:
9320         if (!(p = lock_user_string(arg1))) {
9321             return -TARGET_EFAULT;
9322         }
9323         ret = get_errno(lstat(path(p), &st));
9324         unlock_user(p, arg1, 0);
9325         goto do_stat;
9326 #endif
9327 #ifdef TARGET_NR_fstat
9328     case TARGET_NR_fstat:
9329         {
9330             ret = get_errno(fstat(arg1, &st));
9331 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9332         do_stat:
9333 #endif
9334             if (!is_error(ret)) {
9335                 struct target_stat *target_st;
9336 
9337                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9338                     return -TARGET_EFAULT;
9339                 memset(target_st, 0, sizeof(*target_st));
9340                 __put_user(st.st_dev, &target_st->st_dev);
9341                 __put_user(st.st_ino, &target_st->st_ino);
9342                 __put_user(st.st_mode, &target_st->st_mode);
9343                 __put_user(st.st_uid, &target_st->st_uid);
9344                 __put_user(st.st_gid, &target_st->st_gid);
9345                 __put_user(st.st_nlink, &target_st->st_nlink);
9346                 __put_user(st.st_rdev, &target_st->st_rdev);
9347                 __put_user(st.st_size, &target_st->st_size);
9348                 __put_user(st.st_blksize, &target_st->st_blksize);
9349                 __put_user(st.st_blocks, &target_st->st_blocks);
9350                 __put_user(st.st_atime, &target_st->target_st_atime);
9351                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9352                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9353 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9354     defined(TARGET_STAT_HAVE_NSEC)
9355                 __put_user(st.st_atim.tv_nsec,
9356                            &target_st->target_st_atime_nsec);
9357                 __put_user(st.st_mtim.tv_nsec,
9358                            &target_st->target_st_mtime_nsec);
9359                 __put_user(st.st_ctim.tv_nsec,
9360                            &target_st->target_st_ctime_nsec);
9361 #endif
9362                 unlock_user_struct(target_st, arg2, 1);
9363             }
9364         }
9365         return ret;
9366 #endif
9367     case TARGET_NR_vhangup:
9368         return get_errno(vhangup());
9369 #ifdef TARGET_NR_syscall
9370     case TARGET_NR_syscall:
9371         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9372                           arg6, arg7, arg8, 0);
9373 #endif
9374 #if defined(TARGET_NR_wait4)
9375     case TARGET_NR_wait4:
9376         {
9377             int status;
9378             abi_long status_ptr = arg2;
9379             struct rusage rusage, *rusage_ptr;
9380             abi_ulong target_rusage = arg4;
9381             abi_long rusage_err;
9382             if (target_rusage)
9383                 rusage_ptr = &rusage;
9384             else
9385                 rusage_ptr = NULL;
9386             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9387             if (!is_error(ret)) {
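                     /*
                      * ret == 0 means WNOHANG was given and no child had
                      * changed state; the kernel leaves *status untouched in
                      * that case, so don't copy a stale value to the guest.
                      */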
9388                 if (status_ptr && ret) {
9389                     status = host_to_target_waitstatus(status);
9390                     if (put_user_s32(status, status_ptr))
9391                         return -TARGET_EFAULT;
9392                 }
9393                 if (target_rusage) {
9394                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9395                     if (rusage_err) {
9396                         ret = rusage_err;
9397                     }
9398                 }
9399             }
9400         }
9401         return ret;
9402 #endif
9403 #ifdef TARGET_NR_swapoff
9404     case TARGET_NR_swapoff:
9405         if (!(p = lock_user_string(arg1)))
9406             return -TARGET_EFAULT;
9407         ret = get_errno(swapoff(p));
9408         unlock_user(p, arg1, 0);
9409         return ret;
9410 #endif
9411     case TARGET_NR_sysinfo:
9412         {
9413             struct target_sysinfo *target_value;
9414             struct sysinfo value;
9415             ret = get_errno(sysinfo(&value));
9416             if (!is_error(ret) && arg1)
9417             {
9418                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9419                     return -TARGET_EFAULT;
9420                 __put_user(value.uptime, &target_value->uptime);
9421                 __put_user(value.loads[0], &target_value->loads[0]);
9422                 __put_user(value.loads[1], &target_value->loads[1]);
9423                 __put_user(value.loads[2], &target_value->loads[2]);
9424                 __put_user(value.totalram, &target_value->totalram);
9425                 __put_user(value.freeram, &target_value->freeram);
9426                 __put_user(value.sharedram, &target_value->sharedram);
9427                 __put_user(value.bufferram, &target_value->bufferram);
9428                 __put_user(value.totalswap, &target_value->totalswap);
9429                 __put_user(value.freeswap, &target_value->freeswap);
9430                 __put_user(value.procs, &target_value->procs);
9431                 __put_user(value.totalhigh, &target_value->totalhigh);
9432                 __put_user(value.freehigh, &target_value->freehigh);
9433                 __put_user(value.mem_unit, &target_value->mem_unit);
9434                 unlock_user_struct(target_value, arg1, 1);
9435             }
9436         }
9437         return ret;
9438 #ifdef TARGET_NR_ipc
9439     case TARGET_NR_ipc:
9440         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9441 #endif
9442 #ifdef TARGET_NR_semget
9443     case TARGET_NR_semget:
9444         return get_errno(semget(arg1, arg2, arg3));
9445 #endif
9446 #ifdef TARGET_NR_semop
9447     case TARGET_NR_semop:
9448         return do_semop(arg1, arg2, arg3);
9449 #endif
9450 #ifdef TARGET_NR_semctl
9451     case TARGET_NR_semctl:
9452         return do_semctl(arg1, arg2, arg3, arg4);
9453 #endif
9454 #ifdef TARGET_NR_msgctl
9455     case TARGET_NR_msgctl:
9456         return do_msgctl(arg1, arg2, arg3);
9457 #endif
9458 #ifdef TARGET_NR_msgget
9459     case TARGET_NR_msgget:
9460         return get_errno(msgget(arg1, arg2));
9461 #endif
9462 #ifdef TARGET_NR_msgrcv
9463     case TARGET_NR_msgrcv:
9464         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9465 #endif
9466 #ifdef TARGET_NR_msgsnd
9467     case TARGET_NR_msgsnd:
9468         return do_msgsnd(arg1, arg2, arg3, arg4);
9469 #endif
9470 #ifdef TARGET_NR_shmget
9471     case TARGET_NR_shmget:
9472         return get_errno(shmget(arg1, arg2, arg3));
9473 #endif
9474 #ifdef TARGET_NR_shmctl
9475     case TARGET_NR_shmctl:
9476         return do_shmctl(arg1, arg2, arg3);
9477 #endif
9478 #ifdef TARGET_NR_shmat
9479     case TARGET_NR_shmat:
9480         return do_shmat(cpu_env, arg1, arg2, arg3);
9481 #endif
9482 #ifdef TARGET_NR_shmdt
9483     case TARGET_NR_shmdt:
9484         return do_shmdt(arg1);
9485 #endif
9486     case TARGET_NR_fsync:
9487         return get_errno(fsync(arg1));
9488     case TARGET_NR_clone:
9489         /* Linux manages to have three different orderings for its
9490          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9491          * match the kernel's CONFIG_CLONE_* settings.
9492          * Microblaze is further special in that it uses a sixth
9493          * implicit argument to clone for the TLS pointer.
9494          */
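             /*
              * Mapping used below, assuming do_fork() takes, after cpu_env:
              * (flags, newsp, parent_tidptr, tls, child_tidptr):
              *   default:          guest order (flags, newsp, ptid, ctid, tls)
              *   CLONE_BACKWARDS:  guest order (flags, newsp, ptid, tls, ctid)
              *   CLONE_BACKWARDS2: guest order (newsp, flags, ptid, ctid, tls)
              *   Microblaze:       guest order (flags, newsp, -, ptid, ctid, tls),
              *                     i.e. TLS in the sixth argument register
              */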
9495 #if defined(TARGET_MICROBLAZE)
9496         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9497 #elif defined(TARGET_CLONE_BACKWARDS)
9498         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9499 #elif defined(TARGET_CLONE_BACKWARDS2)
9500         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9501 #else
9502         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9503 #endif
9504         return ret;
9505 #ifdef __NR_exit_group
9506         /* new thread calls */
9507     case TARGET_NR_exit_group:
9508         preexit_cleanup(cpu_env, arg1);
9509         return get_errno(exit_group(arg1));
9510 #endif
9511     case TARGET_NR_setdomainname:
9512         if (!(p = lock_user_string(arg1)))
9513             return -TARGET_EFAULT;
9514         ret = get_errno(setdomainname(p, arg2));
9515         unlock_user(p, arg1, 0);
9516         return ret;
9517     case TARGET_NR_uname:
9518         /* No need to transcode because we use the Linux syscall directly. */
9519         {
9520             struct new_utsname * buf;
9521 
9522             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9523                 return -TARGET_EFAULT;
9524             ret = get_errno(sys_uname(buf));
9525             if (!is_error(ret)) {
9526                 /* Overwrite the native machine name with whatever is being
9527                    emulated. */
9528                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9529                           sizeof(buf->machine));
9530                 /* Allow the user to override the reported release.  */
9531                 if (qemu_uname_release && *qemu_uname_release) {
9532                     g_strlcpy(buf->release, qemu_uname_release,
9533                               sizeof(buf->release));
9534                 }
9535             }
9536             unlock_user_struct(buf, arg1, 1);
9537         }
9538         return ret;
9539 #ifdef TARGET_I386
9540     case TARGET_NR_modify_ldt:
9541         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9542 #if !defined(TARGET_X86_64)
9543     case TARGET_NR_vm86:
9544         return do_vm86(cpu_env, arg1, arg2);
9545 #endif
9546 #endif
9547 #if defined(TARGET_NR_adjtimex)
9548     case TARGET_NR_adjtimex:
9549         {
9550             struct timex host_buf;
9551 
9552             if (target_to_host_timex(&host_buf, arg1) != 0) {
9553                 return -TARGET_EFAULT;
9554             }
9555             ret = get_errno(adjtimex(&host_buf));
9556             if (!is_error(ret)) {
9557                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9558                     return -TARGET_EFAULT;
9559                 }
9560             }
9561         }
9562         return ret;
9563 #endif
9564 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9565     case TARGET_NR_clock_adjtime:
9566         {
9567             struct timex htx, *phtx = &htx;
9568 
9569             if (target_to_host_timex(phtx, arg2) != 0) {
9570                 return -TARGET_EFAULT;
9571             }
9572             ret = get_errno(clock_adjtime(arg1, phtx));
9573             if (!is_error(ret) && phtx) {
9574                 if (host_to_target_timex(arg2, phtx) != 0) {
9575                     return -TARGET_EFAULT;
9576                 }
9577             }
9578         }
9579         return ret;
9580 #endif
9581     case TARGET_NR_getpgid:
9582         return get_errno(getpgid(arg1));
9583     case TARGET_NR_fchdir:
9584         return get_errno(fchdir(arg1));
9585     case TARGET_NR_personality:
9586         return get_errno(personality(arg1));
9587 #ifdef TARGET_NR__llseek /* Not on alpha */
9588     case TARGET_NR__llseek:
9589         {
9590             int64_t res;
9591 #if !defined(__NR_llseek)
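                 /*
                  * The host has no llseek syscall (e.g. a 64-bit host), so
                  * combine the guest's high/low offset halves and use a
                  * plain lseek() instead.
                  */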
9592             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9593             if (res == -1) {
9594                 ret = get_errno(res);
9595             } else {
9596                 ret = 0;
9597             }
9598 #else
9599             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9600 #endif
9601             if ((ret == 0) && put_user_s64(res, arg4)) {
9602                 return -TARGET_EFAULT;
9603             }
9604         }
9605         return ret;
9606 #endif
9607 #ifdef TARGET_NR_getdents
9608     case TARGET_NR_getdents:
9609 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9610 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
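             /*
              * 32-bit guest on a 64-bit host: the host linux_dirent records
              * (long d_ino/d_off) are wider than target_dirent, so read them
              * into a bounce buffer and repack the entries for the guest.
              */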
9611         {
9612             struct target_dirent *target_dirp;
9613             struct linux_dirent *dirp;
9614             abi_long count = arg3;
9615 
9616             dirp = g_try_malloc(count);
9617             if (!dirp) {
9618                 return -TARGET_ENOMEM;
9619             }
9620 
9621             ret = get_errno(sys_getdents(arg1, dirp, count));
9622             if (!is_error(ret)) {
9623                 struct linux_dirent *de;
9624                 struct target_dirent *tde;
9625                 int len = ret;
9626                 int reclen, treclen;
9627                 int count1, tnamelen;
9628 
9629                 count1 = 0;
9630                 de = dirp;
9631                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9632                     return -TARGET_EFAULT;
9633                 tde = target_dirp;
9634                 while (len > 0) {
9635                     reclen = de->d_reclen;
9636                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9637                     assert(tnamelen >= 0);
9638                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9639                     assert(count1 + treclen <= count);
9640                     tde->d_reclen = tswap16(treclen);
9641                     tde->d_ino = tswapal(de->d_ino);
9642                     tde->d_off = tswapal(de->d_off);
9643                     memcpy(tde->d_name, de->d_name, tnamelen);
9644                     de = (struct linux_dirent *)((char *)de + reclen);
9645                     len -= reclen;
9646                     tde = (struct target_dirent *)((char *)tde + treclen);
9647                     count1 += treclen;
9648                 }
9649                 ret = count1;
9650                 unlock_user(target_dirp, arg2, ret);
9651             }
9652             g_free(dirp);
9653         }
9654 #else
9655         {
9656             struct linux_dirent *dirp;
9657             abi_long count = arg3;
9658 
9659             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9660                 return -TARGET_EFAULT;
9661             ret = get_errno(sys_getdents(arg1, dirp, count));
9662             if (!is_error(ret)) {
9663                 struct linux_dirent *de;
9664                 int len = ret;
9665                 int reclen;
9666                 de = dirp;
9667                 while (len > 0) {
9668                     reclen = de->d_reclen;
9669                     if (reclen > len)
9670                         break;
9671                     de->d_reclen = tswap16(reclen);
9672                     tswapls(&de->d_ino);
9673                     tswapls(&de->d_off);
9674                     de = (struct linux_dirent *)((char *)de + reclen);
9675                     len -= reclen;
9676                 }
9677             }
9678             unlock_user(dirp, arg2, ret);
9679         }
9680 #endif
9681 #else
9682         /* Implement getdents in terms of getdents64 */
9683         {
9684             struct linux_dirent64 *dirp;
9685             abi_long count = arg3;
9686 
9687             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9688             if (!dirp) {
9689                 return -TARGET_EFAULT;
9690             }
9691             ret = get_errno(sys_getdents64(arg1, dirp, count));
9692             if (!is_error(ret)) {
9693                 /* Convert the dirent64 structs to target dirent.  We do this
9694                  * in-place, since we can guarantee that a target_dirent is no
9695                  * larger than a dirent64; however this means we have to be
9696                  * careful to read everything before writing in the new format.
9697                  */
9698                 struct linux_dirent64 *de;
9699                 struct target_dirent *tde;
9700                 int len = ret;
9701                 int tlen = 0;
9702 
9703                 de = dirp;
9704                 tde = (struct target_dirent *)dirp;
9705                 while (len > 0) {
9706                     int namelen, treclen;
9707                     int reclen = de->d_reclen;
9708                     uint64_t ino = de->d_ino;
9709                     int64_t off = de->d_off;
9710                     uint8_t type = de->d_type;
9711 
9712                     namelen = strlen(de->d_name);
9713                     treclen = offsetof(struct target_dirent, d_name)
9714                         + namelen + 2;
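                         /* +2: the trailing NUL plus the d_type byte stored in
                          * the last byte of the record (see below). */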
9715                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9716 
9717                     memmove(tde->d_name, de->d_name, namelen + 1);
9718                     tde->d_ino = tswapal(ino);
9719                     tde->d_off = tswapal(off);
9720                     tde->d_reclen = tswap16(treclen);
9721                     /* The target_dirent type is in what was formerly a padding
9722                      * byte at the end of the structure:
9723                      */
9724                     *(((char *)tde) + treclen - 1) = type;
9725 
9726                     de = (struct linux_dirent64 *)((char *)de + reclen);
9727                     tde = (struct target_dirent *)((char *)tde + treclen);
9728                     len -= reclen;
9729                     tlen += treclen;
9730                 }
9731                 ret = tlen;
9732             }
9733             unlock_user(dirp, arg2, ret);
9734         }
9735 #endif
9736         return ret;
9737 #endif /* TARGET_NR_getdents */
9738 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9739     case TARGET_NR_getdents64:
9740         {
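                 /*
                  * linux_dirent64 uses fixed-width fields, so host and target
                  * layouts match and the records only need byte-swapping in
                  * place.
                  */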
9741             struct linux_dirent64 *dirp;
9742             abi_long count = arg3;
9743             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9744                 return -TARGET_EFAULT;
9745             ret = get_errno(sys_getdents64(arg1, dirp, count));
9746             if (!is_error(ret)) {
9747                 struct linux_dirent64 *de;
9748                 int len = ret;
9749                 int reclen;
9750                 de = dirp;
9751                 while (len > 0) {
9752                     reclen = de->d_reclen;
9753                     if (reclen > len)
9754                         break;
9755                     de->d_reclen = tswap16(reclen);
9756                     tswap64s((uint64_t *)&de->d_ino);
9757                     tswap64s((uint64_t *)&de->d_off);
9758                     de = (struct linux_dirent64 *)((char *)de + reclen);
9759                     len -= reclen;
9760                 }
9761             }
9762             unlock_user(dirp, arg2, ret);
9763         }
9764         return ret;
9765 #endif /* TARGET_NR_getdents64 */
9766 #if defined(TARGET_NR__newselect)
9767     case TARGET_NR__newselect:
9768         return do_select(arg1, arg2, arg3, arg4, arg5);
9769 #endif
9770 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9771 # ifdef TARGET_NR_poll
9772     case TARGET_NR_poll:
9773 # endif
9774 # ifdef TARGET_NR_ppoll
9775     case TARGET_NR_ppoll:
9776 # endif
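             /*
              * poll and ppoll share the pollfd conversion below; the actual
              * wait is selected by switching on 'num', with plain poll
              * emulated via safe_ppoll() and its millisecond timeout
              * converted to a timespec.
              */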
9777         {
9778             struct target_pollfd *target_pfd;
9779             unsigned int nfds = arg2;
9780             struct pollfd *pfd;
9781             unsigned int i;
9782 
9783             pfd = NULL;
9784             target_pfd = NULL;
9785             if (nfds) {
9786                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9787                     return -TARGET_EINVAL;
9788                 }
9789 
9790                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9791                                        sizeof(struct target_pollfd) * nfds, 1);
9792                 if (!target_pfd) {
9793                     return -TARGET_EFAULT;
9794                 }
9795 
9796                 pfd = alloca(sizeof(struct pollfd) * nfds);
9797                 for (i = 0; i < nfds; i++) {
9798                     pfd[i].fd = tswap32(target_pfd[i].fd);
9799                     pfd[i].events = tswap16(target_pfd[i].events);
9800                 }
9801             }
9802 
9803             switch (num) {
9804 # ifdef TARGET_NR_ppoll
9805             case TARGET_NR_ppoll:
9806             {
9807                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9808                 target_sigset_t *target_set;
9809                 sigset_t _set, *set = &_set;
9810 
9811                 if (arg3) {
9812                     if (target_to_host_timespec(timeout_ts, arg3)) {
9813                         unlock_user(target_pfd, arg1, 0);
9814                         return -TARGET_EFAULT;
9815                     }
9816                 } else {
9817                     timeout_ts = NULL;
9818                 }
9819 
9820                 if (arg4) {
9821                     if (arg5 != sizeof(target_sigset_t)) {
9822                         unlock_user(target_pfd, arg1, 0);
9823                         return -TARGET_EINVAL;
9824                     }
9825 
9826                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9827                     if (!target_set) {
9828                         unlock_user(target_pfd, arg1, 0);
9829                         return -TARGET_EFAULT;
9830                     }
9831                     target_to_host_sigset(set, target_set);
9832                 } else {
9833                     set = NULL;
9834                 }
9835 
9836                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9837                                            set, SIGSET_T_SIZE));
9838 
9839                 if (!is_error(ret) && arg3) {
9840                     host_to_target_timespec(arg3, timeout_ts);
9841                 }
9842                 if (arg4) {
9843                     unlock_user(target_set, arg4, 0);
9844                 }
9845                 break;
9846             }
9847 # endif
9848 # ifdef TARGET_NR_poll
9849             case TARGET_NR_poll:
9850             {
9851                 struct timespec ts, *pts;
9852 
9853                 if (arg3 >= 0) {
9854                     /* Convert ms to secs, ns */
9855                     ts.tv_sec = arg3 / 1000;
9856                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9857                     pts = &ts;
9858                 } else {
9859                     /* A negative poll() timeout means "infinite" */
9860                     pts = NULL;
9861                 }
9862                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9863                 break;
9864             }
9865 # endif
9866             default:
9867                 g_assert_not_reached();
9868             }
9869 
9870             if (!is_error(ret)) {
9871                 for (i = 0; i < nfds; i++) {
9872                     target_pfd[i].revents = tswap16(pfd[i].revents);
9873                 }
9874             }
9875             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9876         }
9877         return ret;
9878 #endif
9879     case TARGET_NR_flock:
9880         /* NOTE: the flock constant seems to be the same for every
9881            Linux platform */
9882         return get_errno(safe_flock(arg1, arg2));
9883     case TARGET_NR_readv:
9884         {
9885             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9886             if (vec != NULL) {
9887                 ret = get_errno(safe_readv(arg1, vec, arg3));
9888                 unlock_iovec(vec, arg2, arg3, 1);
9889             } else {
9890                 ret = -host_to_target_errno(errno);
9891             }
9892         }
9893         return ret;
9894     case TARGET_NR_writev:
9895         {
9896             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9897             if (vec != NULL) {
9898                 ret = get_errno(safe_writev(arg1, vec, arg3));
9899                 unlock_iovec(vec, arg2, arg3, 0);
9900             } else {
9901                 ret = -host_to_target_errno(errno);
9902             }
9903         }
9904         return ret;
9905 #if defined(TARGET_NR_preadv)
9906     case TARGET_NR_preadv:
9907         {
9908             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9909             if (vec != NULL) {
9910                 unsigned long low, high;
9911 
9912                 target_to_host_low_high(arg4, arg5, &low, &high);
9913                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9914                 unlock_iovec(vec, arg2, arg3, 1);
9915             } else {
9916                 ret = -host_to_target_errno(errno);
9917             }
9918         }
9919         return ret;
9920 #endif
9921 #if defined(TARGET_NR_pwritev)
9922     case TARGET_NR_pwritev:
9923         {
9924             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9925             if (vec != NULL) {
9926                 unsigned long low, high;
9927 
9928                 target_to_host_low_high(arg4, arg5, &low, &high);
9929                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9930                 unlock_iovec(vec, arg2, arg3, 0);
9931             } else {
9932                 ret = -host_to_target_errno(errno);
9933             }
9934         }
9935         return ret;
9936 #endif
9937     case TARGET_NR_getsid:
9938         return get_errno(getsid(arg1));
9939 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9940     case TARGET_NR_fdatasync:
9941         return get_errno(fdatasync(arg1));
9942 #endif
9943 #ifdef TARGET_NR__sysctl
9944     case TARGET_NR__sysctl:
9945         /* We don't implement this, but ENOTDIR is always a safe
9946            return value. */
9947         return -TARGET_ENOTDIR;
9948 #endif
9949     case TARGET_NR_sched_getaffinity:
9950         {
9951             unsigned int mask_size;
9952             unsigned long *mask;
9953 
9954             /*
9955              * sched_getaffinity needs multiples of ulong, so we need to take
9956              * care of mismatches between target ulong and host ulong sizes.
9957              */
9958             if (arg2 & (sizeof(abi_ulong) - 1)) {
9959                 return -TARGET_EINVAL;
9960             }
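                 /*
                  * Round arg2 up to a whole number of host longs:
                  * (x + n - 1) & ~(n - 1) rounds x up to a multiple of n
                  * when n is a power of two.
                  */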
9961             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9962 
9963             mask = alloca(mask_size);
9964             memset(mask, 0, mask_size);
9965             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9966 
9967             if (!is_error(ret)) {
9968                 if (ret > arg2) {
9969                     /* More data was returned than will fit in the caller's buffer.
9970                      * This only happens if sizeof(abi_long) < sizeof(long)
9971                      * and the caller passed us a buffer holding an odd number
9972                      * of abi_longs. If the host kernel is actually using the
9973                      * extra 4 bytes then fail EINVAL; otherwise we can just
9974                      * ignore them and only copy the interesting part.
9975                      */
9976                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9977                     if (numcpus > arg2 * 8) {
9978                         return -TARGET_EINVAL;
9979                     }
9980                     ret = arg2;
9981                 }
9982 
9983                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9984                     return -TARGET_EFAULT;
9985                 }
9986             }
9987         }
9988         return ret;
9989     case TARGET_NR_sched_setaffinity:
9990         {
9991             unsigned int mask_size;
9992             unsigned long *mask;
9993 
9994             /*
9995              * sched_setaffinity needs multiples of ulong, so we need to take
9996              * care of mismatches between target ulong and host ulong sizes.
9997              */
9998             if (arg2 & (sizeof(abi_ulong) - 1)) {
9999                 return -TARGET_EINVAL;
10000             }
10001             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10002             mask = alloca(mask_size);
10003 
10004             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10005             if (ret) {
10006                 return ret;
10007             }
10008 
10009             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10010         }
10011     case TARGET_NR_getcpu:
10012         {
10013             unsigned cpu, node;
10014             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10015                                        arg2 ? &node : NULL,
10016                                        NULL));
10017             if (is_error(ret)) {
10018                 return ret;
10019             }
10020             if (arg1 && put_user_u32(cpu, arg1)) {
10021                 return -TARGET_EFAULT;
10022             }
10023             if (arg2 && put_user_u32(node, arg2)) {
10024                 return -TARGET_EFAULT;
10025             }
10026         }
10027         return ret;
10028     case TARGET_NR_sched_setparam:
10029         {
10030             struct sched_param *target_schp;
10031             struct sched_param schp;
10032 
10033             if (arg2 == 0) {
10034                 return -TARGET_EINVAL;
10035             }
10036             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10037                 return -TARGET_EFAULT;
10038             schp.sched_priority = tswap32(target_schp->sched_priority);
10039             unlock_user_struct(target_schp, arg2, 0);
10040             return get_errno(sched_setparam(arg1, &schp));
10041         }
10042     case TARGET_NR_sched_getparam:
10043         {
10044             struct sched_param *target_schp;
10045             struct sched_param schp;
10046 
10047             if (arg2 == 0) {
10048                 return -TARGET_EINVAL;
10049             }
10050             ret = get_errno(sched_getparam(arg1, &schp));
10051             if (!is_error(ret)) {
10052                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10053                     return -TARGET_EFAULT;
10054                 target_schp->sched_priority = tswap32(schp.sched_priority);
10055                 unlock_user_struct(target_schp, arg2, 1);
10056             }
10057         }
10058         return ret;
10059     case TARGET_NR_sched_setscheduler:
10060         {
10061             struct sched_param *target_schp;
10062             struct sched_param schp;
10063             if (arg3 == 0) {
10064                 return -TARGET_EINVAL;
10065             }
10066             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10067                 return -TARGET_EFAULT;
10068             schp.sched_priority = tswap32(target_schp->sched_priority);
10069             unlock_user_struct(target_schp, arg3, 0);
10070             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10071         }
10072     case TARGET_NR_sched_getscheduler:
10073         return get_errno(sched_getscheduler(arg1));
10074     case TARGET_NR_sched_yield:
10075         return get_errno(sched_yield());
10076     case TARGET_NR_sched_get_priority_max:
10077         return get_errno(sched_get_priority_max(arg1));
10078     case TARGET_NR_sched_get_priority_min:
10079         return get_errno(sched_get_priority_min(arg1));
10080 #ifdef TARGET_NR_sched_rr_get_interval
10081     case TARGET_NR_sched_rr_get_interval:
10082         {
10083             struct timespec ts;
10084             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10085             if (!is_error(ret)) {
10086                 ret = host_to_target_timespec(arg2, &ts);
10087             }
10088         }
10089         return ret;
10090 #endif
10091 #if defined(TARGET_NR_nanosleep)
10092     case TARGET_NR_nanosleep:
10093         {
10094             struct timespec req, rem;
10095             target_to_host_timespec(&req, arg1);
10096             ret = get_errno(safe_nanosleep(&req, &rem));
10097             if (is_error(ret) && arg2) {
10098                 host_to_target_timespec(arg2, &rem);
10099             }
10100         }
10101         return ret;
10102 #endif
10103     case TARGET_NR_prctl:
10104         switch (arg1) {
10105         case PR_GET_PDEATHSIG:
10106         {
10107             int deathsig;
10108             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10109             if (!is_error(ret) && arg2
10110                 && put_user_ual(deathsig, arg2)) {
10111                 return -TARGET_EFAULT;
10112             }
10113             return ret;
10114         }
10115 #ifdef PR_GET_NAME
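              /*
               * The fixed 16-byte buffer matches the kernel's TASK_COMM_LEN,
               * the size of the thread name used by PR_GET_NAME/PR_SET_NAME.
               */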
10116         case PR_GET_NAME:
10117         {
10118             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10119             if (!name) {
10120                 return -TARGET_EFAULT;
10121             }
10122             ret = get_errno(prctl(arg1, (unsigned long)name,
10123                                   arg3, arg4, arg5));
10124             unlock_user(name, arg2, 16);
10125             return ret;
10126         }
10127         case PR_SET_NAME:
10128         {
10129             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10130             if (!name) {
10131                 return -TARGET_EFAULT;
10132             }
10133             ret = get_errno(prctl(arg1, (unsigned long)name,
10134                                   arg3, arg4, arg5));
10135             unlock_user(name, arg2, 0);
10136             return ret;
10137         }
10138 #endif
10139 #ifdef TARGET_MIPS
10140         case TARGET_PR_GET_FP_MODE:
10141         {
10142             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10143             ret = 0;
10144             if (env->CP0_Status & (1 << CP0St_FR)) {
10145                 ret |= TARGET_PR_FP_MODE_FR;
10146             }
10147             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10148                 ret |= TARGET_PR_FP_MODE_FRE;
10149             }
10150             return ret;
10151         }
10152         case TARGET_PR_SET_FP_MODE:
10153         {
10154             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10155             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10156             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10157             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10158             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10159 
10160             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10161                                             TARGET_PR_FP_MODE_FRE;
10162 
10163             /* If nothing to change, return right away, successfully.  */
10164             if (old_fr == new_fr && old_fre == new_fre) {
10165                 return 0;
10166             }
10167             /* Check the value is valid */
10168             if (arg2 & ~known_bits) {
10169                 return -TARGET_EOPNOTSUPP;
10170             }
10171             /* Setting FRE without FR is not supported.  */
10172             if (new_fre && !new_fr) {
10173                 return -TARGET_EOPNOTSUPP;
10174             }
10175             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10176                 /* FR1 is not supported */
10177                 return -TARGET_EOPNOTSUPP;
10178             }
10179             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10180                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10181                 /* cannot set FR=0 */
10182                 return -TARGET_EOPNOTSUPP;
10183             }
10184             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10185                 /* Cannot set FRE=1 */
10186                 return -TARGET_EOPNOTSUPP;
10187             }
10188 
10189             int i;
10190             fpr_t *fpr = env->active_fpu.fpr;
10191             for (i = 0; i < 32 ; i += 2) {
10192                 if (!old_fr && new_fr) {
10193                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10194                 } else if (old_fr && !new_fr) {
10195                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10196                 }
10197             }
10198 
10199             if (new_fr) {
10200                 env->CP0_Status |= (1 << CP0St_FR);
10201                 env->hflags |= MIPS_HFLAG_F64;
10202             } else {
10203                 env->CP0_Status &= ~(1 << CP0St_FR);
10204                 env->hflags &= ~MIPS_HFLAG_F64;
10205             }
10206             if (new_fre) {
10207                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10208                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10209                     env->hflags |= MIPS_HFLAG_FRE;
10210                 }
10211             } else {
10212                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10213                 env->hflags &= ~MIPS_HFLAG_FRE;
10214             }
10215 
10216             return 0;
10217         }
10218 #endif /* MIPS */
10219 #ifdef TARGET_AARCH64
10220         case TARGET_PR_SVE_SET_VL:
10221             /*
10222              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10223              * PR_SVE_VL_INHERIT.  Note the kernel definition
10224              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10225              * even though the current architectural maximum is VQ=16.
10226              */
10227             ret = -TARGET_EINVAL;
10228             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10229                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10230                 CPUARMState *env = cpu_env;
10231                 ARMCPU *cpu = env_archcpu(env);
10232                 uint32_t vq, old_vq;
10233 
10234                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10235                 vq = MAX(arg2 / 16, 1);
10236                 vq = MIN(vq, cpu->sve_max_vq);
10237 
10238                 if (vq < old_vq) {
10239                     aarch64_sve_narrow_vq(env, vq);
10240                 }
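                      /*
                       * ZCR_EL1.LEN is programmed as VQ - 1; the prctl result
                       * is the new vector length in bytes (VQ * 16).
                       */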
10241                 env->vfp.zcr_el[1] = vq - 1;
10242                 arm_rebuild_hflags(env);
10243                 ret = vq * 16;
10244             }
10245             return ret;
10246         case TARGET_PR_SVE_GET_VL:
10247             ret = -TARGET_EINVAL;
10248             {
10249                 ARMCPU *cpu = env_archcpu(cpu_env);
10250                 if (cpu_isar_feature(aa64_sve, cpu)) {
10251                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10252                 }
10253             }
10254             return ret;
10255         case TARGET_PR_PAC_RESET_KEYS:
10256             {
10257                 CPUARMState *env = cpu_env;
10258                 ARMCPU *cpu = env_archcpu(env);
10259 
10260                 if (arg3 || arg4 || arg5) {
10261                     return -TARGET_EINVAL;
10262                 }
10263                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10264                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10265                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10266                                TARGET_PR_PAC_APGAKEY);
10267                     int ret = 0;
10268                     Error *err = NULL;
10269 
10270                     if (arg2 == 0) {
10271                         arg2 = all;
10272                     } else if (arg2 & ~all) {
10273                         return -TARGET_EINVAL;
10274                     }
10275                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10276                         ret |= qemu_guest_getrandom(&env->keys.apia,
10277                                                     sizeof(ARMPACKey), &err);
10278                     }
10279                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10280                         ret |= qemu_guest_getrandom(&env->keys.apib,
10281                                                     sizeof(ARMPACKey), &err);
10282                     }
10283                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10284                         ret |= qemu_guest_getrandom(&env->keys.apda,
10285                                                     sizeof(ARMPACKey), &err);
10286                     }
10287                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10288                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10289                                                     sizeof(ARMPACKey), &err);
10290                     }
10291                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10292                         ret |= qemu_guest_getrandom(&env->keys.apga,
10293                                                     sizeof(ARMPACKey), &err);
10294                     }
10295                     if (ret != 0) {
10296                         /*
10297                          * Some unknown failure in the crypto.  The best
10298                          * we can do is log it and fail the syscall.
10299                          * The real syscall cannot fail this way.
10300                          */
10301                         qemu_log_mask(LOG_UNIMP,
10302                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10303                                       error_get_pretty(err));
10304                         error_free(err);
10305                         return -TARGET_EIO;
10306                     }
10307                     return 0;
10308                 }
10309             }
10310             return -TARGET_EINVAL;
10311 #endif /* AARCH64 */
10312         case PR_GET_SECCOMP:
10313         case PR_SET_SECCOMP:
10314             /* Disable seccomp to prevent the target from disabling syscalls
10315              * that we need. */
10316             return -TARGET_EINVAL;
10317         default:
10318             /* Most prctl options have no pointer arguments */
10319             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10320         }
10321         break;
10322 #ifdef TARGET_NR_arch_prctl
10323     case TARGET_NR_arch_prctl:
10324 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10325         return do_arch_prctl(cpu_env, arg1, arg2);
10326 #else
10327 #error unreachable
10328 #endif
10329 #endif
10330 #ifdef TARGET_NR_pread64
10331     case TARGET_NR_pread64:
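              /*
               * On ABIs that pass 64-bit values in aligned register pairs, a
               * pad register precedes the offset, so its halves arrive one
               * slot later than usual.
               */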
10332         if (regpairs_aligned(cpu_env, num)) {
10333             arg4 = arg5;
10334             arg5 = arg6;
10335         }
10336         if (arg2 == 0 && arg3 == 0) {
10337             /* Special-case NULL buffer and zero length, which should succeed */
10338             p = 0;
10339         } else {
10340             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10341             if (!p) {
10342                 return -TARGET_EFAULT;
10343             }
10344         }
10345         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10346         unlock_user(p, arg2, ret);
10347         return ret;
10348     case TARGET_NR_pwrite64:
10349         if (regpairs_aligned(cpu_env, num)) {
10350             arg4 = arg5;
10351             arg5 = arg6;
10352         }
10353         if (arg2 == 0 && arg3 == 0) {
10354             /* Special-case NULL buffer and zero length, which should succeed */
10355             p = 0;
10356         } else {
10357             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10358             if (!p) {
10359                 return -TARGET_EFAULT;
10360             }
10361         }
10362         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10363         unlock_user(p, arg2, 0);
10364         return ret;
10365 #endif
10366     case TARGET_NR_getcwd:
10367         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10368             return -TARGET_EFAULT;
10369         ret = get_errno(sys_getcwd1(p, arg2));
10370         unlock_user(p, arg1, ret);
10371         return ret;
10372     case TARGET_NR_capget:
10373     case TARGET_NR_capset:
10374     {
10375         struct target_user_cap_header *target_header;
10376         struct target_user_cap_data *target_data = NULL;
10377         struct __user_cap_header_struct header;
10378         struct __user_cap_data_struct data[2];
10379         struct __user_cap_data_struct *dataptr = NULL;
10380         int i, target_datalen;
10381         int data_items = 1;
10382 
10383         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10384             return -TARGET_EFAULT;
10385         }
10386         header.version = tswap32(target_header->version);
10387         header.pid = tswap32(target_header->pid);
10388 
10389         if (header.version != _LINUX_CAPABILITY_VERSION) {
10390             /* Version 2 and up take a pointer to two user_data structs */
10391             data_items = 2;
10392         }
10393 
10394         target_datalen = sizeof(*target_data) * data_items;
10395 
10396         if (arg2) {
10397             if (num == TARGET_NR_capget) {
10398                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10399             } else {
10400                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10401             }
10402             if (!target_data) {
10403                 unlock_user_struct(target_header, arg1, 0);
10404                 return -TARGET_EFAULT;
10405             }
10406 
10407             if (num == TARGET_NR_capset) {
10408                 for (i = 0; i < data_items; i++) {
10409                     data[i].effective = tswap32(target_data[i].effective);
10410                     data[i].permitted = tswap32(target_data[i].permitted);
10411                     data[i].inheritable = tswap32(target_data[i].inheritable);
10412                 }
10413             }
10414 
10415             dataptr = data;
10416         }
10417 
10418         if (num == TARGET_NR_capget) {
10419             ret = get_errno(capget(&header, dataptr));
10420         } else {
10421             ret = get_errno(capset(&header, dataptr));
10422         }
10423 
10424         /* The kernel always updates version for both capget and capset */
10425         target_header->version = tswap32(header.version);
10426         unlock_user_struct(target_header, arg1, 1);
10427 
10428         if (arg2) {
10429             if (num == TARGET_NR_capget) {
10430                 for (i = 0; i < data_items; i++) {
10431                     target_data[i].effective = tswap32(data[i].effective);
10432                     target_data[i].permitted = tswap32(data[i].permitted);
10433                     target_data[i].inheritable = tswap32(data[i].inheritable);
10434                 }
10435                 unlock_user(target_data, arg2, target_datalen);
10436             } else {
10437                 unlock_user(target_data, arg2, 0);
10438             }
10439         }
10440         return ret;
10441     }
10442     case TARGET_NR_sigaltstack:
10443         return do_sigaltstack(arg1, arg2,
10444                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10445 
10446 #ifdef CONFIG_SENDFILE
10447 #ifdef TARGET_NR_sendfile
10448     case TARGET_NR_sendfile:
10449     {
10450         off_t *offp = NULL;
10451         off_t off;
10452         if (arg3) {
10453             ret = get_user_sal(off, arg3);
10454             if (is_error(ret)) {
10455                 return ret;
10456             }
10457             offp = &off;
10458         }
10459         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10460         if (!is_error(ret) && arg3) {
10461             abi_long ret2 = put_user_sal(off, arg3);
10462             if (is_error(ret2)) {
10463                 ret = ret2;
10464             }
10465         }
10466         return ret;
10467     }
10468 #endif
10469 #ifdef TARGET_NR_sendfile64
10470     case TARGET_NR_sendfile64:
10471     {
10472         off_t *offp = NULL;
10473         off_t off;
10474         if (arg3) {
10475             ret = get_user_s64(off, arg3);
10476             if (is_error(ret)) {
10477                 return ret;
10478             }
10479             offp = &off;
10480         }
10481         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10482         if (!is_error(ret) && arg3) {
10483             abi_long ret2 = put_user_s64(off, arg3);
10484             if (is_error(ret2)) {
10485                 ret = ret2;
10486             }
10487         }
10488         return ret;
10489     }
10490 #endif
10491 #endif
10492 #ifdef TARGET_NR_vfork
10493     case TARGET_NR_vfork:
10494         return get_errno(do_fork(cpu_env,
10495                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10496                          0, 0, 0, 0));
10497 #endif
10498 #ifdef TARGET_NR_ugetrlimit
10499     case TARGET_NR_ugetrlimit:
10500     {
10501         struct rlimit rlim;
10502         int resource = target_to_host_resource(arg1);
10503         ret = get_errno(getrlimit(resource, &rlim));
10504         if (!is_error(ret)) {
10505             struct target_rlimit *target_rlim;
10506             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10507                 return -TARGET_EFAULT;
10508             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10509             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10510             unlock_user_struct(target_rlim, arg2, 1);
10511         }
10512         return ret;
10513     }
10514 #endif
10515 #ifdef TARGET_NR_truncate64
10516     case TARGET_NR_truncate64:
10517         if (!(p = lock_user_string(arg1)))
10518             return -TARGET_EFAULT;
10519         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10520         unlock_user(p, arg1, 0);
10521         return ret;
10522 #endif
10523 #ifdef TARGET_NR_ftruncate64
10524     case TARGET_NR_ftruncate64:
10525         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10526 #endif
10527 #ifdef TARGET_NR_stat64
10528     case TARGET_NR_stat64:
10529         if (!(p = lock_user_string(arg1))) {
10530             return -TARGET_EFAULT;
10531         }
10532         ret = get_errno(stat(path(p), &st));
10533         unlock_user(p, arg1, 0);
10534         if (!is_error(ret))
10535             ret = host_to_target_stat64(cpu_env, arg2, &st);
10536         return ret;
10537 #endif
10538 #ifdef TARGET_NR_lstat64
10539     case TARGET_NR_lstat64:
10540         if (!(p = lock_user_string(arg1))) {
10541             return -TARGET_EFAULT;
10542         }
10543         ret = get_errno(lstat(path(p), &st));
10544         unlock_user(p, arg1, 0);
10545         if (!is_error(ret))
10546             ret = host_to_target_stat64(cpu_env, arg2, &st);
10547         return ret;
10548 #endif
10549 #ifdef TARGET_NR_fstat64
10550     case TARGET_NR_fstat64:
10551         ret = get_errno(fstat(arg1, &st));
10552         if (!is_error(ret))
10553             ret = host_to_target_stat64(cpu_env, arg2, &st);
10554         return ret;
10555 #endif
10556 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10557 #ifdef TARGET_NR_fstatat64
10558     case TARGET_NR_fstatat64:
10559 #endif
10560 #ifdef TARGET_NR_newfstatat
10561     case TARGET_NR_newfstatat:
10562 #endif
10563         if (!(p = lock_user_string(arg2))) {
10564             return -TARGET_EFAULT;
10565         }
10566         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10567         unlock_user(p, arg2, 0);
10568         if (!is_error(ret))
10569             ret = host_to_target_stat64(cpu_env, arg3, &st);
10570         return ret;
10571 #endif
10572 #if defined(TARGET_NR_statx)
10573     case TARGET_NR_statx:
10574         {
10575             struct target_statx *target_stx;
10576             int dirfd = arg1;
10577             int flags = arg3;
10578 
10579             p = lock_user_string(arg2);
10580             if (p == NULL) {
10581                 return -TARGET_EFAULT;
10582             }
10583 #if defined(__NR_statx)
10584             {
10585                 /*
10586                  * It is assumed that struct statx is architecture independent.
10587                  */
10588                 struct target_statx host_stx;
10589                 int mask = arg4;
10590 
10591                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10592                 if (!is_error(ret)) {
10593                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10594                         unlock_user(p, arg2, 0);
10595                         return -TARGET_EFAULT;
10596                     }
10597                 }
10598 
10599                 if (ret != -TARGET_ENOSYS) {
10600                     unlock_user(p, arg2, 0);
10601                     return ret;
10602                 }
10603             }
10604 #endif
10605             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10606             unlock_user(p, arg2, 0);
10607 
10608             if (!is_error(ret)) {
10609                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10610                     return -TARGET_EFAULT;
10611                 }
10612                 memset(target_stx, 0, sizeof(*target_stx));
10613                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10614                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10615                 __put_user(st.st_ino, &target_stx->stx_ino);
10616                 __put_user(st.st_mode, &target_stx->stx_mode);
10617                 __put_user(st.st_uid, &target_stx->stx_uid);
10618                 __put_user(st.st_gid, &target_stx->stx_gid);
10619                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10620                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10621                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10622                 __put_user(st.st_size, &target_stx->stx_size);
10623                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10624                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10625                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10626                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10627                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10628                 unlock_user_struct(target_stx, arg5, 1);
10629             }
10630         }
10631         return ret;
10632 #endif
10633 #ifdef TARGET_NR_lchown
10634     case TARGET_NR_lchown:
10635         if (!(p = lock_user_string(arg1)))
10636             return -TARGET_EFAULT;
10637         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10638         unlock_user(p, arg1, 0);
10639         return ret;
10640 #endif
10641 #ifdef TARGET_NR_getuid
10642     case TARGET_NR_getuid:
10643         return get_errno(high2lowuid(getuid()));
10644 #endif
10645 #ifdef TARGET_NR_getgid
10646     case TARGET_NR_getgid:
10647         return get_errno(high2lowgid(getgid()));
10648 #endif
10649 #ifdef TARGET_NR_geteuid
10650     case TARGET_NR_geteuid:
10651         return get_errno(high2lowuid(geteuid()));
10652 #endif
10653 #ifdef TARGET_NR_getegid
10654     case TARGET_NR_getegid:
10655         return get_errno(high2lowgid(getegid()));
10656 #endif
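          /*
           * The syscalls below are the legacy variants that use 16-bit
           * uids/gids on targets that define USE_UID16; low2high*() and
           * high2low*() translate between the 16-bit target view and the
           * host's full-width IDs, while the "32" variants further down
           * pass the values through unchanged.
           */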
10657     case TARGET_NR_setreuid:
10658         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10659     case TARGET_NR_setregid:
10660         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10661     case TARGET_NR_getgroups:
10662         {
10663             int gidsetsize = arg1;
10664             target_id *target_grouplist;
10665             gid_t *grouplist;
10666             int i;
10667 
10668             grouplist = alloca(gidsetsize * sizeof(gid_t));
10669             ret = get_errno(getgroups(gidsetsize, grouplist));
10670             if (gidsetsize == 0)
10671                 return ret;
10672             if (!is_error(ret)) {
10673                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10674                 if (!target_grouplist)
10675                     return -TARGET_EFAULT;
10676                 for (i = 0; i < ret; i++)
10677                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10678                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10679             }
10680         }
10681         return ret;
10682     case TARGET_NR_setgroups:
10683         {
10684             int gidsetsize = arg1;
10685             target_id *target_grouplist;
10686             gid_t *grouplist = NULL;
10687             int i;
10688             if (gidsetsize) {
10689                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10690                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10691                 if (!target_grouplist) {
10692                     return -TARGET_EFAULT;
10693                 }
10694                 for (i = 0; i < gidsetsize; i++) {
10695                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10696                 }
10697                 unlock_user(target_grouplist, arg2, 0);
10698             }
10699             return get_errno(setgroups(gidsetsize, grouplist));
10700         }
10701     case TARGET_NR_fchown:
10702         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10703 #if defined(TARGET_NR_fchownat)
10704     case TARGET_NR_fchownat:
10705         if (!(p = lock_user_string(arg2)))
10706             return -TARGET_EFAULT;
10707         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10708                                  low2highgid(arg4), arg5));
10709         unlock_user(p, arg2, 0);
10710         return ret;
10711 #endif
10712 #ifdef TARGET_NR_setresuid
10713     case TARGET_NR_setresuid:
10714         return get_errno(sys_setresuid(low2highuid(arg1),
10715                                        low2highuid(arg2),
10716                                        low2highuid(arg3)));
10717 #endif
10718 #ifdef TARGET_NR_getresuid
10719     case TARGET_NR_getresuid:
10720         {
10721             uid_t ruid, euid, suid;
10722             ret = get_errno(getresuid(&ruid, &euid, &suid));
10723             if (!is_error(ret)) {
10724                 if (put_user_id(high2lowuid(ruid), arg1)
10725                     || put_user_id(high2lowuid(euid), arg2)
10726                     || put_user_id(high2lowuid(suid), arg3))
10727                     return -TARGET_EFAULT;
10728             }
10729         }
10730         return ret;
10731 #endif
10732 #ifdef TARGET_NR_setresgid
10733     case TARGET_NR_setresgid:
10734         return get_errno(sys_setresgid(low2highgid(arg1),
10735                                        low2highgid(arg2),
10736                                        low2highgid(arg3)));
10737 #endif
10738 #ifdef TARGET_NR_getresgid
10739     case TARGET_NR_getresgid:
10740         {
10741             gid_t rgid, egid, sgid;
10742             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10743             if (!is_error(ret)) {
10744                 if (put_user_id(high2lowgid(rgid), arg1)
10745                     || put_user_id(high2lowgid(egid), arg2)
10746                     || put_user_id(high2lowgid(sgid), arg3))
10747                     return -TARGET_EFAULT;
10748             }
10749         }
10750         return ret;
10751 #endif
10752 #ifdef TARGET_NR_chown
10753     case TARGET_NR_chown:
10754         if (!(p = lock_user_string(arg1)))
10755             return -TARGET_EFAULT;
10756         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10757         unlock_user(p, arg1, 0);
10758         return ret;
10759 #endif
10760     case TARGET_NR_setuid:
10761         return get_errno(sys_setuid(low2highuid(arg1)));
10762     case TARGET_NR_setgid:
10763         return get_errno(sys_setgid(low2highgid(arg1)));
10764     case TARGET_NR_setfsuid:
10765         return get_errno(setfsuid(arg1));
10766     case TARGET_NR_setfsgid:
10767         return get_errno(setfsgid(arg1));
10768 
10769 #ifdef TARGET_NR_lchown32
10770     case TARGET_NR_lchown32:
10771         if (!(p = lock_user_string(arg1)))
10772             return -TARGET_EFAULT;
10773         ret = get_errno(lchown(p, arg2, arg3));
10774         unlock_user(p, arg1, 0);
10775         return ret;
10776 #endif
10777 #ifdef TARGET_NR_getuid32
10778     case TARGET_NR_getuid32:
10779         return get_errno(getuid());
10780 #endif
10781 
10782 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10783    /* Alpha specific */
10784     case TARGET_NR_getxuid:
10785         {
10786             uid_t euid;
10787             euid = geteuid();
10788             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10789         }
10790         return get_errno(getuid());
10791 #endif
10792 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10793    /* Alpha specific */
10794     case TARGET_NR_getxgid:
10795         {
10796             gid_t egid;
10797             egid = getegid();
10798             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10799         }
10800         return get_errno(getgid());
10801 #endif
10802 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10803     /* Alpha specific */
10804     case TARGET_NR_osf_getsysinfo:
10805         ret = -TARGET_EOPNOTSUPP;
10806         switch (arg1) {
10807           case TARGET_GSI_IEEE_FP_CONTROL:
10808             {
10809                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10810                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10811 
10812                 swcr &= ~SWCR_STATUS_MASK;
10813                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10814 
10815                 if (put_user_u64 (swcr, arg2))
10816                         return -TARGET_EFAULT;
10817                 ret = 0;
10818             }
10819             break;
10820 
10821           /* case GSI_IEEE_STATE_AT_SIGNAL:
10822              -- Not implemented in linux kernel.
10823              case GSI_UACPROC:
10824              -- Retrieves current unaligned access state; not much used.
10825              case GSI_PROC_TYPE:
10826              -- Retrieves implver information; surely not used.
10827              case GSI_GET_HWRPB:
10828              -- Grabs a copy of the HWRPB; surely not used.
10829           */
10830         }
10831         return ret;
10832 #endif
10833 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10834     /* Alpha specific */
10835     case TARGET_NR_osf_setsysinfo:
10836         ret = -TARGET_EOPNOTSUPP;
10837         switch (arg1) {
10838           case TARGET_SSI_IEEE_FP_CONTROL:
10839             {
10840                 uint64_t swcr, fpcr;
10841 
10842                 if (get_user_u64 (swcr, arg2)) {
10843                     return -TARGET_EFAULT;
10844                 }
10845 
10846                 /*
10847                  * The kernel calls swcr_update_status to update the
10848                  * status bits from the fpcr at every point that it
10849                  * could be queried.  Therefore, we store the status
10850                  * bits only in FPCR.
10851                  */
10852                 ((CPUAlphaState *)cpu_env)->swcr
10853                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10854 
10855                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10856                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10857                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10858                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10859                 ret = 0;
10860             }
10861             break;
10862 
10863           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10864             {
10865                 uint64_t exc, fpcr, fex;
10866 
10867                 if (get_user_u64(exc, arg2)) {
10868                     return -TARGET_EFAULT;
10869                 }
10870                 exc &= SWCR_STATUS_MASK;
10871                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10872 
10873                 /* Old exceptions are not signaled.  */
10874                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10875                 fex = exc & ~fex;
10876                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10877                 fex &= ((CPUArchState *)cpu_env)->swcr;
10878 
10879                 /* Update the hardware fpcr.  */
10880                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10881                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10882 
10883                 if (fex) {
10884                     int si_code = TARGET_FPE_FLTUNK;
10885                     target_siginfo_t info;
10886 
10887                     if (fex & SWCR_TRAP_ENABLE_DNO) {
10888                         si_code = TARGET_FPE_FLTUND;
10889                     }
10890                     if (fex & SWCR_TRAP_ENABLE_INE) {
10891                         si_code = TARGET_FPE_FLTRES;
10892                     }
10893                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10894                         si_code = TARGET_FPE_FLTUND;
10895                     }
10896                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10897                         si_code = TARGET_FPE_FLTOVF;
10898                     }
10899                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10900                         si_code = TARGET_FPE_FLTDIV;
10901                     }
10902                     if (fex & SWCR_TRAP_ENABLE_INV) {
10903                         si_code = TARGET_FPE_FLTINV;
10904                     }
10905 
10906                     info.si_signo = SIGFPE;
10907                     info.si_errno = 0;
10908                     info.si_code = si_code;
10909                     info._sifields._sigfault._addr
10910                         = ((CPUArchState *)cpu_env)->pc;
10911                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10912                                  QEMU_SI_FAULT, &info);
10913                 }
10914                 ret = 0;
10915             }
10916             break;
10917 
10918           /* case SSI_NVPAIRS:
10919              -- Used with SSIN_UACPROC to enable unaligned accesses.
10920              case SSI_IEEE_STATE_AT_SIGNAL:
10921              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10922              -- Not implemented in linux kernel
10923           */
10924         }
10925         return ret;
10926 #endif
10927 #ifdef TARGET_NR_osf_sigprocmask
10928     /* Alpha specific.  */
10929     case TARGET_NR_osf_sigprocmask:
10930         {
10931             abi_ulong mask;
10932             int how;
10933             sigset_t set, oldset;
10934 
10935             switch(arg1) {
10936             case TARGET_SIG_BLOCK:
10937                 how = SIG_BLOCK;
10938                 break;
10939             case TARGET_SIG_UNBLOCK:
10940                 how = SIG_UNBLOCK;
10941                 break;
10942             case TARGET_SIG_SETMASK:
10943                 how = SIG_SETMASK;
10944                 break;
10945             default:
10946                 return -TARGET_EINVAL;
10947             }
10948             mask = arg2;
10949             target_to_host_old_sigset(&set, &mask);
10950             ret = do_sigprocmask(how, &set, &oldset);
10951             if (!ret) {
10952                 host_to_target_old_sigset(&mask, &oldset);
10953                 ret = mask;
10954             }
10955         }
10956         return ret;
10957 #endif
10958 
10959 #ifdef TARGET_NR_getgid32
10960     case TARGET_NR_getgid32:
10961         return get_errno(getgid());
10962 #endif
10963 #ifdef TARGET_NR_geteuid32
10964     case TARGET_NR_geteuid32:
10965         return get_errno(geteuid());
10966 #endif
10967 #ifdef TARGET_NR_getegid32
10968     case TARGET_NR_getegid32:
10969         return get_errno(getegid());
10970 #endif
10971 #ifdef TARGET_NR_setreuid32
10972     case TARGET_NR_setreuid32:
10973         return get_errno(setreuid(arg1, arg2));
10974 #endif
10975 #ifdef TARGET_NR_setregid32
10976     case TARGET_NR_setregid32:
10977         return get_errno(setregid(arg1, arg2));
10978 #endif
10979 #ifdef TARGET_NR_getgroups32
10980     case TARGET_NR_getgroups32:
10981         {
10982             int gidsetsize = arg1;
10983             uint32_t *target_grouplist;
10984             gid_t *grouplist;
10985             int i;
10986 
10987             grouplist = alloca(gidsetsize * sizeof(gid_t));
10988             ret = get_errno(getgroups(gidsetsize, grouplist));
10989             if (gidsetsize == 0)
10990                 return ret;
10991             if (!is_error(ret)) {
10992                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10993                 if (!target_grouplist) {
10994                     return -TARGET_EFAULT;
10995                 }
10996                 for (i = 0; i < ret; i++)
10997                     target_grouplist[i] = tswap32(grouplist[i]);
10998                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10999             }
11000         }
11001         return ret;
11002 #endif
11003 #ifdef TARGET_NR_setgroups32
11004     case TARGET_NR_setgroups32:
11005         {
11006             int gidsetsize = arg1;
11007             uint32_t *target_grouplist;
11008             gid_t *grouplist;
11009             int i;
11010 
11011             grouplist = alloca(gidsetsize * sizeof(gid_t));
11012             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11013             if (!target_grouplist) {
11014                 return -TARGET_EFAULT;
11015             }
11016             for (i = 0; i < gidsetsize; i++)
11017                 grouplist[i] = tswap32(target_grouplist[i]);
11018             unlock_user(target_grouplist, arg2, 0);
11019             return get_errno(setgroups(gidsetsize, grouplist));
11020         }
11021 #endif
11022 #ifdef TARGET_NR_fchown32
11023     case TARGET_NR_fchown32:
11024         return get_errno(fchown(arg1, arg2, arg3));
11025 #endif
11026 #ifdef TARGET_NR_setresuid32
11027     case TARGET_NR_setresuid32:
11028         return get_errno(sys_setresuid(arg1, arg2, arg3));
11029 #endif
11030 #ifdef TARGET_NR_getresuid32
11031     case TARGET_NR_getresuid32:
11032         {
11033             uid_t ruid, euid, suid;
11034             ret = get_errno(getresuid(&ruid, &euid, &suid));
11035             if (!is_error(ret)) {
11036                 if (put_user_u32(ruid, arg1)
11037                     || put_user_u32(euid, arg2)
11038                     || put_user_u32(suid, arg3))
11039                     return -TARGET_EFAULT;
11040             }
11041         }
11042         return ret;
11043 #endif
11044 #ifdef TARGET_NR_setresgid32
11045     case TARGET_NR_setresgid32:
11046         return get_errno(sys_setresgid(arg1, arg2, arg3));
11047 #endif
11048 #ifdef TARGET_NR_getresgid32
11049     case TARGET_NR_getresgid32:
11050         {
11051             gid_t rgid, egid, sgid;
11052             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11053             if (!is_error(ret)) {
11054                 if (put_user_u32(rgid, arg1)
11055                     || put_user_u32(egid, arg2)
11056                     || put_user_u32(sgid, arg3))
11057                     return -TARGET_EFAULT;
11058             }
11059         }
11060         return ret;
11061 #endif
11062 #ifdef TARGET_NR_chown32
11063     case TARGET_NR_chown32:
11064         if (!(p = lock_user_string(arg1)))
11065             return -TARGET_EFAULT;
11066         ret = get_errno(chown(p, arg2, arg3));
11067         unlock_user(p, arg1, 0);
11068         return ret;
11069 #endif
11070 #ifdef TARGET_NR_setuid32
11071     case TARGET_NR_setuid32:
11072         return get_errno(sys_setuid(arg1));
11073 #endif
11074 #ifdef TARGET_NR_setgid32
11075     case TARGET_NR_setgid32:
11076         return get_errno(sys_setgid(arg1));
11077 #endif
11078 #ifdef TARGET_NR_setfsuid32
11079     case TARGET_NR_setfsuid32:
11080         return get_errno(setfsuid(arg1));
11081 #endif
11082 #ifdef TARGET_NR_setfsgid32
11083     case TARGET_NR_setfsgid32:
11084         return get_errno(setfsgid(arg1));
11085 #endif
11086 #ifdef TARGET_NR_mincore
11087     case TARGET_NR_mincore:
11088         {
11089             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11090             if (!a) {
11091                 return -TARGET_ENOMEM;
11092             }
11093             p = lock_user_string(arg3);
11094             if (!p) {
11095                 ret = -TARGET_EFAULT;
11096             } else {
11097                 ret = get_errno(mincore(a, arg2, p));
11098                 unlock_user(p, arg3, ret);
11099             }
11100             unlock_user(a, arg1, 0);
11101         }
11102         return ret;
11103 #endif
11104 #ifdef TARGET_NR_arm_fadvise64_64
11105     case TARGET_NR_arm_fadvise64_64:
11106         /* arm_fadvise64_64 looks like fadvise64_64 but
11107          * with different argument order: fd, advice, offset, len
11108          * rather than the usual fd, offset, len, advice.
11109          * Note that offset and len are both 64-bit so appear as
11110          * pairs of 32-bit registers.
11111          */
11112         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11113                             target_offset64(arg5, arg6), arg2);
11114         return -host_to_target_errno(ret);
11115 #endif
11116 
11117 #if TARGET_ABI_BITS == 32
11118 
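      /*
       * On 32-bit guest ABIs a 64-bit syscall argument arrives as a pair of
       * 32-bit registers, and some targets additionally require such a pair
       * to start in an even-numbered register (regpairs_aligned()), which
       * shifts every later argument by one slot.  target_offset64() glues
       * the two halves back together in the target's word order, roughly:
       *
       *     offset = ((uint64_t)high_word << 32) | low_word;
       *
       * The argument shuffling in the handlers below exists only to undo
       * that register-pair layout before calling the host.
       */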
11119 #ifdef TARGET_NR_fadvise64_64
11120     case TARGET_NR_fadvise64_64:
11121 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11122         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11123         ret = arg2;
11124         arg2 = arg3;
11125         arg3 = arg4;
11126         arg4 = arg5;
11127         arg5 = arg6;
11128         arg6 = ret;
11129 #else
11130         /* 6 args: fd, offset (high, low), len (high, low), advice */
11131         if (regpairs_aligned(cpu_env, num)) {
11132             /* offset is in (3,4), len in (5,6) and advice in 7 */
11133             arg2 = arg3;
11134             arg3 = arg4;
11135             arg4 = arg5;
11136             arg5 = arg6;
11137             arg6 = arg7;
11138         }
11139 #endif
11140         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11141                             target_offset64(arg4, arg5), arg6);
11142         return -host_to_target_errno(ret);
11143 #endif
11144 
11145 #ifdef TARGET_NR_fadvise64
11146     case TARGET_NR_fadvise64:
11147         /* 5 args: fd, offset (high, low), len, advice */
11148         if (regpairs_aligned(cpu_env, num)) {
11149             /* offset is in (3,4), len in 5 and advice in 6 */
11150             arg2 = arg3;
11151             arg3 = arg4;
11152             arg4 = arg5;
11153             arg5 = arg6;
11154         }
11155         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11156         return -host_to_target_errno(ret);
11157 #endif
11158 
11159 #else /* not a 32-bit ABI */
11160 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11161 #ifdef TARGET_NR_fadvise64_64
11162     case TARGET_NR_fadvise64_64:
11163 #endif
11164 #ifdef TARGET_NR_fadvise64
11165     case TARGET_NR_fadvise64:
11166 #endif
11167 #ifdef TARGET_S390X
11168         switch (arg4) {
11169         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11170         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11171         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11172         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11173         default: break;
11174         }
11175 #endif
11176         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11177 #endif
11178 #endif /* end of 64-bit ABI fadvise handling */
11179 
11180 #ifdef TARGET_NR_madvise
11181     case TARGET_NR_madvise:
11182         /* A straight passthrough may not be safe because qemu sometimes
11183            turns private file-backed mappings into anonymous mappings.
11184            This will break MADV_DONTNEED.
11185            This is a hint, so ignoring and returning success is ok.  */
11186         return 0;
11187 #endif
11188 #if TARGET_ABI_BITS == 32
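          /*
           * fcntl64 exists only on 32-bit ABIs and adds 64-bit file locking
           * via struct flock64.  The guest layout of that structure is not
           * uniform: ARM OABI lays it out differently from EABI, which is
           * why the copy helpers are swapped below before the command is
           * translated with target_to_host_fcntl_cmd() and forwarded to
           * safe_fcntl().
           */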
11189     case TARGET_NR_fcntl64:
11190     {
11191         int cmd;
11192         struct flock64 fl;
11193         from_flock64_fn *copyfrom = copy_from_user_flock64;
11194         to_flock64_fn *copyto = copy_to_user_flock64;
11195 
11196 #ifdef TARGET_ARM
11197         if (!((CPUARMState *)cpu_env)->eabi) {
11198             copyfrom = copy_from_user_oabi_flock64;
11199             copyto = copy_to_user_oabi_flock64;
11200         }
11201 #endif
11202 
11203         cmd = target_to_host_fcntl_cmd(arg2);
11204         if (cmd == -TARGET_EINVAL) {
11205             return cmd;
11206         }
11207 
11208         switch(arg2) {
11209         case TARGET_F_GETLK64:
11210             ret = copyfrom(&fl, arg3);
11211             if (ret) {
11212                 break;
11213             }
11214             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11215             if (ret == 0) {
11216                 ret = copyto(arg3, &fl);
11217             }
11218             break;
11219 
11220         case TARGET_F_SETLK64:
11221         case TARGET_F_SETLKW64:
11222             ret = copyfrom(&fl, arg3);
11223             if (ret) {
11224                 break;
11225             }
11226             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11227             break;
11228         default:
11229             ret = do_fcntl(arg1, arg2, arg3);
11230             break;
11231         }
11232         return ret;
11233     }
11234 #endif
11235 #ifdef TARGET_NR_cacheflush
11236     case TARGET_NR_cacheflush:
11237         /* self-modifying code is handled automatically, so nothing needed */
11238         return 0;
11239 #endif
11240 #ifdef TARGET_NR_getpagesize
11241     case TARGET_NR_getpagesize:
11242         return TARGET_PAGE_SIZE;
11243 #endif
11244     case TARGET_NR_gettid:
11245         return get_errno(sys_gettid());
11246 #ifdef TARGET_NR_readahead
11247     case TARGET_NR_readahead:
11248 #if TARGET_ABI_BITS == 32
11249         if (regpairs_aligned(cpu_env, num)) {
11250             arg2 = arg3;
11251             arg3 = arg4;
11252             arg4 = arg5;
11253         }
11254         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11255 #else
11256         ret = get_errno(readahead(arg1, arg2, arg3));
11257 #endif
11258         return ret;
11259 #endif
11260 #ifdef CONFIG_ATTR
11261 #ifdef TARGET_NR_setxattr
11262     case TARGET_NR_listxattr:
11263     case TARGET_NR_llistxattr:
11264     {
11265         void *p, *b = 0;
11266         if (arg2) {
11267             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11268             if (!b) {
11269                 return -TARGET_EFAULT;
11270             }
11271         }
11272         p = lock_user_string(arg1);
11273         if (p) {
11274             if (num == TARGET_NR_listxattr) {
11275                 ret = get_errno(listxattr(p, b, arg3));
11276             } else {
11277                 ret = get_errno(llistxattr(p, b, arg3));
11278             }
11279         } else {
11280             ret = -TARGET_EFAULT;
11281         }
11282         unlock_user(p, arg1, 0);
11283         unlock_user(b, arg2, arg3);
11284         return ret;
11285     }
11286     case TARGET_NR_flistxattr:
11287     {
11288         void *b = 0;
11289         if (arg2) {
11290             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11291             if (!b) {
11292                 return -TARGET_EFAULT;
11293             }
11294         }
11295         ret = get_errno(flistxattr(arg1, b, arg3));
11296         unlock_user(b, arg2, arg3);
11297         return ret;
11298     }
11299     case TARGET_NR_setxattr:
11300     case TARGET_NR_lsetxattr:
11301         {
11302             void *p, *n, *v = 0;
11303             if (arg3) {
11304                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11305                 if (!v) {
11306                     return -TARGET_EFAULT;
11307                 }
11308             }
11309             p = lock_user_string(arg1);
11310             n = lock_user_string(arg2);
11311             if (p && n) {
11312                 if (num == TARGET_NR_setxattr) {
11313                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11314                 } else {
11315                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11316                 }
11317             } else {
11318                 ret = -TARGET_EFAULT;
11319             }
11320             unlock_user(p, arg1, 0);
11321             unlock_user(n, arg2, 0);
11322             unlock_user(v, arg3, 0);
11323         }
11324         return ret;
11325     case TARGET_NR_fsetxattr:
11326         {
11327             void *n, *v = 0;
11328             if (arg3) {
11329                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11330                 if (!v) {
11331                     return -TARGET_EFAULT;
11332                 }
11333             }
11334             n = lock_user_string(arg2);
11335             if (n) {
11336                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11337             } else {
11338                 ret = -TARGET_EFAULT;
11339             }
11340             unlock_user(n, arg2, 0);
11341             unlock_user(v, arg3, 0);
11342         }
11343         return ret;
11344     case TARGET_NR_getxattr:
11345     case TARGET_NR_lgetxattr:
11346         {
11347             void *p, *n, *v = 0;
11348             if (arg3) {
11349                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11350                 if (!v) {
11351                     return -TARGET_EFAULT;
11352                 }
11353             }
11354             p = lock_user_string(arg1);
11355             n = lock_user_string(arg2);
11356             if (p && n) {
11357                 if (num == TARGET_NR_getxattr) {
11358                     ret = get_errno(getxattr(p, n, v, arg4));
11359                 } else {
11360                     ret = get_errno(lgetxattr(p, n, v, arg4));
11361                 }
11362             } else {
11363                 ret = -TARGET_EFAULT;
11364             }
11365             unlock_user(p, arg1, 0);
11366             unlock_user(n, arg2, 0);
11367             unlock_user(v, arg3, arg4);
11368         }
11369         return ret;
11370     case TARGET_NR_fgetxattr:
11371         {
11372             void *n, *v = 0;
11373             if (arg3) {
11374                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11375                 if (!v) {
11376                     return -TARGET_EFAULT;
11377                 }
11378             }
11379             n = lock_user_string(arg2);
11380             if (n) {
11381                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11382             } else {
11383                 ret = -TARGET_EFAULT;
11384             }
11385             unlock_user(n, arg2, 0);
11386             unlock_user(v, arg3, arg4);
11387         }
11388         return ret;
11389     case TARGET_NR_removexattr:
11390     case TARGET_NR_lremovexattr:
11391         {
11392             void *p, *n;
11393             p = lock_user_string(arg1);
11394             n = lock_user_string(arg2);
11395             if (p && n) {
11396                 if (num == TARGET_NR_removexattr) {
11397                     ret = get_errno(removexattr(p, n));
11398                 } else {
11399                     ret = get_errno(lremovexattr(p, n));
11400                 }
11401             } else {
11402                 ret = -TARGET_EFAULT;
11403             }
11404             unlock_user(p, arg1, 0);
11405             unlock_user(n, arg2, 0);
11406         }
11407         return ret;
11408     case TARGET_NR_fremovexattr:
11409         {
11410             void *n;
11411             n = lock_user_string(arg2);
11412             if (n) {
11413                 ret = get_errno(fremovexattr(arg1, n));
11414             } else {
11415                 ret = -TARGET_EFAULT;
11416             }
11417             unlock_user(n, arg2, 0);
11418         }
11419         return ret;
11420 #endif
11421 #endif /* CONFIG_ATTR */
11422 #ifdef TARGET_NR_set_thread_area
11423     case TARGET_NR_set_thread_area:
11424 #if defined(TARGET_MIPS)
11425       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11426       return 0;
11427 #elif defined(TARGET_CRIS)
11428       if (arg1 & 0xff)
11429           ret = -TARGET_EINVAL;
11430       else {
11431           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11432           ret = 0;
11433       }
11434       return ret;
11435 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11436       return do_set_thread_area(cpu_env, arg1);
11437 #elif defined(TARGET_M68K)
11438       {
11439           TaskState *ts = cpu->opaque;
11440           ts->tp_value = arg1;
11441           return 0;
11442       }
11443 #else
11444       return -TARGET_ENOSYS;
11445 #endif
11446 #endif
11447 #ifdef TARGET_NR_get_thread_area
11448     case TARGET_NR_get_thread_area:
11449 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11450         return do_get_thread_area(cpu_env, arg1);
11451 #elif defined(TARGET_M68K)
11452         {
11453             TaskState *ts = cpu->opaque;
11454             return ts->tp_value;
11455         }
11456 #else
11457         return -TARGET_ENOSYS;
11458 #endif
11459 #endif
11460 #ifdef TARGET_NR_getdomainname
11461     case TARGET_NR_getdomainname:
11462         return -TARGET_ENOSYS;
11463 #endif
11464 
11465 #ifdef TARGET_NR_clock_settime
11466     case TARGET_NR_clock_settime:
11467     {
11468         struct timespec ts;
11469 
11470         ret = target_to_host_timespec(&ts, arg2);
11471         if (!is_error(ret)) {
11472             ret = get_errno(clock_settime(arg1, &ts));
11473         }
11474         return ret;
11475     }
11476 #endif
11477 #ifdef TARGET_NR_clock_settime64
11478     case TARGET_NR_clock_settime64:
11479     {
11480         struct timespec ts;
11481 
11482         ret = target_to_host_timespec64(&ts, arg2);
11483         if (!is_error(ret)) {
11484             ret = get_errno(clock_settime(arg1, &ts));
11485         }
11486         return ret;
11487     }
11488 #endif
11489 #ifdef TARGET_NR_clock_gettime
11490     case TARGET_NR_clock_gettime:
11491     {
11492         struct timespec ts;
11493         ret = get_errno(clock_gettime(arg1, &ts));
11494         if (!is_error(ret)) {
11495             ret = host_to_target_timespec(arg2, &ts);
11496         }
11497         return ret;
11498     }
11499 #endif
11500 #ifdef TARGET_NR_clock_gettime64
11501     case TARGET_NR_clock_gettime64:
11502     {
11503         struct timespec ts;
11504         ret = get_errno(clock_gettime(arg1, &ts));
11505         if (!is_error(ret)) {
11506             ret = host_to_target_timespec64(arg2, &ts);
11507         }
11508         return ret;
11509     }
11510 #endif
11511 #ifdef TARGET_NR_clock_getres
11512     case TARGET_NR_clock_getres:
11513     {
11514         struct timespec ts;
11515         ret = get_errno(clock_getres(arg1, &ts));
11516         if (!is_error(ret)) {
11517             host_to_target_timespec(arg2, &ts);
11518         }
11519         return ret;
11520     }
11521 #endif
11522 #ifdef TARGET_NR_clock_nanosleep
11523     case TARGET_NR_clock_nanosleep:
11524     {
11525         struct timespec ts;
11526         target_to_host_timespec(&ts, arg3);
11527         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11528                                              &ts, arg4 ? &ts : NULL));
11529         if (arg4)
11530             host_to_target_timespec(arg4, &ts);
11531 
11532 #if defined(TARGET_PPC)
11533         /* clock_nanosleep is odd in that it returns positive errno values.
11534          * On PPC, CR0 bit 3 should be set in such a situation. */
11535         if (ret && ret != -TARGET_ERESTARTSYS) {
11536             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11537         }
11538 #endif
11539         return ret;
11540     }
11541 #endif
11542 
11543 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
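          /*
           * set_tid_address can be forwarded directly: g2h() turns the guest
           * address into the corresponding host pointer, so the host kernel
           * clears the right word of guest memory when the thread exits.
           */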
11544     case TARGET_NR_set_tid_address:
11545         return get_errno(set_tid_address((int *)g2h(arg1)));
11546 #endif
11547 
11548     case TARGET_NR_tkill:
11549         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11550 
11551     case TARGET_NR_tgkill:
11552         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11553                          target_to_host_signal(arg3)));
11554 
11555 #ifdef TARGET_NR_set_robust_list
11556     case TARGET_NR_set_robust_list:
11557     case TARGET_NR_get_robust_list:
11558         /* The ABI for supporting robust futexes has userspace pass
11559          * the kernel a pointer to a linked list which is updated by
11560          * userspace after the syscall; the list is walked by the kernel
11561          * when the thread exits. Since the linked list in QEMU guest
11562          * memory isn't a valid linked list for the host and we have
11563          * no way to reliably intercept the thread-death event, we can't
11564          * support these. Silently return ENOSYS so that guest userspace
11565          * falls back to a non-robust futex implementation (which should
11566          * be OK except in the corner case of the guest crashing while
11567          * holding a mutex that is shared with another process via
11568          * shared memory).
11569          */
11570         return -TARGET_ENOSYS;
11571 #endif
11572 
11573 #if defined(TARGET_NR_utimensat)
11574     case TARGET_NR_utimensat:
11575         {
11576             struct timespec *tsp, ts[2];
11577             if (!arg3) {
11578                 tsp = NULL;
11579             } else {
11580                 target_to_host_timespec(ts, arg3);
11581                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11582                 tsp = ts;
11583             }
11584             if (!arg2)
11585                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11586             else {
11587                 if (!(p = lock_user_string(arg2))) {
11588                     return -TARGET_EFAULT;
11589                 }
11590                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11591                 unlock_user(p, arg2, 0);
11592             }
11593         }
11594         return ret;
11595 #endif
11596 #ifdef TARGET_NR_futex
11597     case TARGET_NR_futex:
11598         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11599 #endif
11600 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11601     case TARGET_NR_inotify_init:
11602         ret = get_errno(sys_inotify_init());
11603         if (ret >= 0) {
11604             fd_trans_register(ret, &target_inotify_trans);
11605         }
11606         return ret;
11607 #endif
11608 #ifdef CONFIG_INOTIFY1
11609 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11610     case TARGET_NR_inotify_init1:
11611         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11612                                           fcntl_flags_tbl)));
11613         if (ret >= 0) {
11614             fd_trans_register(ret, &target_inotify_trans);
11615         }
11616         return ret;
11617 #endif
11618 #endif
11619 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11620     case TARGET_NR_inotify_add_watch:
11621         p = lock_user_string(arg2);
11622         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11623         unlock_user(p, arg2, 0);
11624         return ret;
11625 #endif
11626 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11627     case TARGET_NR_inotify_rm_watch:
11628         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11629 #endif
11630 
11631 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11632     case TARGET_NR_mq_open:
11633         {
11634             struct mq_attr posix_mq_attr;
11635             struct mq_attr *pposix_mq_attr;
11636             int host_flags;
11637 
11638             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11639             pposix_mq_attr = NULL;
11640             if (arg4) {
11641                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11642                     return -TARGET_EFAULT;
11643                 }
11644                 pposix_mq_attr = &posix_mq_attr;
11645             }
11646             p = lock_user_string(arg1);
11647             if (!p) {
11648                 return -TARGET_EFAULT;
11649             }
11650             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11651             unlock_user (p, arg1, 0);
11652         }
11653         return ret;
11654 
11655     case TARGET_NR_mq_unlink:
11656         p = lock_user_string(arg1);
11657         if (!p) {
11658             return -TARGET_EFAULT;
11659         }
11660         ret = get_errno(mq_unlink(p));
11661         unlock_user (p, arg1, 0);
11662         return ret;
11663 
11664 #ifdef TARGET_NR_mq_timedsend
11665     case TARGET_NR_mq_timedsend:
11666         {
11667             struct timespec ts;
11668 
11669             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11670             if (arg5 != 0) {
11671                 target_to_host_timespec(&ts, arg5);
11672                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11673                 host_to_target_timespec(arg5, &ts);
11674             } else {
11675                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11676             }
11677             unlock_user (p, arg2, arg3);
11678         }
11679         return ret;
11680 #endif
11681 
11682 #ifdef TARGET_NR_mq_timedreceive
11683     case TARGET_NR_mq_timedreceive:
11684         {
11685             struct timespec ts;
11686             unsigned int prio;
11687 
11688             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11689             if (arg5 != 0) {
11690                 target_to_host_timespec(&ts, arg5);
11691                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11692                                                      &prio, &ts));
11693                 host_to_target_timespec(arg5, &ts);
11694             } else {
11695                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11696                                                      &prio, NULL));
11697             }
11698             unlock_user (p, arg2, arg3);
11699             if (arg4 != 0)
11700                 put_user_u32(prio, arg4);
11701         }
11702         return ret;
11703 #endif
11704 
11705     /* Not implemented for now... */
11706 /*     case TARGET_NR_mq_notify: */
11707 /*         break; */
11708 
11709     case TARGET_NR_mq_getsetattr:
11710         {
11711             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11712             ret = 0;
11713             if (arg2 != 0) {
11714                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11715                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11716                                            &posix_mq_attr_out));
11717             } else if (arg3 != 0) {
11718                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11719             }
11720             if (ret == 0 && arg3 != 0) {
11721                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11722             }
11723         }
11724         return ret;
11725 #endif
11726 
11727 #ifdef CONFIG_SPLICE
11728 #ifdef TARGET_NR_tee
11729     case TARGET_NR_tee:
11730         {
11731             ret = get_errno(tee(arg1,arg2,arg3,arg4));
11732         }
11733         return ret;
11734 #endif
11735 #ifdef TARGET_NR_splice
11736     case TARGET_NR_splice:
11737         {
11738             loff_t loff_in, loff_out;
11739             loff_t *ploff_in = NULL, *ploff_out = NULL;
11740             if (arg2) {
11741                 if (get_user_u64(loff_in, arg2)) {
11742                     return -TARGET_EFAULT;
11743                 }
11744                 ploff_in = &loff_in;
11745             }
11746             if (arg4) {
11747                 if (get_user_u64(loff_out, arg4)) {
11748                     return -TARGET_EFAULT;
11749                 }
11750                 ploff_out = &loff_out;
11751             }
11752             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11753             if (arg2) {
11754                 if (put_user_u64(loff_in, arg2)) {
11755                     return -TARGET_EFAULT;
11756                 }
11757             }
11758             if (arg4) {
11759                 if (put_user_u64(loff_out, arg4)) {
11760                     return -TARGET_EFAULT;
11761                 }
11762             }
11763         }
11764         return ret;
11765 #endif
11766 #ifdef TARGET_NR_vmsplice
11767     case TARGET_NR_vmsplice:
11768         {
11769             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11770             if (vec != NULL) {
11771                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11772                 unlock_iovec(vec, arg2, arg3, 0);
11773             } else {
11774                 ret = -host_to_target_errno(errno);
11775             }
11776         }
11777         return ret;
11778 #endif
11779 #endif /* CONFIG_SPLICE */
11780 #ifdef CONFIG_EVENTFD
11781 #if defined(TARGET_NR_eventfd)
11782     case TARGET_NR_eventfd:
11783         ret = get_errno(eventfd(arg1, 0));
11784         if (ret >= 0) {
11785             fd_trans_register(ret, &target_eventfd_trans);
11786         }
11787         return ret;
11788 #endif
11789 #if defined(TARGET_NR_eventfd2)
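          /*
           * EFD_NONBLOCK/EFD_CLOEXEC share their values with O_NONBLOCK and
           * O_CLOEXEC, which differ between target and host, so those two
           * bits are translated explicitly below and any remaining flag bits
           * are passed through unchanged.
           */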
11790     case TARGET_NR_eventfd2:
11791     {
11792         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11793         if (arg2 & TARGET_O_NONBLOCK) {
11794             host_flags |= O_NONBLOCK;
11795         }
11796         if (arg2 & TARGET_O_CLOEXEC) {
11797             host_flags |= O_CLOEXEC;
11798         }
11799         ret = get_errno(eventfd(arg1, host_flags));
11800         if (ret >= 0) {
11801             fd_trans_register(ret, &target_eventfd_trans);
11802         }
11803         return ret;
11804     }
11805 #endif
11806 #endif /* CONFIG_EVENTFD  */
11807 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11808     case TARGET_NR_fallocate:
11809 #if TARGET_ABI_BITS == 32
11810         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11811                                   target_offset64(arg5, arg6)));
11812 #else
11813         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11814 #endif
11815         return ret;
11816 #endif
11817 #if defined(CONFIG_SYNC_FILE_RANGE)
11818 #if defined(TARGET_NR_sync_file_range)
11819     case TARGET_NR_sync_file_range:
11820 #if TARGET_ABI_BITS == 32
11821 #if defined(TARGET_MIPS)
11822         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11823                                         target_offset64(arg5, arg6), arg7));
11824 #else
11825         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11826                                         target_offset64(arg4, arg5), arg6));
11827 #endif /* !TARGET_MIPS */
11828 #else
11829         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11830 #endif
11831         return ret;
11832 #endif
11833 #if defined(TARGET_NR_sync_file_range2)
11834     case TARGET_NR_sync_file_range2:
11835         /* This is like sync_file_range but the arguments are reordered */
11836 #if TARGET_ABI_BITS == 32
11837         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11838                                         target_offset64(arg5, arg6), arg2));
11839 #else
11840         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11841 #endif
11842         return ret;
11843 #endif
11844 #endif
11845 #if defined(TARGET_NR_signalfd4)
11846     case TARGET_NR_signalfd4:
11847         return do_signalfd4(arg1, arg2, arg4);
11848 #endif
11849 #if defined(TARGET_NR_signalfd)
11850     case TARGET_NR_signalfd:
11851         return do_signalfd4(arg1, arg2, 0);
11852 #endif
11853 #if defined(CONFIG_EPOLL)
11854 #if defined(TARGET_NR_epoll_create)
11855     case TARGET_NR_epoll_create:
11856         return get_errno(epoll_create(arg1));
11857 #endif
11858 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11859     case TARGET_NR_epoll_create1:
11860         return get_errno(epoll_create1(arg1));
11861 #endif
11862 #if defined(TARGET_NR_epoll_ctl)
11863     case TARGET_NR_epoll_ctl:
11864     {
11865         struct epoll_event ep;
11866         struct epoll_event *epp = 0;
11867         if (arg4) {
11868             struct target_epoll_event *target_ep;
11869             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11870                 return -TARGET_EFAULT;
11871             }
11872             ep.events = tswap32(target_ep->events);
11873             /* The epoll_data_t union is just opaque data to the kernel,
11874              * so we transfer all 64 bits across and need not worry what
11875              * actual data type it is.
11876              */
11877             ep.data.u64 = tswap64(target_ep->data.u64);
11878             unlock_user_struct(target_ep, arg4, 0);
11879             epp = &ep;
11880         }
11881         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11882     }
11883 #endif
11884 
11885 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11886 #if defined(TARGET_NR_epoll_wait)
11887     case TARGET_NR_epoll_wait:
11888 #endif
11889 #if defined(TARGET_NR_epoll_pwait)
11890     case TARGET_NR_epoll_pwait:
11891 #endif
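          /*
           * epoll_wait and epoll_pwait share one implementation: events are
           * collected into a host-side array (bounded by TARGET_EP_MAX_EVENTS
           * to keep the allocation sane) and then byte-swapped into the
           * guest's target_epoll_event buffer; epoll_pwait additionally
           * converts the optional signal mask before safe_epoll_pwait().
           */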
11892     {
11893         struct target_epoll_event *target_ep;
11894         struct epoll_event *ep;
11895         int epfd = arg1;
11896         int maxevents = arg3;
11897         int timeout = arg4;
11898 
11899         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11900             return -TARGET_EINVAL;
11901         }
11902 
11903         target_ep = lock_user(VERIFY_WRITE, arg2,
11904                               maxevents * sizeof(struct target_epoll_event), 1);
11905         if (!target_ep) {
11906             return -TARGET_EFAULT;
11907         }
11908 
11909         ep = g_try_new(struct epoll_event, maxevents);
11910         if (!ep) {
11911             unlock_user(target_ep, arg2, 0);
11912             return -TARGET_ENOMEM;
11913         }
11914 
11915         switch (num) {
11916 #if defined(TARGET_NR_epoll_pwait)
11917         case TARGET_NR_epoll_pwait:
11918         {
11919             target_sigset_t *target_set;
11920             sigset_t _set, *set = &_set;
11921 
11922             if (arg5) {
11923                 if (arg6 != sizeof(target_sigset_t)) {
11924                     ret = -TARGET_EINVAL;
11925                     break;
11926                 }
11927 
11928                 target_set = lock_user(VERIFY_READ, arg5,
11929                                        sizeof(target_sigset_t), 1);
11930                 if (!target_set) {
11931                     ret = -TARGET_EFAULT;
11932                     break;
11933                 }
11934                 target_to_host_sigset(set, target_set);
11935                 unlock_user(target_set, arg5, 0);
11936             } else {
11937                 set = NULL;
11938             }
11939 
11940             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11941                                              set, SIGSET_T_SIZE));
11942             break;
11943         }
11944 #endif
11945 #if defined(TARGET_NR_epoll_wait)
11946         case TARGET_NR_epoll_wait:
11947             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11948                                              NULL, 0));
11949             break;
11950 #endif
11951         default:
11952             ret = -TARGET_ENOSYS;
11953         }
11954         if (!is_error(ret)) {
11955             int i;
11956             for (i = 0; i < ret; i++) {
11957                 target_ep[i].events = tswap32(ep[i].events);
11958                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11959             }
11960             unlock_user(target_ep, arg2,
11961                         ret * sizeof(struct target_epoll_event));
11962         } else {
11963             unlock_user(target_ep, arg2, 0);
11964         }
11965         g_free(ep);
11966         return ret;
11967     }
11968 #endif
11969 #endif
11970 #ifdef TARGET_NR_prlimit64
11971     case TARGET_NR_prlimit64:
11972     {
11973         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
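              /*
               * A new limit for RLIMIT_AS, RLIMIT_DATA or RLIMIT_STACK is
               * ignored below, presumably because applying it to the host
               * process would constrain QEMU itself rather than just the
               * guest; only the old value is reported for those resources.
               */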
11974         struct target_rlimit64 *target_rnew, *target_rold;
11975         struct host_rlimit64 rnew, rold, *rnewp = 0;
11976         int resource = target_to_host_resource(arg2);
11977 
11978         if (arg3 && (resource != RLIMIT_AS &&
11979                      resource != RLIMIT_DATA &&
11980                      resource != RLIMIT_STACK)) {
11981             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11982                 return -TARGET_EFAULT;
11983             }
11984             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11985             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11986             unlock_user_struct(target_rnew, arg3, 0);
11987             rnewp = &rnew;
11988         }
11989 
11990         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11991         if (!is_error(ret) && arg4) {
11992             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11993                 return -TARGET_EFAULT;
11994             }
11995             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11996             target_rold->rlim_max = tswap64(rold.rlim_max);
11997             unlock_user_struct(target_rold, arg4, 1);
11998         }
11999         return ret;
12000     }
12001 #endif
12002 #ifdef TARGET_NR_gethostname
12003     case TARGET_NR_gethostname:
12004     {
12005         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12006         if (name) {
12007             ret = get_errno(gethostname(name, arg2));
12008             unlock_user(name, arg1, arg2);
12009         } else {
12010             ret = -TARGET_EFAULT;
12011         }
12012         return ret;
12013     }
12014 #endif
12015 #ifdef TARGET_NR_atomic_cmpxchg_32
12016     case TARGET_NR_atomic_cmpxchg_32:
12017     {
12018         /* should use start_exclusive from main.c */
12019         abi_ulong mem_value;
12020         if (get_user_u32(mem_value, arg6)) {
12021             target_siginfo_t info;
12022             info.si_signo = SIGSEGV;
12023             info.si_errno = 0;
12024             info.si_code = TARGET_SEGV_MAPERR;
12025             info._sifields._sigfault._addr = arg6;
12026             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12027                          QEMU_SI_FAULT, &info);
12028             /* Do not read mem_value if the address faulted. */
12029             return 0xdeadbeef;
12030         }
12031         if (mem_value == arg2)
12032             put_user_u32(arg1, arg6);
12033         return mem_value;
12034     }
12035 #endif
12036 #ifdef TARGET_NR_atomic_barrier
12037     case TARGET_NR_atomic_barrier:
12038         /* Like the kernel implementation and the
12039            qemu arm barrier, treat this as a no-op. */
12040         return 0;
12041 #endif
12042 
12043 #ifdef TARGET_NR_timer_create
12044     case TARGET_NR_timer_create:
12045     {
12046         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12047 
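              /*
               * Host timers live in the g_posix_timers[] array; the id handed
               * back to the guest is the array index tagged with TIMER_MAGIC
               * so that get_timer_id() can validate and decode it in the
               * timer_settime/gettime/getoverrun/delete handlers below.
               */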
12048         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12049 
12050         int clkid = arg1;
12051         int timer_index = next_free_host_timer();
12052 
12053         if (timer_index < 0) {
12054             ret = -TARGET_EAGAIN;
12055         } else {
12056             timer_t *phtimer = g_posix_timers + timer_index;
12057 
12058             if (arg2) {
12059                 phost_sevp = &host_sevp;
12060                 ret = target_to_host_sigevent(phost_sevp, arg2);
12061                 if (ret != 0) {
12062                     return ret;
12063                 }
12064             }
12065 
12066             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12067             if (ret) {
12068                 phtimer = NULL;
12069             } else {
12070                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12071                     return -TARGET_EFAULT;
12072                 }
12073             }
12074         }
12075         return ret;
12076     }
12077 #endif
12078 
12079 #ifdef TARGET_NR_timer_settime
12080     case TARGET_NR_timer_settime:
12081     {
12082         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12083          * struct itimerspec * old_value */
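        /*
         * get_timer_id() checks the TIMER_MAGIC cookie and yields either an
         * index into g_posix_timers[] or a negative target errno value.
         */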
12084         target_timer_t timerid = get_timer_id(arg1);
12085 
12086         if (timerid < 0) {
12087             ret = timerid;
12088         } else if (arg3 == 0) {
12089             ret = -TARGET_EINVAL;
12090         } else {
12091             timer_t htimer = g_posix_timers[timerid];
12092             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12093 
12094             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12095                 return -TARGET_EFAULT;
12096             }
12097             ret = get_errno(
12098                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12099             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12100                 return -TARGET_EFAULT;
12101             }
12102         }
12103         return ret;
12104     }
12105 #endif
12106 
12107 #ifdef TARGET_NR_timer_gettime
12108     case TARGET_NR_timer_gettime:
12109     {
12110         /* args: timer_t timerid, struct itimerspec *curr_value */
12111         target_timer_t timerid = get_timer_id(arg1);
12112 
12113         if (timerid < 0) {
12114             ret = timerid;
12115         } else if (!arg2) {
12116             ret = -TARGET_EFAULT;
12117         } else {
12118             timer_t htimer = g_posix_timers[timerid];
12119             struct itimerspec hspec;
12120             ret = get_errno(timer_gettime(htimer, &hspec));
12121 
12122             if (host_to_target_itimerspec(arg2, &hspec)) {
12123                 ret = -TARGET_EFAULT;
12124             }
12125         }
12126         return ret;
12127     }
12128 #endif
12129 
12130 #ifdef TARGET_NR_timer_getoverrun
12131     case TARGET_NR_timer_getoverrun:
12132     {
12133         /* args: timer_t timerid */
12134         target_timer_t timerid = get_timer_id(arg1);
12135 
12136         if (timerid < 0) {
12137             ret = timerid;
12138         } else {
12139             timer_t htimer = g_posix_timers[timerid];
12140             ret = get_errno(timer_getoverrun(htimer));
12141         }
12142         return ret;
12143     }
12144 #endif
12145 
12146 #ifdef TARGET_NR_timer_delete
12147     case TARGET_NR_timer_delete:
12148     {
12149         /* args: timer_t timerid */
12150         target_timer_t timerid = get_timer_id(arg1);
12151 
12152         if (timerid < 0) {
12153             ret = timerid;
12154         } else {
12155             timer_t htimer = g_posix_timers[timerid];
12156             ret = get_errno(timer_delete(htimer));
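            /* Clear the slot so next_free_host_timer() can reuse it. */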
12157             g_posix_timers[timerid] = 0;
12158         }
12159         return ret;
12160     }
12161 #endif
12162 
12163 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12164     case TARGET_NR_timerfd_create:
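        /*
         * TFD_CLOEXEC and TFD_NONBLOCK appear to share their values with
         * O_CLOEXEC and O_NONBLOCK, so reusing fcntl_flags_tbl to translate
         * the guest flags is assumed to be sufficient here.
         */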
12165         return get_errno(timerfd_create(arg1,
12166                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12167 #endif
12168 
12169 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12170     case TARGET_NR_timerfd_gettime:
12171         {
12172             struct itimerspec its_curr;
12173 
12174             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12175 
12176             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12177                 return -TARGET_EFAULT;
12178             }
12179         }
12180         return ret;
12181 #endif
12182 
12183 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12184     case TARGET_NR_timerfd_settime:
12185         {
12186             struct itimerspec its_new, its_old, *p_new;
12187 
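            /*
             * Convert the guest itimerspec to host layout when one was
             * supplied; a NULL new value is passed straight through and the
             * host kernel reports the resulting error.
             */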
12188             if (arg3) {
12189                 if (target_to_host_itimerspec(&its_new, arg3)) {
12190                     return -TARGET_EFAULT;
12191                 }
12192                 p_new = &its_new;
12193             } else {
12194                 p_new = NULL;
12195             }
12196 
12197             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12198 
12199             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12200                 return -TARGET_EFAULT;
12201             }
12202         }
12203         return ret;
12204 #endif
12205 
12206 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12207     case TARGET_NR_ioprio_get:
12208         return get_errno(ioprio_get(arg1, arg2));
12209 #endif
12210 
12211 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12212     case TARGET_NR_ioprio_set:
12213         return get_errno(ioprio_set(arg1, arg2, arg3));
12214 #endif
12215 
12216 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12217     case TARGET_NR_setns:
12218         return get_errno(setns(arg1, arg2));
12219 #endif
12220 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12221     case TARGET_NR_unshare:
12222         return get_errno(unshare(arg1));
12223 #endif
12224 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12225     case TARGET_NR_kcmp:
12226         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12227 #endif
12228 #ifdef TARGET_NR_swapcontext
12229     case TARGET_NR_swapcontext:
12230         /* PowerPC specific.  */
12231         return do_swapcontext(cpu_env, arg1, arg2, arg3);
12232 #endif
12233 #ifdef TARGET_NR_memfd_create
12234     case TARGET_NR_memfd_create:
12235         p = lock_user_string(arg1);
12236         if (!p) {
12237             return -TARGET_EFAULT;
12238         }
12239         ret = get_errno(memfd_create(p, arg2));
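        /*
         * Drop any stale fd translator that may still be registered for
         * this fd number by a previous user of it.
         */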
12240         fd_trans_unregister(ret);
12241         unlock_user(p, arg1, 0);
12242         return ret;
12243 #endif
12244 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
12245     case TARGET_NR_membarrier:
12246         return get_errno(membarrier(arg1, arg2));
12247 #endif
12248 
12249     default:
12250         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12251         return -TARGET_ENOSYS;
12252     }
12253     return ret;
12254 }
12255 
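/*
 * Guest syscall entry point: wraps do_syscall1() with the syscall trace
 * hooks and, when -strace logging is enabled, prints the arguments and
 * return value.
 */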
12256 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12257                     abi_long arg2, abi_long arg3, abi_long arg4,
12258                     abi_long arg5, abi_long arg6, abi_long arg7,
12259                     abi_long arg8)
12260 {
12261     CPUState *cpu = env_cpu(cpu_env);
12262     abi_long ret;
12263 
12264 #ifdef DEBUG_ERESTARTSYS
12265     /* Debug-only code for exercising the syscall-restart code paths
12266      * in the per-architecture cpu main loops: restart every syscall
12267      * the guest makes once before letting it through.
12268      */
12269     {
12270         static bool flag;
12271         flag = !flag;
12272         if (flag) {
12273             return -TARGET_ERESTARTSYS;
12274         }
12275     }
12276 #endif
12277 
12278     record_syscall_start(cpu, num, arg1,
12279                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
12280 
12281     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12282         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
12283     }
12284 
12285     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12286                       arg5, arg6, arg7, arg8);
12287 
12288     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12289         print_syscall_ret(num, ret);
12290     }
12291 
12292     record_syscall_return(cpu, num, ret);
12293     return ret;
12294 }
12295