xref: /openbmc/qemu/linux-user/syscall.c (revision e865b97f)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 #ifdef CONFIG_KCOV
77 #include <sys/kcov.h>
78 #endif
79 
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
86 
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
92 #include <linux/kd.h>
93 #include <linux/mtio.h>
94 #include <linux/fs.h>
95 #include <linux/fd.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
98 #endif
99 #include <linux/fb.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
103 #endif
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
115 #ifdef HAVE_DRM_H
116 #include <libdrm/drm.h>
117 #endif
118 #include "linux_loop.h"
119 #include "uname.h"
120 
121 #include "qemu.h"
122 #include "qemu/guest-random.h"
123 #include "qemu/selfmap.h"
124 #include "user/syscall-trace.h"
125 #include "qapi/error.h"
126 #include "fd-trans.h"
127 #include "tcg/tcg.h"
128 
129 #ifndef CLONE_IO
130 #define CLONE_IO                0x80000000      /* Clone io context */
131 #endif
132 
133 /* We can't directly call the host clone syscall, because this will
134  * badly confuse libc (breaking mutexes, for example). So we must
135  * divide clone flags into:
136  *  * flag combinations that look like pthread_create()
137  *  * flag combinations that look like fork()
138  *  * flags we can implement within QEMU itself
139  *  * flags we can't support and will return an error for
140  */
141 /* For thread creation, all these flags must be present; for
142  * fork, none must be present.
143  */
144 #define CLONE_THREAD_FLAGS                              \
145     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
146      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
147 
148 /* These flags are ignored:
149  * CLONE_DETACHED is now ignored by the kernel;
150  * CLONE_IO is just an optimisation hint to the I/O scheduler
151  */
152 #define CLONE_IGNORED_FLAGS                     \
153     (CLONE_DETACHED | CLONE_IO)
154 
155 /* Flags for fork which we can implement within QEMU itself */
156 #define CLONE_OPTIONAL_FORK_FLAGS               \
157     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
158      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
159 
160 /* Flags for thread creation which we can implement within QEMU itself */
161 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
162     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
163      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
164 
165 #define CLONE_INVALID_FORK_FLAGS                                        \
166     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
167 
168 #define CLONE_INVALID_THREAD_FLAGS                                      \
169     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
170        CLONE_IGNORED_FLAGS))
171 
172 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
173  * have almost all been allocated. We cannot support any of
174  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
175  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
176  * The checks against the invalid thread masks above will catch these.
177  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
178  */
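
/*
 * A minimal sketch of how the masks above combine when classifying a clone
 * request (illustrative only; the bare "flags" variable and the early
 * returns are hypothetical, not the actual do_fork() code):
 *
 *     if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
 *         // looks like pthread_create(): reject any unsupported bits
 *         if (flags & CLONE_INVALID_THREAD_FLAGS) {
 *             return -TARGET_EINVAL;
 *         }
 *     } else {
 *         // looks like fork(): only CSIGNAL, optional and ignored bits allowed
 *         if (flags & CLONE_INVALID_FORK_FLAGS) {
 *             return -TARGET_EINVAL;
 *         }
 *     }
 */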
179 
180 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
181  * once. This exercises the codepaths for restart.
182  */
183 //#define DEBUG_ERESTARTSYS
184 
185 //#include <linux/msdos_fs.h>
186 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
187 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
188 
189 #undef _syscall0
190 #undef _syscall1
191 #undef _syscall2
192 #undef _syscall3
193 #undef _syscall4
194 #undef _syscall5
195 #undef _syscall6
196 
197 #define _syscall0(type,name)		\
198 static type name (void)			\
199 {					\
200 	return syscall(__NR_##name);	\
201 }
202 
203 #define _syscall1(type,name,type1,arg1)		\
204 static type name (type1 arg1)			\
205 {						\
206 	return syscall(__NR_##name, arg1);	\
207 }
208 
209 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
210 static type name (type1 arg1,type2 arg2)		\
211 {							\
212 	return syscall(__NR_##name, arg1, arg2);	\
213 }
214 
215 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
216 static type name (type1 arg1,type2 arg2,type3 arg3)		\
217 {								\
218 	return syscall(__NR_##name, arg1, arg2, arg3);		\
219 }
220 
221 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
222 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
223 {										\
224 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
225 }
226 
227 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
228 		  type5,arg5)							\
229 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
230 {										\
231 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
232 }
233 
234 
235 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
236 		  type5,arg5,type6,arg6)					\
237 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
238                   type6 arg6)							\
239 {										\
240 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
241 }
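
/*
 * An illustrative expansion of the macros above.  For example, the real
 * invocation that appears later in this file,
 *
 *     _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp,
 *               uint, count);
 *
 * expands to roughly
 *
 *     static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 *
 * which works because __NR_sys_getdents is #defined below to the host's
 * __NR_getdents.
 */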
242 
243 
244 #define __NR_sys_uname __NR_uname
245 #define __NR_sys_getcwd1 __NR_getcwd
246 #define __NR_sys_getdents __NR_getdents
247 #define __NR_sys_getdents64 __NR_getdents64
248 #define __NR_sys_getpriority __NR_getpriority
249 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
250 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
251 #define __NR_sys_syslog __NR_syslog
252 #if defined(__NR_futex)
253 # define __NR_sys_futex __NR_futex
254 #endif
255 #if defined(__NR_futex_time64)
256 # define __NR_sys_futex_time64 __NR_futex_time64
257 #endif
258 #define __NR_sys_inotify_init __NR_inotify_init
259 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
260 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
261 #define __NR_sys_statx __NR_statx
262 
263 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
264 #define __NR__llseek __NR_lseek
265 #endif
266 
267 /* Newer kernel ports have llseek() instead of _llseek() */
268 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
269 #define TARGET_NR__llseek TARGET_NR_llseek
270 #endif
271 
272 #define __NR_sys_gettid __NR_gettid
273 _syscall0(int, sys_gettid)
274 
275 /* For the 64-bit guest on 32-bit host case we must emulate
276  * getdents using getdents64, because otherwise the host
277  * might hand us back more dirent records than we can fit
278  * into the guest buffer after structure format conversion.
279  * Otherwise we emulate the guest getdents with the host getdents when available.
280  */
281 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
282 #define EMULATE_GETDENTS_WITH_GETDENTS
283 #endif
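
/*
 * A rough illustration of the size concern above (the field widths are the
 * usual ones, stated here as an assumption rather than an exact ABI
 * description): a 32-bit host linux_dirent starts with 32-bit d_ino and
 * d_off, while a 64-bit guest's linux_dirent starts with 64-bit fields, so
 * every record grows during conversion and a full host buffer may no longer
 * fit in the guest buffer.  Host getdents64 records already carry 64-bit
 * d_ino/d_off, so conversion from them cannot grow the data in that way.
 */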
284 
285 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
286 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
287 #endif
288 #if (defined(TARGET_NR_getdents) && \
289       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
290     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
291 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
292 #endif
293 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
294 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
295           loff_t *, res, uint, wh);
296 #endif
297 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
298 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
299           siginfo_t *, uinfo)
300 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
301 #ifdef __NR_exit_group
302 _syscall1(int,exit_group,int,error_code)
303 #endif
304 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
305 _syscall1(int,set_tid_address,int *,tidptr)
306 #endif
307 #if defined(__NR_futex)
308 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
309           const struct timespec *,timeout,int *,uaddr2,int,val3)
310 #endif
311 #if defined(__NR_futex_time64)
312 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
313           const struct timespec *,timeout,int *,uaddr2,int,val3)
314 #endif
315 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
316 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
317           unsigned long *, user_mask_ptr);
318 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
319 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
320           unsigned long *, user_mask_ptr);
321 #define __NR_sys_getcpu __NR_getcpu
322 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
323 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
324           void *, arg);
325 _syscall2(int, capget, struct __user_cap_header_struct *, header,
326           struct __user_cap_data_struct *, data);
327 _syscall2(int, capset, struct __user_cap_header_struct *, header,
328           struct __user_cap_data_struct *, data);
329 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
330 _syscall2(int, ioprio_get, int, which, int, who)
331 #endif
332 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
333 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
334 #endif
335 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
336 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
337 #endif
338 
339 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
340 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
341           unsigned long, idx1, unsigned long, idx2)
342 #endif
343 
344 /*
345  * It is assumed that struct statx is architecture independent.
346  */
347 #if defined(TARGET_NR_statx) && defined(__NR_statx)
348 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
349           unsigned int, mask, struct target_statx *, statxbuf)
350 #endif
351 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
352 _syscall2(int, membarrier, int, cmd, int, flags)
353 #endif
354 
355 static bitmask_transtbl fcntl_flags_tbl[] = {
356   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
357   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
358   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
359   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
360   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
361   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
362   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
363   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
364   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
365   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
366   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
367   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
368   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
369 #if defined(O_DIRECT)
370   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
371 #endif
372 #if defined(O_NOATIME)
373   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
374 #endif
375 #if defined(O_CLOEXEC)
376   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
377 #endif
378 #if defined(O_PATH)
379   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
380 #endif
381 #if defined(O_TMPFILE)
382   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
383 #endif
384   /* Don't terminate the list prematurely on 64-bit host+guest.  */
385 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
386   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
387 #endif
388   { 0, 0, 0, 0 }
389 };
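
/*
 * A hedged sketch of how a four-column table like fcntl_flags_tbl is
 * typically consumed.  The struct and function below are purely illustrative
 * (local names, not QEMU's real translation helpers): for each row, if the
 * guest flags match target_bits under target_mask, the corresponding host
 * bits are set.
 *
 *     struct example_row { unsigned tmask, tbits, hmask, hbits; };
 *
 *     static unsigned example_target_to_host(const struct example_row *row,
 *                                            unsigned target_flags)
 *     {
 *         unsigned host_flags = 0;
 *         for (; row->tmask != 0; row++) {          // list ends at the 0 row
 *             if ((target_flags & row->tmask) == row->tbits) {
 *                 host_flags |= row->hbits;
 *             }
 *         }
 *         return host_flags;
 *     }
 */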
390 
391 static int sys_getcwd1(char *buf, size_t size)
392 {
393   if (getcwd(buf, size) == NULL) {
394       /* getcwd() sets errno */
395       return (-1);
396   }
397   return strlen(buf)+1;
398 }
399 
400 #ifdef TARGET_NR_utimensat
401 #if defined(__NR_utimensat)
402 #define __NR_sys_utimensat __NR_utimensat
403 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
404           const struct timespec *,tsp,int,flags)
405 #else
406 static int sys_utimensat(int dirfd, const char *pathname,
407                          const struct timespec times[2], int flags)
408 {
409     errno = ENOSYS;
410     return -1;
411 }
412 #endif
413 #endif /* TARGET_NR_utimensat */
414 
415 #ifdef TARGET_NR_renameat2
416 #if defined(__NR_renameat2)
417 #define __NR_sys_renameat2 __NR_renameat2
418 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
419           const char *, new, unsigned int, flags)
420 #else
421 static int sys_renameat2(int oldfd, const char *old,
422                          int newfd, const char *new, int flags)
423 {
424     if (flags == 0) {
425         return renameat(oldfd, old, newfd, new);
426     }
427     errno = ENOSYS;
428     return -1;
429 }
430 #endif
431 #endif /* TARGET_NR_renameat2 */
432 
433 #ifdef CONFIG_INOTIFY
434 #include <sys/inotify.h>
435 
436 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
437 static int sys_inotify_init(void)
438 {
439   return (inotify_init());
440 }
441 #endif
442 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
443 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
444 {
445   return (inotify_add_watch(fd, pathname, mask));
446 }
447 #endif
448 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
449 static int sys_inotify_rm_watch(int fd, int32_t wd)
450 {
451   return (inotify_rm_watch(fd, wd));
452 }
453 #endif
454 #ifdef CONFIG_INOTIFY1
455 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
456 static int sys_inotify_init1(int flags)
457 {
458   return (inotify_init1(flags));
459 }
460 #endif
461 #endif
462 #else
463 /* Userspace can usually survive at runtime without inotify */
464 #undef TARGET_NR_inotify_init
465 #undef TARGET_NR_inotify_init1
466 #undef TARGET_NR_inotify_add_watch
467 #undef TARGET_NR_inotify_rm_watch
468 #endif /* CONFIG_INOTIFY  */
469 
470 #if defined(TARGET_NR_prlimit64)
471 #ifndef __NR_prlimit64
472 # define __NR_prlimit64 -1
473 #endif
474 #define __NR_sys_prlimit64 __NR_prlimit64
475 /* The glibc rlimit structure may not be the one used by the underlying syscall */
476 struct host_rlimit64 {
477     uint64_t rlim_cur;
478     uint64_t rlim_max;
479 };
480 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
481           const struct host_rlimit64 *, new_limit,
482           struct host_rlimit64 *, old_limit)
483 #endif
484 
485 
486 #if defined(TARGET_NR_timer_create)
487 /* Maximum of 32 active POSIX timers allowed at any one time. */
488 static timer_t g_posix_timers[32] = { 0, } ;
489 
490 static inline int next_free_host_timer(void)
491 {
492     int k ;
493     /* FIXME: Does finding the next free slot require a lock? */
494     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
495         if (g_posix_timers[k] == 0) {
496             g_posix_timers[k] = (timer_t) 1;
497             return k;
498         }
499     }
500     return -1;
501 }
502 #endif
503 
504 /* ARM EABI and MIPS expect 64-bit types to be aligned even on pairs of registers */
505 #ifdef TARGET_ARM
506 static inline int regpairs_aligned(void *cpu_env, int num)
507 {
508     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
509 }
510 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
511 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
512 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
513 /* The SysV ABI for PPC32 expects 64-bit parameters to be passed on odd/even
514  * pairs of registers, which translates to the same as ARM/MIPS, because we
515  * start with r3 as arg1 */
516 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
517 #elif defined(TARGET_SH4)
518 /* SH4 doesn't align register pairs, except for p{read,write}64 */
519 static inline int regpairs_aligned(void *cpu_env, int num)
520 {
521     switch (num) {
522     case TARGET_NR_pread64:
523     case TARGET_NR_pwrite64:
524         return 1;
525 
526     default:
527         return 0;
528     }
529 }
530 #elif defined(TARGET_XTENSA)
531 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
532 #else
533 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
534 #endif
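
/*
 * What "aligned" means in practice (a sketch of how the caller uses
 * regpairs_aligned(), not a quote of it): when it returns 1, a 64-bit
 * syscall argument occupies an even/odd register pair, so one argument slot
 * is skipped before assembling the low/high halves, roughly
 *
 *     if (regpairs_aligned(cpu_env, num)) {
 *         arg4 = arg5;       // discard the padding slot
 *         arg5 = arg6;
 *     }
 *     offset = target_offset64(arg4, arg5);
 */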
535 
536 #define ERRNO_TABLE_SIZE 1200
537 
538 /* target_to_host_errno_table[] is initialized from
539  * host_to_target_errno_table[] in syscall_init(). */
540 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
541 };
542 
543 /*
544  * This list is the union of errno values overridden in asm-<arch>/errno.h
545  * minus the errnos that are not actually generic to all archs.
546  */
547 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
548     [EAGAIN]		= TARGET_EAGAIN,
549     [EIDRM]		= TARGET_EIDRM,
550     [ECHRNG]		= TARGET_ECHRNG,
551     [EL2NSYNC]		= TARGET_EL2NSYNC,
552     [EL3HLT]		= TARGET_EL3HLT,
553     [EL3RST]		= TARGET_EL3RST,
554     [ELNRNG]		= TARGET_ELNRNG,
555     [EUNATCH]		= TARGET_EUNATCH,
556     [ENOCSI]		= TARGET_ENOCSI,
557     [EL2HLT]		= TARGET_EL2HLT,
558     [EDEADLK]		= TARGET_EDEADLK,
559     [ENOLCK]		= TARGET_ENOLCK,
560     [EBADE]		= TARGET_EBADE,
561     [EBADR]		= TARGET_EBADR,
562     [EXFULL]		= TARGET_EXFULL,
563     [ENOANO]		= TARGET_ENOANO,
564     [EBADRQC]		= TARGET_EBADRQC,
565     [EBADSLT]		= TARGET_EBADSLT,
566     [EBFONT]		= TARGET_EBFONT,
567     [ENOSTR]		= TARGET_ENOSTR,
568     [ENODATA]		= TARGET_ENODATA,
569     [ETIME]		= TARGET_ETIME,
570     [ENOSR]		= TARGET_ENOSR,
571     [ENONET]		= TARGET_ENONET,
572     [ENOPKG]		= TARGET_ENOPKG,
573     [EREMOTE]		= TARGET_EREMOTE,
574     [ENOLINK]		= TARGET_ENOLINK,
575     [EADV]		= TARGET_EADV,
576     [ESRMNT]		= TARGET_ESRMNT,
577     [ECOMM]		= TARGET_ECOMM,
578     [EPROTO]		= TARGET_EPROTO,
579     [EDOTDOT]		= TARGET_EDOTDOT,
580     [EMULTIHOP]		= TARGET_EMULTIHOP,
581     [EBADMSG]		= TARGET_EBADMSG,
582     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
583     [EOVERFLOW]		= TARGET_EOVERFLOW,
584     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
585     [EBADFD]		= TARGET_EBADFD,
586     [EREMCHG]		= TARGET_EREMCHG,
587     [ELIBACC]		= TARGET_ELIBACC,
588     [ELIBBAD]		= TARGET_ELIBBAD,
589     [ELIBSCN]		= TARGET_ELIBSCN,
590     [ELIBMAX]		= TARGET_ELIBMAX,
591     [ELIBEXEC]		= TARGET_ELIBEXEC,
592     [EILSEQ]		= TARGET_EILSEQ,
593     [ENOSYS]		= TARGET_ENOSYS,
594     [ELOOP]		= TARGET_ELOOP,
595     [ERESTART]		= TARGET_ERESTART,
596     [ESTRPIPE]		= TARGET_ESTRPIPE,
597     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
598     [EUSERS]		= TARGET_EUSERS,
599     [ENOTSOCK]		= TARGET_ENOTSOCK,
600     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
601     [EMSGSIZE]		= TARGET_EMSGSIZE,
602     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
603     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
604     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
605     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
606     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
607     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
608     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
609     [EADDRINUSE]	= TARGET_EADDRINUSE,
610     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
611     [ENETDOWN]		= TARGET_ENETDOWN,
612     [ENETUNREACH]	= TARGET_ENETUNREACH,
613     [ENETRESET]		= TARGET_ENETRESET,
614     [ECONNABORTED]	= TARGET_ECONNABORTED,
615     [ECONNRESET]	= TARGET_ECONNRESET,
616     [ENOBUFS]		= TARGET_ENOBUFS,
617     [EISCONN]		= TARGET_EISCONN,
618     [ENOTCONN]		= TARGET_ENOTCONN,
619     [EUCLEAN]		= TARGET_EUCLEAN,
620     [ENOTNAM]		= TARGET_ENOTNAM,
621     [ENAVAIL]		= TARGET_ENAVAIL,
622     [EISNAM]		= TARGET_EISNAM,
623     [EREMOTEIO]		= TARGET_EREMOTEIO,
624     [EDQUOT]            = TARGET_EDQUOT,
625     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
626     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
627     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
628     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
629     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
630     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
631     [EALREADY]		= TARGET_EALREADY,
632     [EINPROGRESS]	= TARGET_EINPROGRESS,
633     [ESTALE]		= TARGET_ESTALE,
634     [ECANCELED]		= TARGET_ECANCELED,
635     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
636     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
637 #ifdef ENOKEY
638     [ENOKEY]		= TARGET_ENOKEY,
639 #endif
640 #ifdef EKEYEXPIRED
641     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
642 #endif
643 #ifdef EKEYREVOKED
644     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
645 #endif
646 #ifdef EKEYREJECTED
647     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
648 #endif
649 #ifdef EOWNERDEAD
650     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
651 #endif
652 #ifdef ENOTRECOVERABLE
653     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
654 #endif
655 #ifdef ENOMSG
656     [ENOMSG]            = TARGET_ENOMSG,
657 #endif
658 #ifdef ERFKILL
659     [ERFKILL]           = TARGET_ERFKILL,
660 #endif
661 #ifdef EHWPOISON
662     [EHWPOISON]         = TARGET_EHWPOISON,
663 #endif
664 };
665 
666 static inline int host_to_target_errno(int err)
667 {
668     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
669         host_to_target_errno_table[err]) {
670         return host_to_target_errno_table[err];
671     }
672     return err;
673 }
674 
675 static inline int target_to_host_errno(int err)
676 {
677     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
678         target_to_host_errno_table[err]) {
679         return target_to_host_errno_table[err];
680     }
681     return err;
682 }
683 
684 static inline abi_long get_errno(abi_long ret)
685 {
686     if (ret == -1)
687         return -host_to_target_errno(errno);
688     else
689         return ret;
690 }
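
/*
 * Typical usage of get_errno() (the call site below is hypothetical): wrap a
 * raw host call so that a -1/errno failure is folded into a single negative
 * target errno value that can be returned straight to the guest.
 *
 *     abi_long ret = get_errno(fchmod(host_fd, mode));
 *     if (is_error(ret)) {
 *         return ret;            // already -TARGET_Exxx
 *     }
 */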
691 
692 const char *target_strerror(int err)
693 {
694     if (err == TARGET_ERESTARTSYS) {
695         return "To be restarted";
696     }
697     if (err == TARGET_QEMU_ESIGRETURN) {
698         return "Successful exit from sigreturn";
699     }
700 
701     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
702         return NULL;
703     }
704     return strerror(target_to_host_errno(err));
705 }
706 
707 #define safe_syscall0(type, name) \
708 static type safe_##name(void) \
709 { \
710     return safe_syscall(__NR_##name); \
711 }
712 
713 #define safe_syscall1(type, name, type1, arg1) \
714 static type safe_##name(type1 arg1) \
715 { \
716     return safe_syscall(__NR_##name, arg1); \
717 }
718 
719 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
720 static type safe_##name(type1 arg1, type2 arg2) \
721 { \
722     return safe_syscall(__NR_##name, arg1, arg2); \
723 }
724 
725 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
726 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
727 { \
728     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
729 }
730 
731 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
732     type4, arg4) \
733 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
734 { \
735     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
736 }
737 
738 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
739     type4, arg4, type5, arg5) \
740 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
741     type5 arg5) \
742 { \
743     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
744 }
745 
746 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
747     type4, arg4, type5, arg5, type6, arg6) \
748 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
749     type5 arg5, type6 arg6) \
750 { \
751     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
752 }
753 
754 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
755 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
756 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
757               int, flags, mode_t, mode)
758 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
759 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
760               struct rusage *, rusage)
761 #endif
762 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
763               int, options, struct rusage *, rusage)
764 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
765 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
766     defined(TARGET_NR_pselect6)
767 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
768               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
769 #endif
770 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
771 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
772               struct timespec *, tsp, const sigset_t *, sigmask,
773               size_t, sigsetsize)
774 #endif
775 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
776               int, maxevents, int, timeout, const sigset_t *, sigmask,
777               size_t, sigsetsize)
778 #if defined(__NR_futex)
779 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
780               const struct timespec *,timeout,int *,uaddr2,int,val3)
781 #endif
782 #if defined(__NR_futex_time64)
783 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
784               const struct timespec *,timeout,int *,uaddr2,int,val3)
785 #endif
786 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
787 safe_syscall2(int, kill, pid_t, pid, int, sig)
788 safe_syscall2(int, tkill, int, tid, int, sig)
789 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
790 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
791 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
792 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
793               unsigned long, pos_l, unsigned long, pos_h)
794 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
795               unsigned long, pos_l, unsigned long, pos_h)
796 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
797               socklen_t, addrlen)
798 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
799               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
800 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
801               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
802 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
803 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
804 safe_syscall2(int, flock, int, fd, int, operation)
805 #ifdef TARGET_NR_rt_sigtimedwait
806 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
807               const struct timespec *, uts, size_t, sigsetsize)
808 #endif
809 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
810               int, flags)
811 #if defined(TARGET_NR_nanosleep)
812 safe_syscall2(int, nanosleep, const struct timespec *, req,
813               struct timespec *, rem)
814 #endif
815 #ifdef TARGET_NR_clock_nanosleep
816 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
817               const struct timespec *, req, struct timespec *, rem)
818 #endif
819 #ifdef __NR_ipc
820 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
821               void *, ptr, long, fifth)
822 #endif
823 #ifdef __NR_msgsnd
824 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
825               int, flags)
826 #endif
827 #ifdef __NR_msgrcv
828 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
829               long, msgtype, int, flags)
830 #endif
831 #ifdef __NR_semtimedop
832 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
833               unsigned, nsops, const struct timespec *, timeout)
834 #endif
835 #ifdef TARGET_NR_mq_timedsend
836 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
837               size_t, len, unsigned, prio, const struct timespec *, timeout)
838 #endif
839 #ifdef TARGET_NR_mq_timedreceive
840 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
841               size_t, len, unsigned *, prio, const struct timespec *, timeout)
842 #endif
843 /* We do ioctl like this rather than via safe_syscall3 to preserve the
844  * "third argument might be integer or pointer or not present" behaviour of
845  * the libc function.
846  */
847 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
848 /* Similarly for fcntl. Note that callers must always:
849  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
850  *  use the flock64 struct rather than unsuffixed flock
851  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
852  */
853 #ifdef __NR_fcntl64
854 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
855 #else
856 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
857 #endif
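
/*
 * A usage sketch of the rule stated above (host_fd and fl are hypothetical
 * names): always pass the 64-bit-suffixed command constants and a struct
 * flock64, so the same code works on 32-bit and 64-bit hosts.
 *
 *     struct flock64 fl = { .l_type = F_RDLCK, .l_whence = SEEK_SET };
 *     int ret = safe_fcntl(host_fd, F_GETLK64, &fl);   // not F_GETLK
 */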
858 
859 static inline int host_to_target_sock_type(int host_type)
860 {
861     int target_type;
862 
863     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
864     case SOCK_DGRAM:
865         target_type = TARGET_SOCK_DGRAM;
866         break;
867     case SOCK_STREAM:
868         target_type = TARGET_SOCK_STREAM;
869         break;
870     default:
871         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
872         break;
873     }
874 
875 #if defined(SOCK_CLOEXEC)
876     if (host_type & SOCK_CLOEXEC) {
877         target_type |= TARGET_SOCK_CLOEXEC;
878     }
879 #endif
880 
881 #if defined(SOCK_NONBLOCK)
882     if (host_type & SOCK_NONBLOCK) {
883         target_type |= TARGET_SOCK_NONBLOCK;
884     }
885 #endif
886 
887     return target_type;
888 }
889 
890 static abi_ulong target_brk;
891 static abi_ulong target_original_brk;
892 static abi_ulong brk_page;
893 
894 void target_set_brk(abi_ulong new_brk)
895 {
896     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
897     brk_page = HOST_PAGE_ALIGN(target_brk);
898 }
899 
900 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
901 #define DEBUGF_BRK(message, args...)
902 
903 /* do_brk() must return target values and target errnos. */
904 abi_long do_brk(abi_ulong new_brk)
905 {
906     abi_long mapped_addr;
907     abi_ulong new_alloc_size;
908 
909     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
910 
911     if (!new_brk) {
912         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
913         return target_brk;
914     }
915     if (new_brk < target_original_brk) {
916         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
917                    target_brk);
918         return target_brk;
919     }
920 
921     /* If the new brk is less than the highest page reserved to the
922      * target heap allocation, set it and we're almost done...  */
923     if (new_brk <= brk_page) {
924         /* Heap contents are initialized to zero, as for anonymous
925          * mapped pages.  */
926         if (new_brk > target_brk) {
927             memset(g2h(target_brk), 0, new_brk - target_brk);
928         }
929 	target_brk = new_brk;
930         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
931 	return target_brk;
932     }
933 
934     /* We need to allocate more memory after the brk... Note that
935      * we don't use MAP_FIXED because that will map over the top of
936      * any existing mapping (like the one with the host libc or qemu
937      * itself); instead we treat "mapped but at wrong address" as
938      * a failure and unmap again.
939      */
940     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
941     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
942                                         PROT_READ|PROT_WRITE,
943                                         MAP_ANON|MAP_PRIVATE, 0, 0));
944 
945     if (mapped_addr == brk_page) {
946         /* Heap contents are initialized to zero, as for anonymous
947          * mapped pages.  Technically the new pages are already
948          * initialized to zero since they *are* anonymous mapped
949          * pages, however we have to take care with the contents that
950          * come from the remaining part of the previous page: it may
951  * contain garbage data due to previous heap usage (the heap was grown
952  * and then shrunk).  */
953         memset(g2h(target_brk), 0, brk_page - target_brk);
954 
955         target_brk = new_brk;
956         brk_page = HOST_PAGE_ALIGN(target_brk);
957         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
958             target_brk);
959         return target_brk;
960     } else if (mapped_addr != -1) {
961         /* Mapped but at wrong address, meaning there wasn't actually
962          * enough space for this brk.
963          */
964         target_munmap(mapped_addr, new_alloc_size);
965         mapped_addr = -1;
966         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
967     }
968     else {
969         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
970     }
971 
972 #if defined(TARGET_ALPHA)
973     /* We (partially) emulate OSF/1 on Alpha, which requires we
974        return a proper errno, not an unchanged brk value.  */
975     return -TARGET_ENOMEM;
976 #endif
977     /* For everything else, return the previous break. */
978     return target_brk;
979 }
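
/*
 * A worked example of the growth path above, assuming 4 KiB host pages and
 * made-up addresses: with brk_page = 0x501000 and a request for
 * new_brk = 0x503210, new_alloc_size = HOST_PAGE_ALIGN(0x503210 - 0x501000)
 * = 0x3000, and target_mmap() is asked for that much memory at brk_page.
 * Only if the mapping lands exactly at brk_page is the break advanced;
 * otherwise the mapping (if any) is undone and the old break (or
 * -TARGET_ENOMEM on Alpha) is returned.
 */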
980 
981 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
982     defined(TARGET_NR_pselect6)
983 static inline abi_long copy_from_user_fdset(fd_set *fds,
984                                             abi_ulong target_fds_addr,
985                                             int n)
986 {
987     int i, nw, j, k;
988     abi_ulong b, *target_fds;
989 
990     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
991     if (!(target_fds = lock_user(VERIFY_READ,
992                                  target_fds_addr,
993                                  sizeof(abi_ulong) * nw,
994                                  1)))
995         return -TARGET_EFAULT;
996 
997     FD_ZERO(fds);
998     k = 0;
999     for (i = 0; i < nw; i++) {
1000         /* grab the abi_ulong */
1001         __get_user(b, &target_fds[i]);
1002         for (j = 0; j < TARGET_ABI_BITS; j++) {
1003             /* check the bit inside the abi_ulong */
1004             if ((b >> j) & 1)
1005                 FD_SET(k, fds);
1006             k++;
1007         }
1008     }
1009 
1010     unlock_user(target_fds, target_fds_addr, 0);
1011 
1012     return 0;
1013 }
1014 
1015 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1016                                                  abi_ulong target_fds_addr,
1017                                                  int n)
1018 {
1019     if (target_fds_addr) {
1020         if (copy_from_user_fdset(fds, target_fds_addr, n))
1021             return -TARGET_EFAULT;
1022         *fds_ptr = fds;
1023     } else {
1024         *fds_ptr = NULL;
1025     }
1026     return 0;
1027 }
1028 
1029 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1030                                           const fd_set *fds,
1031                                           int n)
1032 {
1033     int i, nw, j, k;
1034     abi_long v;
1035     abi_ulong *target_fds;
1036 
1037     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1038     if (!(target_fds = lock_user(VERIFY_WRITE,
1039                                  target_fds_addr,
1040                                  sizeof(abi_ulong) * nw,
1041                                  0)))
1042         return -TARGET_EFAULT;
1043 
1044     k = 0;
1045     for (i = 0; i < nw; i++) {
1046         v = 0;
1047         for (j = 0; j < TARGET_ABI_BITS; j++) {
1048             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1049             k++;
1050         }
1051         __put_user(v, &target_fds[i]);
1052     }
1053 
1054     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1055 
1056     return 0;
1057 }
1058 #endif
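
/*
 * A small worked example of the bit packing handled above, assuming
 * TARGET_ABI_BITS == 32: guest fd 35 lives in target_fds[1], bit 3, because
 * 35 = 1 * 32 + 3.  copy_from_user_fdset() turns that bit into
 * FD_SET(35, fds) on the host, and copy_to_user_fdset() writes the same bit
 * back from FD_ISSET(35, fds).
 */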
1059 
1060 #if defined(__alpha__)
1061 #define HOST_HZ 1024
1062 #else
1063 #define HOST_HZ 100
1064 #endif
1065 
1066 static inline abi_long host_to_target_clock_t(long ticks)
1067 {
1068 #if HOST_HZ == TARGET_HZ
1069     return ticks;
1070 #else
1071     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1072 #endif
1073 }
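
/*
 * A worked example of the scaling above: on an Alpha host (HOST_HZ == 1024)
 * emulating a typical TARGET_HZ == 100 guest, 2048 host ticks become
 * (2048 * 100) / 1024 = 200 target ticks; when the two rates are equal the
 * value is returned unchanged.
 */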
1074 
1075 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1076                                              const struct rusage *rusage)
1077 {
1078     struct target_rusage *target_rusage;
1079 
1080     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1081         return -TARGET_EFAULT;
1082     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1083     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1084     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1085     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1086     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1087     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1088     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1089     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1090     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1091     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1092     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1093     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1094     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1095     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1096     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1097     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1098     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1099     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1100     unlock_user_struct(target_rusage, target_addr, 1);
1101 
1102     return 0;
1103 }
1104 
1105 #ifdef TARGET_NR_setrlimit
1106 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1107 {
1108     abi_ulong target_rlim_swap;
1109     rlim_t result;
1110 
1111     target_rlim_swap = tswapal(target_rlim);
1112     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1113         return RLIM_INFINITY;
1114 
1115     result = target_rlim_swap;
1116     if (target_rlim_swap != (rlim_t)result)
1117         return RLIM_INFINITY;
1118 
1119     return result;
1120 }
1121 #endif
1122 
1123 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1124 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1125 {
1126     abi_ulong target_rlim_swap;
1127     abi_ulong result;
1128 
1129     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1130         target_rlim_swap = TARGET_RLIM_INFINITY;
1131     else
1132         target_rlim_swap = rlim;
1133     result = tswapal(target_rlim_swap);
1134 
1135     return result;
1136 }
1137 #endif
1138 
1139 static inline int target_to_host_resource(int code)
1140 {
1141     switch (code) {
1142     case TARGET_RLIMIT_AS:
1143         return RLIMIT_AS;
1144     case TARGET_RLIMIT_CORE:
1145         return RLIMIT_CORE;
1146     case TARGET_RLIMIT_CPU:
1147         return RLIMIT_CPU;
1148     case TARGET_RLIMIT_DATA:
1149         return RLIMIT_DATA;
1150     case TARGET_RLIMIT_FSIZE:
1151         return RLIMIT_FSIZE;
1152     case TARGET_RLIMIT_LOCKS:
1153         return RLIMIT_LOCKS;
1154     case TARGET_RLIMIT_MEMLOCK:
1155         return RLIMIT_MEMLOCK;
1156     case TARGET_RLIMIT_MSGQUEUE:
1157         return RLIMIT_MSGQUEUE;
1158     case TARGET_RLIMIT_NICE:
1159         return RLIMIT_NICE;
1160     case TARGET_RLIMIT_NOFILE:
1161         return RLIMIT_NOFILE;
1162     case TARGET_RLIMIT_NPROC:
1163         return RLIMIT_NPROC;
1164     case TARGET_RLIMIT_RSS:
1165         return RLIMIT_RSS;
1166     case TARGET_RLIMIT_RTPRIO:
1167         return RLIMIT_RTPRIO;
1168     case TARGET_RLIMIT_SIGPENDING:
1169         return RLIMIT_SIGPENDING;
1170     case TARGET_RLIMIT_STACK:
1171         return RLIMIT_STACK;
1172     default:
1173         return code;
1174     }
1175 }
1176 
1177 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1178                                               abi_ulong target_tv_addr)
1179 {
1180     struct target_timeval *target_tv;
1181 
1182     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1183         return -TARGET_EFAULT;
1184     }
1185 
1186     __get_user(tv->tv_sec, &target_tv->tv_sec);
1187     __get_user(tv->tv_usec, &target_tv->tv_usec);
1188 
1189     unlock_user_struct(target_tv, target_tv_addr, 0);
1190 
1191     return 0;
1192 }
1193 
1194 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1195                                             const struct timeval *tv)
1196 {
1197     struct target_timeval *target_tv;
1198 
1199     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1200         return -TARGET_EFAULT;
1201     }
1202 
1203     __put_user(tv->tv_sec, &target_tv->tv_sec);
1204     __put_user(tv->tv_usec, &target_tv->tv_usec);
1205 
1206     unlock_user_struct(target_tv, target_tv_addr, 1);
1207 
1208     return 0;
1209 }
1210 
1211 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1212                                              const struct timeval *tv)
1213 {
1214     struct target__kernel_sock_timeval *target_tv;
1215 
1216     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1217         return -TARGET_EFAULT;
1218     }
1219 
1220     __put_user(tv->tv_sec, &target_tv->tv_sec);
1221     __put_user(tv->tv_usec, &target_tv->tv_usec);
1222 
1223     unlock_user_struct(target_tv, target_tv_addr, 1);
1224 
1225     return 0;
1226 }
1227 
1228 #if defined(TARGET_NR_futex) || \
1229     defined(TARGET_NR_rt_sigtimedwait) || \
1230     defined(TARGET_NR_pselect6) || \
1231     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1232     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1233     defined(TARGET_NR_mq_timedreceive)
1234 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1235                                                abi_ulong target_addr)
1236 {
1237     struct target_timespec *target_ts;
1238 
1239     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1240         return -TARGET_EFAULT;
1241     }
1242     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1243     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1244     unlock_user_struct(target_ts, target_addr, 0);
1245     return 0;
1246 }
1247 #endif
1248 
1249 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64)
1250 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1251                                                  abi_ulong target_addr)
1252 {
1253     struct target__kernel_timespec *target_ts;
1254 
1255     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1256         return -TARGET_EFAULT;
1257     }
1258     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1259     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1260     unlock_user_struct(target_ts, target_addr, 0);
1261     return 0;
1262 }
1263 #endif
1264 
1265 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1266                                                struct timespec *host_ts)
1267 {
1268     struct target_timespec *target_ts;
1269 
1270     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1271         return -TARGET_EFAULT;
1272     }
1273     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1274     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1275     unlock_user_struct(target_ts, target_addr, 1);
1276     return 0;
1277 }
1278 
1279 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1280                                                  struct timespec *host_ts)
1281 {
1282     struct target__kernel_timespec *target_ts;
1283 
1284     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1285         return -TARGET_EFAULT;
1286     }
1287     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1288     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1289     unlock_user_struct(target_ts, target_addr, 1);
1290     return 0;
1291 }
1292 
1293 #if defined(TARGET_NR_gettimeofday)
1294 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1295                                              struct timezone *tz)
1296 {
1297     struct target_timezone *target_tz;
1298 
1299     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1300         return -TARGET_EFAULT;
1301     }
1302 
1303     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1304     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1305 
1306     unlock_user_struct(target_tz, target_tz_addr, 1);
1307 
1308     return 0;
1309 }
1310 #endif
1311 
1312 #if defined(TARGET_NR_settimeofday)
1313 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1314                                                abi_ulong target_tz_addr)
1315 {
1316     struct target_timezone *target_tz;
1317 
1318     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1319         return -TARGET_EFAULT;
1320     }
1321 
1322     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1323     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1324 
1325     unlock_user_struct(target_tz, target_tz_addr, 0);
1326 
1327     return 0;
1328 }
1329 #endif
1330 
1331 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1332 #include <mqueue.h>
1333 
1334 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1335                                               abi_ulong target_mq_attr_addr)
1336 {
1337     struct target_mq_attr *target_mq_attr;
1338 
1339     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1340                           target_mq_attr_addr, 1))
1341         return -TARGET_EFAULT;
1342 
1343     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1344     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1345     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1346     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1347 
1348     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1349 
1350     return 0;
1351 }
1352 
1353 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1354                                             const struct mq_attr *attr)
1355 {
1356     struct target_mq_attr *target_mq_attr;
1357 
1358     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1359                           target_mq_attr_addr, 0))
1360         return -TARGET_EFAULT;
1361 
1362     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1363     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1364     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1365     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1366 
1367     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1368 
1369     return 0;
1370 }
1371 #endif
1372 
1373 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1374 /* do_select() must return target values and target errnos. */
1375 static abi_long do_select(int n,
1376                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1377                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1378 {
1379     fd_set rfds, wfds, efds;
1380     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1381     struct timeval tv;
1382     struct timespec ts, *ts_ptr;
1383     abi_long ret;
1384 
1385     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1386     if (ret) {
1387         return ret;
1388     }
1389     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1390     if (ret) {
1391         return ret;
1392     }
1393     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1394     if (ret) {
1395         return ret;
1396     }
1397 
1398     if (target_tv_addr) {
1399         if (copy_from_user_timeval(&tv, target_tv_addr))
1400             return -TARGET_EFAULT;
1401         ts.tv_sec = tv.tv_sec;
1402         ts.tv_nsec = tv.tv_usec * 1000;
1403         ts_ptr = &ts;
1404     } else {
1405         ts_ptr = NULL;
1406     }
1407 
1408     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1409                                   ts_ptr, NULL));
1410 
1411     if (!is_error(ret)) {
1412         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1413             return -TARGET_EFAULT;
1414         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1415             return -TARGET_EFAULT;
1416         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1417             return -TARGET_EFAULT;
1418 
1419         if (target_tv_addr) {
1420             tv.tv_sec = ts.tv_sec;
1421             tv.tv_usec = ts.tv_nsec / 1000;
1422             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1423                 return -TARGET_EFAULT;
1424             }
1425         }
1426     }
1427 
1428     return ret;
1429 }
1430 
1431 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1432 static abi_long do_old_select(abi_ulong arg1)
1433 {
1434     struct target_sel_arg_struct *sel;
1435     abi_ulong inp, outp, exp, tvp;
1436     long nsel;
1437 
1438     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1439         return -TARGET_EFAULT;
1440     }
1441 
1442     nsel = tswapal(sel->n);
1443     inp = tswapal(sel->inp);
1444     outp = tswapal(sel->outp);
1445     exp = tswapal(sel->exp);
1446     tvp = tswapal(sel->tvp);
1447 
1448     unlock_user_struct(sel, arg1, 0);
1449 
1450     return do_select(nsel, inp, outp, exp, tvp);
1451 }
1452 #endif
1453 #endif
1454 
1455 static abi_long do_pipe2(int host_pipe[], int flags)
1456 {
1457 #ifdef CONFIG_PIPE2
1458     return pipe2(host_pipe, flags);
1459 #else
1460     return -ENOSYS;
1461 #endif
1462 }
1463 
1464 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1465                         int flags, int is_pipe2)
1466 {
1467     int host_pipe[2];
1468     abi_long ret;
1469     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1470 
1471     if (is_error(ret))
1472         return get_errno(ret);
1473 
1474     /* Several targets have special calling conventions for the original
1475        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1476     if (!is_pipe2) {
1477 #if defined(TARGET_ALPHA)
1478         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1479         return host_pipe[0];
1480 #elif defined(TARGET_MIPS)
1481         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1482         return host_pipe[0];
1483 #elif defined(TARGET_SH4)
1484         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1485         return host_pipe[0];
1486 #elif defined(TARGET_SPARC)
1487         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1488         return host_pipe[0];
1489 #endif
1490     }
1491 
1492     if (put_user_s32(host_pipe[0], pipedes)
1493         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1494         return -TARGET_EFAULT;
1495     return get_errno(ret);
1496 }
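
/*
 * For the targets special-cased above, the historical pipe() ABI returns
 * both descriptors in registers: fd[0] as the ordinary return value and
 * fd[1] in a second result register, with nothing written through pipedes.
 * pipe2() (and all other targets) use the memory-based convention instead.
 */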
1497 
1498 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1499                                               abi_ulong target_addr,
1500                                               socklen_t len)
1501 {
1502     struct target_ip_mreqn *target_smreqn;
1503 
1504     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1505     if (!target_smreqn)
1506         return -TARGET_EFAULT;
1507     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1508     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1509     if (len == sizeof(struct target_ip_mreqn))
1510         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1511     unlock_user(target_smreqn, target_addr, 0);
1512 
1513     return 0;
1514 }
1515 
1516 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1517                                                abi_ulong target_addr,
1518                                                socklen_t len)
1519 {
1520     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1521     sa_family_t sa_family;
1522     struct target_sockaddr *target_saddr;
1523 
1524     if (fd_trans_target_to_host_addr(fd)) {
1525         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1526     }
1527 
1528     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1529     if (!target_saddr)
1530         return -TARGET_EFAULT;
1531 
1532     sa_family = tswap16(target_saddr->sa_family);
1533 
1534     /* Oops. The caller might send an incomplete sun_path; sun_path
1535      * must be terminated by \0 (see the manual page), but
1536      * unfortunately it is quite common to specify sockaddr_un
1537      * length as "strlen(x->sun_path)" while it should be
1538      * "strlen(...) + 1". We'll fix that here if needed.
1539      * The Linux kernel has a similar workaround.
1540      */
1541 
1542     if (sa_family == AF_UNIX) {
1543         if (len < unix_maxlen && len > 0) {
1544             char *cp = (char*)target_saddr;
1545 
1546             if (cp[len - 1] && !cp[len])
1547                 len++;
1548         }
1549         if (len > unix_maxlen)
1550             len = unix_maxlen;
1551     }
1552 
1553     memcpy(addr, target_saddr, len);
1554     addr->sa_family = sa_family;
1555     if (sa_family == AF_NETLINK) {
1556         struct sockaddr_nl *nladdr;
1557 
1558         nladdr = (struct sockaddr_nl *)addr;
1559         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1560         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1561     } else if (sa_family == AF_PACKET) {
1562         struct target_sockaddr_ll *lladdr;
1563 
1564         lladdr = (struct target_sockaddr_ll *)addr;
1565         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1566         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1567     }
1568     unlock_user(target_saddr, target_addr, 0);
1569 
1570     return 0;
1571 }
1572 
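/* Copy a host sockaddr back to the guest at 'target_addr', byte-swapping
 * the address family and the protocol specific fields for AF_NETLINK,
 * AF_PACKET and AF_INET6.
 */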
1573 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1574                                                struct sockaddr *addr,
1575                                                socklen_t len)
1576 {
1577     struct target_sockaddr *target_saddr;
1578 
1579     if (len == 0) {
1580         return 0;
1581     }
1582     assert(addr);
1583 
1584     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1585     if (!target_saddr)
1586         return -TARGET_EFAULT;
1587     memcpy(target_saddr, addr, len);
1588     if (len >= offsetof(struct target_sockaddr, sa_family) +
1589         sizeof(target_saddr->sa_family)) {
1590         target_saddr->sa_family = tswap16(addr->sa_family);
1591     }
1592     if (addr->sa_family == AF_NETLINK &&
1593         len >= sizeof(struct target_sockaddr_nl)) {
1594         struct target_sockaddr_nl *target_nl =
1595                (struct target_sockaddr_nl *)target_saddr;
1596         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1597         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1598     } else if (addr->sa_family == AF_PACKET) {
1599         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1600         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1601         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1602     } else if (addr->sa_family == AF_INET6 &&
1603                len >= sizeof(struct target_sockaddr_in6)) {
1604         struct target_sockaddr_in6 *target_in6 =
1605                (struct target_sockaddr_in6 *)target_saddr;
1606         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1607     }
1608     unlock_user(target_saddr, target_addr, len);
1609 
1610     return 0;
1611 }
1612 
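/* Convert the ancillary data (control messages) attached to 'target_msgh'
 * into host format in 'msgh'.  SCM_RIGHTS file descriptor arrays and
 * SCM_CREDENTIALS are converted explicitly; other payloads are copied
 * verbatim with a LOG_UNIMP warning.
 */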
1613 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1614                                            struct target_msghdr *target_msgh)
1615 {
1616     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1617     abi_long msg_controllen;
1618     abi_ulong target_cmsg_addr;
1619     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1620     socklen_t space = 0;
1621 
1622     msg_controllen = tswapal(target_msgh->msg_controllen);
1623     if (msg_controllen < sizeof (struct target_cmsghdr))
1624         goto the_end;
1625     target_cmsg_addr = tswapal(target_msgh->msg_control);
1626     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1627     target_cmsg_start = target_cmsg;
1628     if (!target_cmsg)
1629         return -TARGET_EFAULT;
1630 
1631     while (cmsg && target_cmsg) {
1632         void *data = CMSG_DATA(cmsg);
1633         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1634 
1635         int len = tswapal(target_cmsg->cmsg_len)
1636             - sizeof(struct target_cmsghdr);
1637 
1638         space += CMSG_SPACE(len);
1639         if (space > msgh->msg_controllen) {
1640             space -= CMSG_SPACE(len);
1641             /* This is a QEMU bug, since we allocated the payload
1642              * area ourselves (unlike overflow in host-to-target
1643              * conversion, which is just the guest giving us a buffer
1644              * that's too small). It can't happen for the payload types
1645              * we currently support; if it becomes an issue in future
1646              * we would need to improve our allocation strategy to
1647              * something more intelligent than "twice the size of the
1648              * target buffer we're reading from".
1649              */
1650             qemu_log_mask(LOG_UNIMP,
1651                           "Unsupported ancillary data %d/%d: "
1652                           "unhandled msg size\n",
1653                           tswap32(target_cmsg->cmsg_level),
1654                           tswap32(target_cmsg->cmsg_type));
1655             break;
1656         }
1657 
1658         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1659             cmsg->cmsg_level = SOL_SOCKET;
1660         } else {
1661             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1662         }
1663         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1664         cmsg->cmsg_len = CMSG_LEN(len);
1665 
1666         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1667             int *fd = (int *)data;
1668             int *target_fd = (int *)target_data;
1669             int i, numfds = len / sizeof(int);
1670 
1671             for (i = 0; i < numfds; i++) {
1672                 __get_user(fd[i], target_fd + i);
1673             }
1674         } else if (cmsg->cmsg_level == SOL_SOCKET
1675                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1676             struct ucred *cred = (struct ucred *)data;
1677             struct target_ucred *target_cred =
1678                 (struct target_ucred *)target_data;
1679 
1680             __get_user(cred->pid, &target_cred->pid);
1681             __get_user(cred->uid, &target_cred->uid);
1682             __get_user(cred->gid, &target_cred->gid);
1683         } else {
1684             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1685                           cmsg->cmsg_level, cmsg->cmsg_type);
1686             memcpy(data, target_data, len);
1687         }
1688 
1689         cmsg = CMSG_NXTHDR(msgh, cmsg);
1690         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1691                                          target_cmsg_start);
1692     }
1693     unlock_user(target_cmsg, target_cmsg_addr, 0);
1694  the_end:
1695     msgh->msg_controllen = space;
1696     return 0;
1697 }
1698 
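/* Convert host ancillary data in 'msgh' back into the guest's control
 * buffer described by 'target_msgh'.  If the guest buffer is too small,
 * the data is truncated and MSG_CTRUNC is set in msg_flags, mirroring
 * the kernel's put_cmsg() behaviour.
 */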
1699 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1700                                            struct msghdr *msgh)
1701 {
1702     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1703     abi_long msg_controllen;
1704     abi_ulong target_cmsg_addr;
1705     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1706     socklen_t space = 0;
1707 
1708     msg_controllen = tswapal(target_msgh->msg_controllen);
1709     if (msg_controllen < sizeof (struct target_cmsghdr))
1710         goto the_end;
1711     target_cmsg_addr = tswapal(target_msgh->msg_control);
1712     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1713     target_cmsg_start = target_cmsg;
1714     if (!target_cmsg)
1715         return -TARGET_EFAULT;
1716 
1717     while (cmsg && target_cmsg) {
1718         void *data = CMSG_DATA(cmsg);
1719         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1720 
1721         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1722         int tgt_len, tgt_space;
1723 
1724         /* We never copy a half-header but may copy half-data;
1725          * this is Linux's behaviour in put_cmsg(). Note that
1726          * truncation here is a guest problem (which we report
1727          * to the guest via the CTRUNC bit), unlike truncation
1728          * in target_to_host_cmsg, which is a QEMU bug.
1729          */
1730         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1731             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1732             break;
1733         }
1734 
1735         if (cmsg->cmsg_level == SOL_SOCKET) {
1736             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1737         } else {
1738             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1739         }
1740         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1741 
1742         /* Payload types which need a different size of payload on
1743          * the target must adjust tgt_len here.
1744          */
1745         tgt_len = len;
1746         switch (cmsg->cmsg_level) {
1747         case SOL_SOCKET:
1748             switch (cmsg->cmsg_type) {
1749             case SO_TIMESTAMP:
1750                 tgt_len = sizeof(struct target_timeval);
1751                 break;
1752             default:
1753                 break;
1754             }
1755             break;
1756         default:
1757             break;
1758         }
1759 
1760         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1761             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1762             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1763         }
1764 
1765         /* We must now copy-and-convert len bytes of payload
1766          * into tgt_len bytes of destination space. Bear in mind
1767          * that in both source and destination we may be dealing
1768          * with a truncated value!
1769          */
1770         switch (cmsg->cmsg_level) {
1771         case SOL_SOCKET:
1772             switch (cmsg->cmsg_type) {
1773             case SCM_RIGHTS:
1774             {
1775                 int *fd = (int *)data;
1776                 int *target_fd = (int *)target_data;
1777                 int i, numfds = tgt_len / sizeof(int);
1778 
1779                 for (i = 0; i < numfds; i++) {
1780                     __put_user(fd[i], target_fd + i);
1781                 }
1782                 break;
1783             }
1784             case SO_TIMESTAMP:
1785             {
1786                 struct timeval *tv = (struct timeval *)data;
1787                 struct target_timeval *target_tv =
1788                     (struct target_timeval *)target_data;
1789 
1790                 if (len != sizeof(struct timeval) ||
1791                     tgt_len != sizeof(struct target_timeval)) {
1792                     goto unimplemented;
1793                 }
1794 
1795                 /* copy struct timeval to target */
1796                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1797                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1798                 break;
1799             }
1800             case SCM_CREDENTIALS:
1801             {
1802                 struct ucred *cred = (struct ucred *)data;
1803                 struct target_ucred *target_cred =
1804                     (struct target_ucred *)target_data;
1805 
1806                 __put_user(cred->pid, &target_cred->pid);
1807                 __put_user(cred->uid, &target_cred->uid);
1808                 __put_user(cred->gid, &target_cred->gid);
1809                 break;
1810             }
1811             default:
1812                 goto unimplemented;
1813             }
1814             break;
1815 
1816         case SOL_IP:
1817             switch (cmsg->cmsg_type) {
1818             case IP_TTL:
1819             {
1820                 uint32_t *v = (uint32_t *)data;
1821                 uint32_t *t_int = (uint32_t *)target_data;
1822 
1823                 if (len != sizeof(uint32_t) ||
1824                     tgt_len != sizeof(uint32_t)) {
1825                     goto unimplemented;
1826                 }
1827                 __put_user(*v, t_int);
1828                 break;
1829             }
1830             case IP_RECVERR:
1831             {
1832                 struct errhdr_t {
1833                    struct sock_extended_err ee;
1834                    struct sockaddr_in offender;
1835                 };
1836                 struct errhdr_t *errh = (struct errhdr_t *)data;
1837                 struct errhdr_t *target_errh =
1838                     (struct errhdr_t *)target_data;
1839 
1840                 if (len != sizeof(struct errhdr_t) ||
1841                     tgt_len != sizeof(struct errhdr_t)) {
1842                     goto unimplemented;
1843                 }
1844                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1845                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1846                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1847                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1848                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1849                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1850                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1851                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1852                     (void *) &errh->offender, sizeof(errh->offender));
1853                 break;
1854             }
1855             default:
1856                 goto unimplemented;
1857             }
1858             break;
1859 
1860         case SOL_IPV6:
1861             switch (cmsg->cmsg_type) {
1862             case IPV6_HOPLIMIT:
1863             {
1864                 uint32_t *v = (uint32_t *)data;
1865                 uint32_t *t_int = (uint32_t *)target_data;
1866 
1867                 if (len != sizeof(uint32_t) ||
1868                     tgt_len != sizeof(uint32_t)) {
1869                     goto unimplemented;
1870                 }
1871                 __put_user(*v, t_int);
1872                 break;
1873             }
1874             case IPV6_RECVERR:
1875             {
1876                 struct errhdr6_t {
1877                    struct sock_extended_err ee;
1878                    struct sockaddr_in6 offender;
1879                 };
1880                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1881                 struct errhdr6_t *target_errh =
1882                     (struct errhdr6_t *)target_data;
1883 
1884                 if (len != sizeof(struct errhdr6_t) ||
1885                     tgt_len != sizeof(struct errhdr6_t)) {
1886                     goto unimplemented;
1887                 }
1888                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1889                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1890                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1891                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1892                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1893                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1894                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1895                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1896                     (void *) &errh->offender, sizeof(errh->offender));
1897                 break;
1898             }
1899             default:
1900                 goto unimplemented;
1901             }
1902             break;
1903 
1904         default:
1905         unimplemented:
1906             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1907                           cmsg->cmsg_level, cmsg->cmsg_type);
1908             memcpy(target_data, data, MIN(len, tgt_len));
1909             if (tgt_len > len) {
1910                 memset(target_data + len, 0, tgt_len - len);
1911             }
1912         }
1913 
1914         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1915         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1916         if (msg_controllen < tgt_space) {
1917             tgt_space = msg_controllen;
1918         }
1919         msg_controllen -= tgt_space;
1920         space += tgt_space;
1921         cmsg = CMSG_NXTHDR(msgh, cmsg);
1922         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1923                                          target_cmsg_start);
1924     }
1925     unlock_user(target_cmsg, target_cmsg_addr, space);
1926  the_end:
1927     target_msgh->msg_controllen = tswapal(space);
1928     return 0;
1929 }
1930 
1931 /* do_setsockopt() must return target values and target errnos. */
1932 static abi_long do_setsockopt(int sockfd, int level, int optname,
1933                               abi_ulong optval_addr, socklen_t optlen)
1934 {
1935     abi_long ret;
1936     int val;
1937     struct ip_mreqn *ip_mreq;
1938     struct ip_mreq_source *ip_mreq_source;
1939 
1940     switch(level) {
1941     case SOL_TCP:
1942         /* TCP options all take an 'int' value.  */
1943         if (optlen < sizeof(uint32_t))
1944             return -TARGET_EINVAL;
1945 
1946         if (get_user_u32(val, optval_addr))
1947             return -TARGET_EFAULT;
1948         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1949         break;
1950     case SOL_IP:
1951         switch(optname) {
1952         case IP_TOS:
1953         case IP_TTL:
1954         case IP_HDRINCL:
1955         case IP_ROUTER_ALERT:
1956         case IP_RECVOPTS:
1957         case IP_RETOPTS:
1958         case IP_PKTINFO:
1959         case IP_MTU_DISCOVER:
1960         case IP_RECVERR:
1961         case IP_RECVTTL:
1962         case IP_RECVTOS:
1963 #ifdef IP_FREEBIND
1964         case IP_FREEBIND:
1965 #endif
1966         case IP_MULTICAST_TTL:
1967         case IP_MULTICAST_LOOP:
1968             val = 0;
1969             if (optlen >= sizeof(uint32_t)) {
1970                 if (get_user_u32(val, optval_addr))
1971                     return -TARGET_EFAULT;
1972             } else if (optlen >= 1) {
1973                 if (get_user_u8(val, optval_addr))
1974                     return -TARGET_EFAULT;
1975             }
1976             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1977             break;
1978         case IP_ADD_MEMBERSHIP:
1979         case IP_DROP_MEMBERSHIP:
1980             if (optlen < sizeof (struct target_ip_mreq) ||
1981                 optlen > sizeof (struct target_ip_mreqn))
1982                 return -TARGET_EINVAL;
1983 
1984             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1985             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1986             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1987             break;
1988 
1989         case IP_BLOCK_SOURCE:
1990         case IP_UNBLOCK_SOURCE:
1991         case IP_ADD_SOURCE_MEMBERSHIP:
1992         case IP_DROP_SOURCE_MEMBERSHIP:
1993             if (optlen != sizeof (struct target_ip_mreq_source))
1994                 return -TARGET_EINVAL;
1995 
1996             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1997             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1998             unlock_user (ip_mreq_source, optval_addr, 0);
1999             break;
2000 
2001         default:
2002             goto unimplemented;
2003         }
2004         break;
2005     case SOL_IPV6:
2006         switch (optname) {
2007         case IPV6_MTU_DISCOVER:
2008         case IPV6_MTU:
2009         case IPV6_V6ONLY:
2010         case IPV6_RECVPKTINFO:
2011         case IPV6_UNICAST_HOPS:
2012         case IPV6_MULTICAST_HOPS:
2013         case IPV6_MULTICAST_LOOP:
2014         case IPV6_RECVERR:
2015         case IPV6_RECVHOPLIMIT:
2016         case IPV6_2292HOPLIMIT:
2017         case IPV6_CHECKSUM:
2018         case IPV6_ADDRFORM:
2019         case IPV6_2292PKTINFO:
2020         case IPV6_RECVTCLASS:
2021         case IPV6_RECVRTHDR:
2022         case IPV6_2292RTHDR:
2023         case IPV6_RECVHOPOPTS:
2024         case IPV6_2292HOPOPTS:
2025         case IPV6_RECVDSTOPTS:
2026         case IPV6_2292DSTOPTS:
2027         case IPV6_TCLASS:
2028 #ifdef IPV6_RECVPATHMTU
2029         case IPV6_RECVPATHMTU:
2030 #endif
2031 #ifdef IPV6_TRANSPARENT
2032         case IPV6_TRANSPARENT:
2033 #endif
2034 #ifdef IPV6_FREEBIND
2035         case IPV6_FREEBIND:
2036 #endif
2037 #ifdef IPV6_RECVORIGDSTADDR
2038         case IPV6_RECVORIGDSTADDR:
2039 #endif
2040             val = 0;
2041             if (optlen < sizeof(uint32_t)) {
2042                 return -TARGET_EINVAL;
2043             }
2044             if (get_user_u32(val, optval_addr)) {
2045                 return -TARGET_EFAULT;
2046             }
2047             ret = get_errno(setsockopt(sockfd, level, optname,
2048                                        &val, sizeof(val)));
2049             break;
2050         case IPV6_PKTINFO:
2051         {
2052             struct in6_pktinfo pki;
2053 
2054             if (optlen < sizeof(pki)) {
2055                 return -TARGET_EINVAL;
2056             }
2057 
2058             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2059                 return -TARGET_EFAULT;
2060             }
2061 
2062             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2063 
2064             ret = get_errno(setsockopt(sockfd, level, optname,
2065                                        &pki, sizeof(pki)));
2066             break;
2067         }
2068         case IPV6_ADD_MEMBERSHIP:
2069         case IPV6_DROP_MEMBERSHIP:
2070         {
2071             struct ipv6_mreq ipv6mreq;
2072 
2073             if (optlen < sizeof(ipv6mreq)) {
2074                 return -TARGET_EINVAL;
2075             }
2076 
2077             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2078                 return -TARGET_EFAULT;
2079             }
2080 
2081             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2082 
2083             ret = get_errno(setsockopt(sockfd, level, optname,
2084                                        &ipv6mreq, sizeof(ipv6mreq)));
2085             break;
2086         }
2087         default:
2088             goto unimplemented;
2089         }
2090         break;
2091     case SOL_ICMPV6:
2092         switch (optname) {
2093         case ICMPV6_FILTER:
2094         {
2095             struct icmp6_filter icmp6f;
2096 
2097             if (optlen > sizeof(icmp6f)) {
2098                 optlen = sizeof(icmp6f);
2099             }
2100 
2101             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2102                 return -TARGET_EFAULT;
2103             }
2104 
2105             for (val = 0; val < 8; val++) {
2106                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2107             }
2108 
2109             ret = get_errno(setsockopt(sockfd, level, optname,
2110                                        &icmp6f, optlen));
2111             break;
2112         }
2113         default:
2114             goto unimplemented;
2115         }
2116         break;
2117     case SOL_RAW:
2118         switch (optname) {
2119         case ICMP_FILTER:
2120         case IPV6_CHECKSUM:
2121             /* These take a u32 value. */
2122             if (optlen < sizeof(uint32_t)) {
2123                 return -TARGET_EINVAL;
2124             }
2125 
2126             if (get_user_u32(val, optval_addr)) {
2127                 return -TARGET_EFAULT;
2128             }
2129             ret = get_errno(setsockopt(sockfd, level, optname,
2130                                        &val, sizeof(val)));
2131             break;
2132 
2133         default:
2134             goto unimplemented;
2135         }
2136         break;
2137 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2138     case SOL_ALG:
2139         switch (optname) {
2140         case ALG_SET_KEY:
2141         {
2142             char *alg_key = g_try_malloc(optlen);
2143 
2144             if (!alg_key) {
2145                 return -TARGET_ENOMEM;
2146             }
2147             if (copy_from_user(alg_key, optval_addr, optlen)) {
2148                 g_free(alg_key);
2149                 return -TARGET_EFAULT;
2150             }
2151             ret = get_errno(setsockopt(sockfd, level, optname,
2152                                        alg_key, optlen));
2153             g_free(alg_key);
2154             break;
2155         }
2156         case ALG_SET_AEAD_AUTHSIZE:
2157         {
2158             ret = get_errno(setsockopt(sockfd, level, optname,
2159                                        NULL, optlen));
2160             break;
2161         }
2162         default:
2163             goto unimplemented;
2164         }
2165         break;
2166 #endif
2167     case TARGET_SOL_SOCKET:
2168         switch (optname) {
2169         case TARGET_SO_RCVTIMEO:
2170         {
2171                 struct timeval tv;
2172 
2173                 optname = SO_RCVTIMEO;
2174 
2175 set_timeout:
2176                 if (optlen != sizeof(struct target_timeval)) {
2177                     return -TARGET_EINVAL;
2178                 }
2179 
2180                 if (copy_from_user_timeval(&tv, optval_addr)) {
2181                     return -TARGET_EFAULT;
2182                 }
2183 
2184                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2185                                 &tv, sizeof(tv)));
2186                 return ret;
2187         }
2188         case TARGET_SO_SNDTIMEO:
2189                 optname = SO_SNDTIMEO;
2190                 goto set_timeout;
2191         case TARGET_SO_ATTACH_FILTER:
2192         {
2193                 struct target_sock_fprog *tfprog;
2194                 struct target_sock_filter *tfilter;
2195                 struct sock_fprog fprog;
2196                 struct sock_filter *filter;
2197                 int i;
2198 
2199                 if (optlen != sizeof(*tfprog)) {
2200                     return -TARGET_EINVAL;
2201                 }
2202                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2203                     return -TARGET_EFAULT;
2204                 }
2205                 if (!lock_user_struct(VERIFY_READ, tfilter,
2206                                       tswapal(tfprog->filter), 0)) {
2207                     unlock_user_struct(tfprog, optval_addr, 1);
2208                     return -TARGET_EFAULT;
2209                 }
2210 
2211                 fprog.len = tswap16(tfprog->len);
2212                 filter = g_try_new(struct sock_filter, fprog.len);
2213                 if (filter == NULL) {
2214                     unlock_user_struct(tfilter, tfprog->filter, 1);
2215                     unlock_user_struct(tfprog, optval_addr, 1);
2216                     return -TARGET_ENOMEM;
2217                 }
2218                 for (i = 0; i < fprog.len; i++) {
2219                     filter[i].code = tswap16(tfilter[i].code);
2220                     filter[i].jt = tfilter[i].jt;
2221                     filter[i].jf = tfilter[i].jf;
2222                     filter[i].k = tswap32(tfilter[i].k);
2223                 }
2224                 fprog.filter = filter;
2225 
2226                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2227                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2228                 g_free(filter);
2229 
2230                 unlock_user_struct(tfilter, tfprog->filter, 1);
2231                 unlock_user_struct(tfprog, optval_addr, 1);
2232                 return ret;
2233         }
2234         case TARGET_SO_BINDTODEVICE:
2235         {
2236                 char *dev_ifname, *addr_ifname;
2237 
2238                 if (optlen > IFNAMSIZ - 1) {
2239                     optlen = IFNAMSIZ - 1;
2240                 }
2241                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2242                 if (!dev_ifname) {
2243                     return -TARGET_EFAULT;
2244                 }
2245                 optname = SO_BINDTODEVICE;
2246                 addr_ifname = alloca(IFNAMSIZ);
2247                 memcpy(addr_ifname, dev_ifname, optlen);
2248                 addr_ifname[optlen] = 0;
2249                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2250                                            addr_ifname, optlen));
2251                 unlock_user(dev_ifname, optval_addr, 0);
2252                 return ret;
2253         }
2254         case TARGET_SO_LINGER:
2255         {
2256                 struct linger lg;
2257                 struct target_linger *tlg;
2258 
2259                 if (optlen != sizeof(struct target_linger)) {
2260                     return -TARGET_EINVAL;
2261                 }
2262                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2263                     return -TARGET_EFAULT;
2264                 }
2265                 __get_user(lg.l_onoff, &tlg->l_onoff);
2266                 __get_user(lg.l_linger, &tlg->l_linger);
2267                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2268                                 &lg, sizeof(lg)));
2269                 unlock_user_struct(tlg, optval_addr, 0);
2270                 return ret;
2271         }
2272         /* Options with 'int' argument.  */
2273         case TARGET_SO_DEBUG:
2274                 optname = SO_DEBUG;
2275                 break;
2276         case TARGET_SO_REUSEADDR:
2277                 optname = SO_REUSEADDR;
2278                 break;
2279 #ifdef SO_REUSEPORT
2280         case TARGET_SO_REUSEPORT:
2281                 optname = SO_REUSEPORT;
2282                 break;
2283 #endif
2284         case TARGET_SO_TYPE:
2285                 optname = SO_TYPE;
2286                 break;
2287         case TARGET_SO_ERROR:
2288                 optname = SO_ERROR;
2289                 break;
2290         case TARGET_SO_DONTROUTE:
2291                 optname = SO_DONTROUTE;
2292                 break;
2293         case TARGET_SO_BROADCAST:
2294                 optname = SO_BROADCAST;
2295                 break;
2296         case TARGET_SO_SNDBUF:
2297                 optname = SO_SNDBUF;
2298                 break;
2299         case TARGET_SO_SNDBUFFORCE:
2300                 optname = SO_SNDBUFFORCE;
2301                 break;
2302         case TARGET_SO_RCVBUF:
2303                 optname = SO_RCVBUF;
2304                 break;
2305         case TARGET_SO_RCVBUFFORCE:
2306                 optname = SO_RCVBUFFORCE;
2307                 break;
2308         case TARGET_SO_KEEPALIVE:
2309                 optname = SO_KEEPALIVE;
2310                 break;
2311         case TARGET_SO_OOBINLINE:
2312                 optname = SO_OOBINLINE;
2313                 break;
2314         case TARGET_SO_NO_CHECK:
2315                 optname = SO_NO_CHECK;
2316                 break;
2317         case TARGET_SO_PRIORITY:
2318                 optname = SO_PRIORITY;
2319                 break;
2320 #ifdef SO_BSDCOMPAT
2321         case TARGET_SO_BSDCOMPAT:
2322                 optname = SO_BSDCOMPAT;
2323                 break;
2324 #endif
2325         case TARGET_SO_PASSCRED:
2326                 optname = SO_PASSCRED;
2327                 break;
2328         case TARGET_SO_PASSSEC:
2329                 optname = SO_PASSSEC;
2330                 break;
2331         case TARGET_SO_TIMESTAMP:
2332                 optname = SO_TIMESTAMP;
2333                 break;
2334         case TARGET_SO_RCVLOWAT:
2335                 optname = SO_RCVLOWAT;
2336                 break;
2337         default:
2338             goto unimplemented;
2339         }
2340         if (optlen < sizeof(uint32_t))
2341             return -TARGET_EINVAL;
2342 
2343         if (get_user_u32(val, optval_addr))
2344             return -TARGET_EFAULT;
2345         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2346         break;
2347 #ifdef SOL_NETLINK
2348     case SOL_NETLINK:
2349         switch (optname) {
2350         case NETLINK_PKTINFO:
2351         case NETLINK_ADD_MEMBERSHIP:
2352         case NETLINK_DROP_MEMBERSHIP:
2353         case NETLINK_BROADCAST_ERROR:
2354         case NETLINK_NO_ENOBUFS:
2355 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2356         case NETLINK_LISTEN_ALL_NSID:
2357         case NETLINK_CAP_ACK:
2358 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2359 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2360         case NETLINK_EXT_ACK:
2361 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2362 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2363         case NETLINK_GET_STRICT_CHK:
2364 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2365             break;
2366         default:
2367             goto unimplemented;
2368         }
2369         val = 0;
2370         if (optlen < sizeof(uint32_t)) {
2371             return -TARGET_EINVAL;
2372         }
2373         if (get_user_u32(val, optval_addr)) {
2374             return -TARGET_EFAULT;
2375         }
2376         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2377                                    sizeof(val)));
2378         break;
2379 #endif /* SOL_NETLINK */
2380     default:
2381     unimplemented:
2382         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2383                       level, optname);
2384         ret = -TARGET_ENOPROTOOPT;
2385     }
2386     return ret;
2387 }
2388 
2389 /* do_getsockopt() must return target values and target errnos. */
2390 static abi_long do_getsockopt(int sockfd, int level, int optname,
2391                               abi_ulong optval_addr, abi_ulong optlen)
2392 {
2393     abi_long ret;
2394     int len, val;
2395     socklen_t lv;
2396 
2397     switch(level) {
2398     case TARGET_SOL_SOCKET:
2399         level = SOL_SOCKET;
2400         switch (optname) {
2401         /* These don't just return a single integer */
2402         case TARGET_SO_PEERNAME:
2403             goto unimplemented;
2404         case TARGET_SO_RCVTIMEO: {
2405             struct timeval tv;
2406             socklen_t tvlen;
2407 
2408             optname = SO_RCVTIMEO;
2409 
2410 get_timeout:
2411             if (get_user_u32(len, optlen)) {
2412                 return -TARGET_EFAULT;
2413             }
2414             if (len < 0) {
2415                 return -TARGET_EINVAL;
2416             }
2417 
2418             tvlen = sizeof(tv);
2419             ret = get_errno(getsockopt(sockfd, level, optname,
2420                                        &tv, &tvlen));
2421             if (ret < 0) {
2422                 return ret;
2423             }
2424             if (len > sizeof(struct target_timeval)) {
2425                 len = sizeof(struct target_timeval);
2426             }
2427             if (copy_to_user_timeval(optval_addr, &tv)) {
2428                 return -TARGET_EFAULT;
2429             }
2430             if (put_user_u32(len, optlen)) {
2431                 return -TARGET_EFAULT;
2432             }
2433             break;
2434         }
2435         case TARGET_SO_SNDTIMEO:
2436             optname = SO_SNDTIMEO;
2437             goto get_timeout;
2438         case TARGET_SO_PEERCRED: {
2439             struct ucred cr;
2440             socklen_t crlen;
2441             struct target_ucred *tcr;
2442 
2443             if (get_user_u32(len, optlen)) {
2444                 return -TARGET_EFAULT;
2445             }
2446             if (len < 0) {
2447                 return -TARGET_EINVAL;
2448             }
2449 
2450             crlen = sizeof(cr);
2451             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2452                                        &cr, &crlen));
2453             if (ret < 0) {
2454                 return ret;
2455             }
2456             if (len > crlen) {
2457                 len = crlen;
2458             }
2459             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2460                 return -TARGET_EFAULT;
2461             }
2462             __put_user(cr.pid, &tcr->pid);
2463             __put_user(cr.uid, &tcr->uid);
2464             __put_user(cr.gid, &tcr->gid);
2465             unlock_user_struct(tcr, optval_addr, 1);
2466             if (put_user_u32(len, optlen)) {
2467                 return -TARGET_EFAULT;
2468             }
2469             break;
2470         }
2471         case TARGET_SO_PEERSEC: {
2472             char *name;
2473 
2474             if (get_user_u32(len, optlen)) {
2475                 return -TARGET_EFAULT;
2476             }
2477             if (len < 0) {
2478                 return -TARGET_EINVAL;
2479             }
2480             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2481             if (!name) {
2482                 return -TARGET_EFAULT;
2483             }
2484             lv = len;
2485             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2486                                        name, &lv));
2487             if (put_user_u32(lv, optlen)) {
2488                 ret = -TARGET_EFAULT;
2489             }
2490             unlock_user(name, optval_addr, lv);
2491             break;
2492         }
2493         case TARGET_SO_LINGER:
2494         {
2495             struct linger lg;
2496             socklen_t lglen;
2497             struct target_linger *tlg;
2498 
2499             if (get_user_u32(len, optlen)) {
2500                 return -TARGET_EFAULT;
2501             }
2502             if (len < 0) {
2503                 return -TARGET_EINVAL;
2504             }
2505 
2506             lglen = sizeof(lg);
2507             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2508                                        &lg, &lglen));
2509             if (ret < 0) {
2510                 return ret;
2511             }
2512             if (len > lglen) {
2513                 len = lglen;
2514             }
2515             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2516                 return -TARGET_EFAULT;
2517             }
2518             __put_user(lg.l_onoff, &tlg->l_onoff);
2519             __put_user(lg.l_linger, &tlg->l_linger);
2520             unlock_user_struct(tlg, optval_addr, 1);
2521             if (put_user_u32(len, optlen)) {
2522                 return -TARGET_EFAULT;
2523             }
2524             break;
2525         }
2526         /* Options with 'int' argument.  */
2527         case TARGET_SO_DEBUG:
2528             optname = SO_DEBUG;
2529             goto int_case;
2530         case TARGET_SO_REUSEADDR:
2531             optname = SO_REUSEADDR;
2532             goto int_case;
2533 #ifdef SO_REUSEPORT
2534         case TARGET_SO_REUSEPORT:
2535             optname = SO_REUSEPORT;
2536             goto int_case;
2537 #endif
2538         case TARGET_SO_TYPE:
2539             optname = SO_TYPE;
2540             goto int_case;
2541         case TARGET_SO_ERROR:
2542             optname = SO_ERROR;
2543             goto int_case;
2544         case TARGET_SO_DONTROUTE:
2545             optname = SO_DONTROUTE;
2546             goto int_case;
2547         case TARGET_SO_BROADCAST:
2548             optname = SO_BROADCAST;
2549             goto int_case;
2550         case TARGET_SO_SNDBUF:
2551             optname = SO_SNDBUF;
2552             goto int_case;
2553         case TARGET_SO_RCVBUF:
2554             optname = SO_RCVBUF;
2555             goto int_case;
2556         case TARGET_SO_KEEPALIVE:
2557             optname = SO_KEEPALIVE;
2558             goto int_case;
2559         case TARGET_SO_OOBINLINE:
2560             optname = SO_OOBINLINE;
2561             goto int_case;
2562         case TARGET_SO_NO_CHECK:
2563             optname = SO_NO_CHECK;
2564             goto int_case;
2565         case TARGET_SO_PRIORITY:
2566             optname = SO_PRIORITY;
2567             goto int_case;
2568 #ifdef SO_BSDCOMPAT
2569         case TARGET_SO_BSDCOMPAT:
2570             optname = SO_BSDCOMPAT;
2571             goto int_case;
2572 #endif
2573         case TARGET_SO_PASSCRED:
2574             optname = SO_PASSCRED;
2575             goto int_case;
2576         case TARGET_SO_TIMESTAMP:
2577             optname = SO_TIMESTAMP;
2578             goto int_case;
2579         case TARGET_SO_RCVLOWAT:
2580             optname = SO_RCVLOWAT;
2581             goto int_case;
2582         case TARGET_SO_ACCEPTCONN:
2583             optname = SO_ACCEPTCONN;
2584             goto int_case;
2585         default:
2586             goto int_case;
2587         }
2588         break;
2589     case SOL_TCP:
2590         /* TCP options all take an 'int' value.  */
2591     int_case:
2592         if (get_user_u32(len, optlen))
2593             return -TARGET_EFAULT;
2594         if (len < 0)
2595             return -TARGET_EINVAL;
2596         lv = sizeof(lv);
2597         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2598         if (ret < 0)
2599             return ret;
2600         if (optname == SO_TYPE) {
2601             val = host_to_target_sock_type(val);
2602         }
2603         if (len > lv)
2604             len = lv;
2605         if (len == 4) {
2606             if (put_user_u32(val, optval_addr))
2607                 return -TARGET_EFAULT;
2608         } else {
2609             if (put_user_u8(val, optval_addr))
2610                 return -TARGET_EFAULT;
2611         }
2612         if (put_user_u32(len, optlen))
2613             return -TARGET_EFAULT;
2614         break;
2615     case SOL_IP:
2616         switch(optname) {
2617         case IP_TOS:
2618         case IP_TTL:
2619         case IP_HDRINCL:
2620         case IP_ROUTER_ALERT:
2621         case IP_RECVOPTS:
2622         case IP_RETOPTS:
2623         case IP_PKTINFO:
2624         case IP_MTU_DISCOVER:
2625         case IP_RECVERR:
2626         case IP_RECVTOS:
2627 #ifdef IP_FREEBIND
2628         case IP_FREEBIND:
2629 #endif
2630         case IP_MULTICAST_TTL:
2631         case IP_MULTICAST_LOOP:
2632             if (get_user_u32(len, optlen))
2633                 return -TARGET_EFAULT;
2634             if (len < 0)
2635                 return -TARGET_EINVAL;
2636             lv = sizeof(lv);
2637             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2638             if (ret < 0)
2639                 return ret;
2640             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2641                 len = 1;
2642                 if (put_user_u32(len, optlen)
2643                     || put_user_u8(val, optval_addr))
2644                     return -TARGET_EFAULT;
2645             } else {
2646                 if (len > sizeof(int))
2647                     len = sizeof(int);
2648                 if (put_user_u32(len, optlen)
2649                     || put_user_u32(val, optval_addr))
2650                     return -TARGET_EFAULT;
2651             }
2652             break;
2653         default:
2654             ret = -TARGET_ENOPROTOOPT;
2655             break;
2656         }
2657         break;
2658     case SOL_IPV6:
2659         switch (optname) {
2660         case IPV6_MTU_DISCOVER:
2661         case IPV6_MTU:
2662         case IPV6_V6ONLY:
2663         case IPV6_RECVPKTINFO:
2664         case IPV6_UNICAST_HOPS:
2665         case IPV6_MULTICAST_HOPS:
2666         case IPV6_MULTICAST_LOOP:
2667         case IPV6_RECVERR:
2668         case IPV6_RECVHOPLIMIT:
2669         case IPV6_2292HOPLIMIT:
2670         case IPV6_CHECKSUM:
2671         case IPV6_ADDRFORM:
2672         case IPV6_2292PKTINFO:
2673         case IPV6_RECVTCLASS:
2674         case IPV6_RECVRTHDR:
2675         case IPV6_2292RTHDR:
2676         case IPV6_RECVHOPOPTS:
2677         case IPV6_2292HOPOPTS:
2678         case IPV6_RECVDSTOPTS:
2679         case IPV6_2292DSTOPTS:
2680         case IPV6_TCLASS:
2681 #ifdef IPV6_RECVPATHMTU
2682         case IPV6_RECVPATHMTU:
2683 #endif
2684 #ifdef IPV6_TRANSPARENT
2685         case IPV6_TRANSPARENT:
2686 #endif
2687 #ifdef IPV6_FREEBIND
2688         case IPV6_FREEBIND:
2689 #endif
2690 #ifdef IPV6_RECVORIGDSTADDR
2691         case IPV6_RECVORIGDSTADDR:
2692 #endif
2693             if (get_user_u32(len, optlen))
2694                 return -TARGET_EFAULT;
2695             if (len < 0)
2696                 return -TARGET_EINVAL;
2697             lv = sizeof(lv);
2698             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2699             if (ret < 0)
2700                 return ret;
2701             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2702                 len = 1;
2703                 if (put_user_u32(len, optlen)
2704                     || put_user_u8(val, optval_addr))
2705                     return -TARGET_EFAULT;
2706             } else {
2707                 if (len > sizeof(int))
2708                     len = sizeof(int);
2709                 if (put_user_u32(len, optlen)
2710                     || put_user_u32(val, optval_addr))
2711                     return -TARGET_EFAULT;
2712             }
2713             break;
2714         default:
2715             ret = -TARGET_ENOPROTOOPT;
2716             break;
2717         }
2718         break;
2719 #ifdef SOL_NETLINK
2720     case SOL_NETLINK:
2721         switch (optname) {
2722         case NETLINK_PKTINFO:
2723         case NETLINK_BROADCAST_ERROR:
2724         case NETLINK_NO_ENOBUFS:
2725 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2726         case NETLINK_LISTEN_ALL_NSID:
2727         case NETLINK_CAP_ACK:
2728 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2729 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2730         case NETLINK_EXT_ACK:
2731 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2732 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2733         case NETLINK_GET_STRICT_CHK:
2734 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2735             if (get_user_u32(len, optlen)) {
2736                 return -TARGET_EFAULT;
2737             }
2738             if (len != sizeof(val)) {
2739                 return -TARGET_EINVAL;
2740             }
2741             lv = len;
2742             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2743             if (ret < 0) {
2744                 return ret;
2745             }
2746             if (put_user_u32(lv, optlen)
2747                 || put_user_u32(val, optval_addr)) {
2748                 return -TARGET_EFAULT;
2749             }
2750             break;
2751 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2752         case NETLINK_LIST_MEMBERSHIPS:
2753         {
2754             uint32_t *results;
2755             int i;
2756             if (get_user_u32(len, optlen)) {
2757                 return -TARGET_EFAULT;
2758             }
2759             if (len < 0) {
2760                 return -TARGET_EINVAL;
2761             }
2762             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2763             if (!results) {
2764                 return -TARGET_EFAULT;
2765             }
2766             lv = len;
2767             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2768             if (ret < 0) {
2769                 unlock_user(results, optval_addr, 0);
2770                 return ret;
2771             }
2772             /* Swap host endianness to target endianness. */
2773             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2774                 results[i] = tswap32(results[i]);
2775             }
2776             if (put_user_u32(lv, optlen)) {
2777                 return -TARGET_EFAULT;
2778             }
2779             unlock_user(results, optval_addr, 0);
2780             break;
2781         }
2782 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2783         default:
2784             goto unimplemented;
2785         }
2786         break;
2787 #endif /* SOL_NETLINK */
2788     default:
2789     unimplemented:
2790         qemu_log_mask(LOG_UNIMP,
2791                       "getsockopt level=%d optname=%d not yet supported\n",
2792                       level, optname);
2793         ret = -TARGET_EOPNOTSUPP;
2794         break;
2795     }
2796     return ret;
2797 }
2798 
2799 /* Convert target low/high pair representing file offset into the host
2800  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2801  * as the kernel doesn't handle them either.
2802  */
2803 static void target_to_host_low_high(abi_ulong tlow,
2804                                     abi_ulong thigh,
2805                                     unsigned long *hlow,
2806                                     unsigned long *hhigh)
2807 {
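    /* The shifts are done in two halves so that shifting by a full 64 bits
     * (when TARGET_LONG_BITS or HOST_LONG_BITS is 64) stays well defined:
     * the high part is then simply discarded, as the low word already
     * holds the whole offset on such ABIs.
     */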
2808     uint64_t off = tlow |
2809         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2810         TARGET_LONG_BITS / 2;
2811 
2812     *hlow = off;
2813     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2814 }
2815 
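/* Lock a guest iovec array of 'count' entries and build the corresponding
 * host struct iovec array.  Returns NULL with errno set on failure; the
 * total length is clamped to the 2^31-1 byte limit, and bad buffers after
 * the first one become zero-length entries so that the guest sees a
 * partial transfer rather than EFAULT.
 */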
2816 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2817                                 abi_ulong count, int copy)
2818 {
2819     struct target_iovec *target_vec;
2820     struct iovec *vec;
2821     abi_ulong total_len, max_len;
2822     int i;
2823     int err = 0;
2824     bool bad_address = false;
2825 
2826     if (count == 0) {
2827         errno = 0;
2828         return NULL;
2829     }
2830     if (count > IOV_MAX) {
2831         errno = EINVAL;
2832         return NULL;
2833     }
2834 
2835     vec = g_try_new0(struct iovec, count);
2836     if (vec == NULL) {
2837         errno = ENOMEM;
2838         return NULL;
2839     }
2840 
2841     target_vec = lock_user(VERIFY_READ, target_addr,
2842                            count * sizeof(struct target_iovec), 1);
2843     if (target_vec == NULL) {
2844         err = EFAULT;
2845         goto fail2;
2846     }
2847 
2848     /* ??? If host page size > target page size, this will result in a
2849        value larger than what we can actually support.  */
2850     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2851     total_len = 0;
2852 
2853     for (i = 0; i < count; i++) {
2854         abi_ulong base = tswapal(target_vec[i].iov_base);
2855         abi_long len = tswapal(target_vec[i].iov_len);
2856 
2857         if (len < 0) {
2858             err = EINVAL;
2859             goto fail;
2860         } else if (len == 0) {
2861             /* Zero length pointer is ignored.  */
2862             vec[i].iov_base = 0;
2863         } else {
2864             vec[i].iov_base = lock_user(type, base, len, copy);
2865             /* If the first buffer pointer is bad, this is a fault.  But
2866              * subsequent bad buffers will result in a partial write; this
2867              * is realized by filling the vector with null pointers and
2868              * zero lengths. */
2869             if (!vec[i].iov_base) {
2870                 if (i == 0) {
2871                     err = EFAULT;
2872                     goto fail;
2873                 } else {
2874                     bad_address = true;
2875                 }
2876             }
2877             if (bad_address) {
2878                 len = 0;
2879             }
2880             if (len > max_len - total_len) {
2881                 len = max_len - total_len;
2882             }
2883         }
2884         vec[i].iov_len = len;
2885         total_len += len;
2886     }
2887 
2888     unlock_user(target_vec, target_addr, 0);
2889     return vec;
2890 
2891  fail:
2892     while (--i >= 0) {
2893         if (tswapal(target_vec[i].iov_len) > 0) {
2894             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2895         }
2896     }
2897     unlock_user(target_vec, target_addr, 0);
2898  fail2:
2899     g_free(vec);
2900     errno = err;
2901     return NULL;
2902 }
2903 
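/* Undo lock_iovec(): release each locked buffer, copying the data back to
 * the guest when 'copy' is set, and free the host iovec array.
 */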
2904 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2905                          abi_ulong count, int copy)
2906 {
2907     struct target_iovec *target_vec;
2908     int i;
2909 
2910     target_vec = lock_user(VERIFY_READ, target_addr,
2911                            count * sizeof(struct target_iovec), 1);
2912     if (target_vec) {
2913         for (i = 0; i < count; i++) {
2914             abi_ulong base = tswapal(target_vec[i].iov_base);
2915             abi_long len = tswapal(target_vec[i].iov_len);
2916             if (len < 0) {
2917                 break;
2918             }
2919             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2920         }
2921         unlock_user(target_vec, target_addr, 0);
2922     }
2923 
2924     g_free(vec);
2925 }
2926 
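/* Translate the target SOCK_* type and the SOCK_CLOEXEC/SOCK_NONBLOCK
 * flags into their host values.  Returns -TARGET_EINVAL if a requested
 * flag cannot be expressed on this host (SOCK_NONBLOCK may instead be
 * emulated later via fcntl(O_NONBLOCK), see sock_flags_fixup()).
 */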
2927 static inline int target_to_host_sock_type(int *type)
2928 {
2929     int host_type = 0;
2930     int target_type = *type;
2931 
2932     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2933     case TARGET_SOCK_DGRAM:
2934         host_type = SOCK_DGRAM;
2935         break;
2936     case TARGET_SOCK_STREAM:
2937         host_type = SOCK_STREAM;
2938         break;
2939     default:
2940         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2941         break;
2942     }
2943     if (target_type & TARGET_SOCK_CLOEXEC) {
2944 #if defined(SOCK_CLOEXEC)
2945         host_type |= SOCK_CLOEXEC;
2946 #else
2947         return -TARGET_EINVAL;
2948 #endif
2949     }
2950     if (target_type & TARGET_SOCK_NONBLOCK) {
2951 #if defined(SOCK_NONBLOCK)
2952         host_type |= SOCK_NONBLOCK;
2953 #elif !defined(O_NONBLOCK)
2954         return -TARGET_EINVAL;
2955 #endif
2956     }
2957     *type = host_type;
2958     return 0;
2959 }
2960 
2961 /* Try to emulate socket type flags after socket creation.  */
2962 static int sock_flags_fixup(int fd, int target_type)
2963 {
2964 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2965     if (target_type & TARGET_SOCK_NONBLOCK) {
2966         int flags = fcntl(fd, F_GETFL);
2967         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2968             close(fd);
2969             return -TARGET_EINVAL;
2970         }
2971     }
2972 #endif
2973     return fd;
2974 }
2975 
2976 /* do_socket() must return target values and target errnos. */
2977 static abi_long do_socket(int domain, int type, int protocol)
2978 {
2979     int target_type = type;
2980     int ret;
2981 
2982     ret = target_to_host_sock_type(&type);
2983     if (ret) {
2984         return ret;
2985     }
2986 
2987     if (domain == PF_NETLINK && !(
2988 #ifdef CONFIG_RTNETLINK
2989          protocol == NETLINK_ROUTE ||
2990 #endif
2991          protocol == NETLINK_KOBJECT_UEVENT ||
2992          protocol == NETLINK_AUDIT)) {
2993         return -TARGET_EPFNOSUPPORT;
2994     }
2995 
2996     if (domain == AF_PACKET ||
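    /* For packet sockets the protocol is specified in network byte order,
     * so the 16-bit value has to be converted between the guest's and the
     * host's integer representation.
     */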
2997         (domain == AF_INET && type == SOCK_PACKET)) {
2998         protocol = tswap16(protocol);
2999     }
3000 
3001     ret = get_errno(socket(domain, type, protocol));
3002     if (ret >= 0) {
3003         ret = sock_flags_fixup(ret, target_type);
3004         if (type == SOCK_PACKET) {
3005             /* Handle an obsolete case:
3006              * if the socket type is SOCK_PACKET, bind by name.
3007              */
3008             fd_trans_register(ret, &target_packet_trans);
3009         } else if (domain == PF_NETLINK) {
3010             switch (protocol) {
3011 #ifdef CONFIG_RTNETLINK
3012             case NETLINK_ROUTE:
3013                 fd_trans_register(ret, &target_netlink_route_trans);
3014                 break;
3015 #endif
3016             case NETLINK_KOBJECT_UEVENT:
3017                 /* nothing to do: messages are strings */
3018                 break;
3019             case NETLINK_AUDIT:
3020                 fd_trans_register(ret, &target_netlink_audit_trans);
3021                 break;
3022             default:
3023                 g_assert_not_reached();
3024             }
3025         }
3026     }
3027     return ret;
3028 }
3029 
3030 /* do_bind() must return target values and target errnos. */
3031 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3032                         socklen_t addrlen)
3033 {
3034     void *addr;
3035     abi_long ret;
3036 
3037     if ((int)addrlen < 0) {
3038         return -TARGET_EINVAL;
3039     }
3040 
3041     addr = alloca(addrlen+1);
3042 
3043     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3044     if (ret)
3045         return ret;
3046 
3047     return get_errno(bind(sockfd, addr, addrlen));
3048 }
3049 
3050 /* do_connect() must return target values and target errnos. */
3051 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3052                            socklen_t addrlen)
3053 {
3054     void *addr;
3055     abi_long ret;
3056 
3057     if ((int)addrlen < 0) {
3058         return -TARGET_EINVAL;
3059     }
3060 
3061     addr = alloca(addrlen+1);
3062 
3063     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3064     if (ret)
3065         return ret;
3066 
3067     return get_errno(safe_connect(sockfd, addr, addrlen));
3068 }
3069 
3070 /* do_sendrecvmsg_locked() must return target values and target errnos. */
3071 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3072                                       int flags, int send)
3073 {
3074     abi_long ret, len;
3075     struct msghdr msg;
3076     abi_ulong count;
3077     struct iovec *vec;
3078     abi_ulong target_vec;
3079 
3080     if (msgp->msg_name) {
3081         msg.msg_namelen = tswap32(msgp->msg_namelen);
3082         msg.msg_name = alloca(msg.msg_namelen+1);
3083         ret = target_to_host_sockaddr(fd, msg.msg_name,
3084                                       tswapal(msgp->msg_name),
3085                                       msg.msg_namelen);
3086         if (ret == -TARGET_EFAULT) {
3087             /* For connected sockets msg_name and msg_namelen must
3088              * be ignored, so returning EFAULT immediately is wrong.
3089              * Instead, pass a bad msg_name to the host kernel, and
3090              * let it decide whether to return EFAULT or not.
3091              */
3092             msg.msg_name = (void *)-1;
3093         } else if (ret) {
3094             goto out2;
3095         }
3096     } else {
3097         msg.msg_name = NULL;
3098         msg.msg_namelen = 0;
3099     }
3100     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3101     msg.msg_control = alloca(msg.msg_controllen);
3102     memset(msg.msg_control, 0, msg.msg_controllen);
3103 
3104     msg.msg_flags = tswap32(msgp->msg_flags);
3105 
3106     count = tswapal(msgp->msg_iovlen);
3107     target_vec = tswapal(msgp->msg_iov);
3108 
3109     if (count > IOV_MAX) {
3110         /* sendmsg/recvmsg return a different errno for this condition than
3111          * readv/writev, so we must catch it here before lock_iovec() does.
3112          */
3113         ret = -TARGET_EMSGSIZE;
3114         goto out2;
3115     }
3116 
3117     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3118                      target_vec, count, send);
3119     if (vec == NULL) {
3120         ret = -host_to_target_errno(errno);
3121         goto out2;
3122     }
3123     msg.msg_iovlen = count;
3124     msg.msg_iov = vec;
3125 
3126     if (send) {
3127         if (fd_trans_target_to_host_data(fd)) {
3128             void *host_msg;
3129 
3130             host_msg = g_malloc(msg.msg_iov->iov_len);
3131             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3132             ret = fd_trans_target_to_host_data(fd)(host_msg,
3133                                                    msg.msg_iov->iov_len);
3134             if (ret >= 0) {
3135                 msg.msg_iov->iov_base = host_msg;
3136                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3137             }
3138             g_free(host_msg);
3139         } else {
3140             ret = target_to_host_cmsg(&msg, msgp);
3141             if (ret == 0) {
3142                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3143             }
3144         }
3145     } else {
3146         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3147         if (!is_error(ret)) {
3148             len = ret;
3149             if (fd_trans_host_to_target_data(fd)) {
3150                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3151                                                MIN(msg.msg_iov->iov_len, len));
3152             } else {
3153                 ret = host_to_target_cmsg(msgp, &msg);
3154             }
3155             if (!is_error(ret)) {
3156                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3157                 msgp->msg_flags = tswap32(msg.msg_flags);
3158                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3159                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3160                                     msg.msg_name, msg.msg_namelen);
3161                     if (ret) {
3162                         goto out;
3163                     }
3164                 }
3165 
3166                 ret = len;
3167             }
3168         }
3169     }
3170 
3171 out:
3172     unlock_iovec(vec, target_vec, count, !send);
3173 out2:
3174     return ret;
3175 }
3176 
3177 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3178                                int flags, int send)
3179 {
3180     abi_long ret;
3181     struct target_msghdr *msgp;
3182 
3183     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3184                           msgp,
3185                           target_msg,
3186                           send ? 1 : 0)) {
3187         return -TARGET_EFAULT;
3188     }
3189     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3190     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3191     return ret;
3192 }
3193 
3194 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3195  * so it might not have this *mmsg-specific flag either.
3196  */
3197 #ifndef MSG_WAITFORONE
3198 #define MSG_WAITFORONE 0x10000
3199 #endif
3200 
3201 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3202                                 unsigned int vlen, unsigned int flags,
3203                                 int send)
3204 {
3205     struct target_mmsghdr *mmsgp;
3206     abi_long ret = 0;
3207     int i;
3208 
3209     if (vlen > UIO_MAXIOV) {
3210         vlen = UIO_MAXIOV;
3211     }
3212 
3213     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3214     if (!mmsgp) {
3215         return -TARGET_EFAULT;
3216     }
3217 
3218     for (i = 0; i < vlen; i++) {
3219         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3220         if (is_error(ret)) {
3221             break;
3222         }
3223         mmsgp[i].msg_len = tswap32(ret);
3224         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3225         if (flags & MSG_WAITFORONE) {
3226             flags |= MSG_DONTWAIT;
3227         }
3228     }
3229 
3230     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3231 
3232     /* Return number of datagrams sent if we sent any at all;
3233      * otherwise return the error.
3234      */
3235     if (i) {
3236         return i;
3237     }
3238     return ret;
3239 }
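/*
 * Illustrative sketch, not from the original file: the loop above emulates
 * recvmmsg()/sendmmsg() with one recvmsg()/sendmsg() per element, which is
 * why MSG_WAITFORONE can be honoured simply by OR-ing in MSG_DONTWAIT once
 * the first datagram has been received.  A guest caller might look like:
 *
 *     #include <sys/socket.h>
 *
 *     struct mmsghdr msgs[8];   // each msg_hdr prepared with its own iovec
 *     ...
 *     int n = recvmmsg(fd, msgs, 8, MSG_WAITFORONE, NULL);
 *     // blocks only until the first datagram arrives; any further
 *     // datagrams already queued are picked up without blocking, and
 *     // msgs[i].msg_len holds the length of each one.
 */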
3240 
3241 /* do_accept4() must return target values and target errnos. */
3242 static abi_long do_accept4(int fd, abi_ulong target_addr,
3243                            abi_ulong target_addrlen_addr, int flags)
3244 {
3245     socklen_t addrlen, ret_addrlen;
3246     void *addr;
3247     abi_long ret;
3248     int host_flags;
3249 
3250     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3251 
3252     if (target_addr == 0) {
3253         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3254     }
3255 
3256     /* Linux returns EINVAL if the addrlen pointer is invalid */
3257     if (get_user_u32(addrlen, target_addrlen_addr))
3258         return -TARGET_EINVAL;
3259 
3260     if ((int)addrlen < 0) {
3261         return -TARGET_EINVAL;
3262     }
3263 
3264     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3265         return -TARGET_EINVAL;
3266 
3267     addr = alloca(addrlen);
3268 
3269     ret_addrlen = addrlen;
3270     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3271     if (!is_error(ret)) {
3272         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3273         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3274             ret = -TARGET_EFAULT;
3275         }
3276     }
3277     return ret;
3278 }
3279 
3280 /* do_getpeername() must return target values and target errnos. */
3281 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3282                                abi_ulong target_addrlen_addr)
3283 {
3284     socklen_t addrlen, ret_addrlen;
3285     void *addr;
3286     abi_long ret;
3287 
3288     if (get_user_u32(addrlen, target_addrlen_addr))
3289         return -TARGET_EFAULT;
3290 
3291     if ((int)addrlen < 0) {
3292         return -TARGET_EINVAL;
3293     }
3294 
3295     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3296         return -TARGET_EFAULT;
3297 
3298     addr = alloca(addrlen);
3299 
3300     ret_addrlen = addrlen;
3301     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3302     if (!is_error(ret)) {
3303         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3304         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3305             ret = -TARGET_EFAULT;
3306         }
3307     }
3308     return ret;
3309 }
3310 
3311 /* do_getsockname() must return target values and target errnos. */
3312 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3313                                abi_ulong target_addrlen_addr)
3314 {
3315     socklen_t addrlen, ret_addrlen;
3316     void *addr;
3317     abi_long ret;
3318 
3319     if (get_user_u32(addrlen, target_addrlen_addr))
3320         return -TARGET_EFAULT;
3321 
3322     if ((int)addrlen < 0) {
3323         return -TARGET_EINVAL;
3324     }
3325 
3326     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3327         return -TARGET_EFAULT;
3328 
3329     addr = alloca(addrlen);
3330 
3331     ret_addrlen = addrlen;
3332     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3333     if (!is_error(ret)) {
3334         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3335         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3336             ret = -TARGET_EFAULT;
3337         }
3338     }
3339     return ret;
3340 }
3341 
3342 /* do_socketpair() must return target values and target errnos. */
3343 static abi_long do_socketpair(int domain, int type, int protocol,
3344                               abi_ulong target_tab_addr)
3345 {
3346     int tab[2];
3347     abi_long ret;
3348 
3349     target_to_host_sock_type(&type);
3350 
3351     ret = get_errno(socketpair(domain, type, protocol, tab));
3352     if (!is_error(ret)) {
3353         if (put_user_s32(tab[0], target_tab_addr)
3354             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3355             ret = -TARGET_EFAULT;
3356     }
3357     return ret;
3358 }
3359 
3360 /* do_sendto() must return target values and target errnos. */
3361 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3362                           abi_ulong target_addr, socklen_t addrlen)
3363 {
3364     void *addr;
3365     void *host_msg;
3366     void *copy_msg = NULL;
3367     abi_long ret;
3368 
3369     if ((int)addrlen < 0) {
3370         return -TARGET_EINVAL;
3371     }
3372 
3373     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3374     if (!host_msg)
3375         return -TARGET_EFAULT;
3376     if (fd_trans_target_to_host_data(fd)) {
3377         copy_msg = host_msg;
3378         host_msg = g_malloc(len);
3379         memcpy(host_msg, copy_msg, len);
3380         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3381         if (ret < 0) {
3382             goto fail;
3383         }
3384     }
3385     if (target_addr) {
3386         addr = alloca(addrlen+1);
3387         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3388         if (ret) {
3389             goto fail;
3390         }
3391         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3392     } else {
3393         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3394     }
3395 fail:
3396     if (copy_msg) {
3397         g_free(host_msg);
3398         host_msg = copy_msg;
3399     }
3400     unlock_user(host_msg, msg, 0);
3401     return ret;
3402 }
3403 
3404 /* do_recvfrom() must return target values and target errnos. */
3405 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3406                             abi_ulong target_addr,
3407                             abi_ulong target_addrlen)
3408 {
3409     socklen_t addrlen, ret_addrlen;
3410     void *addr;
3411     void *host_msg;
3412     abi_long ret;
3413 
3414     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3415     if (!host_msg)
3416         return -TARGET_EFAULT;
3417     if (target_addr) {
3418         if (get_user_u32(addrlen, target_addrlen)) {
3419             ret = -TARGET_EFAULT;
3420             goto fail;
3421         }
3422         if ((int)addrlen < 0) {
3423             ret = -TARGET_EINVAL;
3424             goto fail;
3425         }
3426         addr = alloca(addrlen);
3427         ret_addrlen = addrlen;
3428         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3429                                       addr, &ret_addrlen));
3430     } else {
3431         addr = NULL; /* To keep compiler quiet.  */
3432         addrlen = 0; /* To keep compiler quiet.  */
3433         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3434     }
3435     if (!is_error(ret)) {
3436         if (fd_trans_host_to_target_data(fd)) {
3437             abi_long trans;
3438             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3439             if (is_error(trans)) {
3440                 ret = trans;
3441                 goto fail;
3442             }
3443         }
3444         if (target_addr) {
3445             host_to_target_sockaddr(target_addr, addr,
3446                                     MIN(addrlen, ret_addrlen));
3447             if (put_user_u32(ret_addrlen, target_addrlen)) {
3448                 ret = -TARGET_EFAULT;
3449                 goto fail;
3450             }
3451         }
3452         unlock_user(host_msg, msg, len);
3453     } else {
3454 fail:
3455         unlock_user(host_msg, msg, 0);
3456     }
3457     return ret;
3458 }
3459 
3460 #ifdef TARGET_NR_socketcall
3461 /* do_socketcall() must return target values and target errnos. */
3462 static abi_long do_socketcall(int num, abi_ulong vptr)
3463 {
3464     static const unsigned nargs[] = { /* number of arguments per operation */
3465         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3466         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3467         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3468         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3469         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3470         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3471         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3472         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3473         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3474         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3475         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3476         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3477         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3478         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3479         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3480         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3481         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3482         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3483         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3484         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3485     };
3486     abi_long a[6]; /* max 6 args */
3487     unsigned i;
3488 
3489     /* check the range of the first argument num */
3490     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3491     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3492         return -TARGET_EINVAL;
3493     }
3494     /* ensure we have space for args */
3495     if (nargs[num] > ARRAY_SIZE(a)) {
3496         return -TARGET_EINVAL;
3497     }
3498     /* collect the arguments in a[] according to nargs[] */
3499     for (i = 0; i < nargs[num]; ++i) {
3500         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3501             return -TARGET_EFAULT;
3502         }
3503     }
3504     /* now when we have the args, invoke the appropriate underlying function */
3505     switch (num) {
3506     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3507         return do_socket(a[0], a[1], a[2]);
3508     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3509         return do_bind(a[0], a[1], a[2]);
3510     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3511         return do_connect(a[0], a[1], a[2]);
3512     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3513         return get_errno(listen(a[0], a[1]));
3514     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3515         return do_accept4(a[0], a[1], a[2], 0);
3516     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3517         return do_getsockname(a[0], a[1], a[2]);
3518     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3519         return do_getpeername(a[0], a[1], a[2]);
3520     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3521         return do_socketpair(a[0], a[1], a[2], a[3]);
3522     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3523         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3524     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3525         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3526     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3527         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3528     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3529         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3530     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3531         return get_errno(shutdown(a[0], a[1]));
3532     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3533         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3534     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3535         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3536     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3537         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3538     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3539         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3540     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3541         return do_accept4(a[0], a[1], a[2], a[3]);
3542     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3543         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3544     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3545         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3546     default:
3547         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3548         return -TARGET_EINVAL;
3549     }
3550 }
3551 #endif
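/*
 * Illustrative sketch, not from the original file: on targets that funnel
 * every socket operation through sys_socketcall (classically i386), the
 * guest's C library packs the real arguments into an array of longs and the
 * demultiplexer above reads them back with get_user_ual().  Roughly:
 *
 *     #include <unistd.h>
 *     #include <sys/syscall.h>
 *     #include <netinet/in.h>
 *     #include <linux/net.h>       // SYS_BIND
 *
 *     struct sockaddr_in sa;       // filled in beforehand
 *     long args[3] = { fd, (long)&sa, sizeof(sa) };
 *     syscall(SYS_socketcall, SYS_BIND, args);   // ends up in do_bind()
 *
 * which is why nargs[] above records how many abi_longs to fetch for each
 * operation before dispatching.
 */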
3552 
3553 #define N_SHM_REGIONS	32
3554 
3555 static struct shm_region {
3556     abi_ulong start;
3557     abi_ulong size;
3558     bool in_use;
3559 } shm_regions[N_SHM_REGIONS];
3560 
3561 #ifndef TARGET_SEMID64_DS
3562 /* asm-generic version of this struct */
3563 struct target_semid64_ds
3564 {
3565   struct target_ipc_perm sem_perm;
3566   abi_ulong sem_otime;
3567 #if TARGET_ABI_BITS == 32
3568   abi_ulong __unused1;
3569 #endif
3570   abi_ulong sem_ctime;
3571 #if TARGET_ABI_BITS == 32
3572   abi_ulong __unused2;
3573 #endif
3574   abi_ulong sem_nsems;
3575   abi_ulong __unused3;
3576   abi_ulong __unused4;
3577 };
3578 #endif
3579 
3580 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3581                                                abi_ulong target_addr)
3582 {
3583     struct target_ipc_perm *target_ip;
3584     struct target_semid64_ds *target_sd;
3585 
3586     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3587         return -TARGET_EFAULT;
3588     target_ip = &(target_sd->sem_perm);
3589     host_ip->__key = tswap32(target_ip->__key);
3590     host_ip->uid = tswap32(target_ip->uid);
3591     host_ip->gid = tswap32(target_ip->gid);
3592     host_ip->cuid = tswap32(target_ip->cuid);
3593     host_ip->cgid = tswap32(target_ip->cgid);
3594 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3595     host_ip->mode = tswap32(target_ip->mode);
3596 #else
3597     host_ip->mode = tswap16(target_ip->mode);
3598 #endif
3599 #if defined(TARGET_PPC)
3600     host_ip->__seq = tswap32(target_ip->__seq);
3601 #else
3602     host_ip->__seq = tswap16(target_ip->__seq);
3603 #endif
3604     unlock_user_struct(target_sd, target_addr, 0);
3605     return 0;
3606 }
3607 
3608 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3609                                                struct ipc_perm *host_ip)
3610 {
3611     struct target_ipc_perm *target_ip;
3612     struct target_semid64_ds *target_sd;
3613 
3614     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3615         return -TARGET_EFAULT;
3616     target_ip = &(target_sd->sem_perm);
3617     target_ip->__key = tswap32(host_ip->__key);
3618     target_ip->uid = tswap32(host_ip->uid);
3619     target_ip->gid = tswap32(host_ip->gid);
3620     target_ip->cuid = tswap32(host_ip->cuid);
3621     target_ip->cgid = tswap32(host_ip->cgid);
3622 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3623     target_ip->mode = tswap32(host_ip->mode);
3624 #else
3625     target_ip->mode = tswap16(host_ip->mode);
3626 #endif
3627 #if defined(TARGET_PPC)
3628     target_ip->__seq = tswap32(host_ip->__seq);
3629 #else
3630     target_ip->__seq = tswap16(host_ip->__seq);
3631 #endif
3632     unlock_user_struct(target_sd, target_addr, 1);
3633     return 0;
3634 }
3635 
3636 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3637                                                abi_ulong target_addr)
3638 {
3639     struct target_semid64_ds *target_sd;
3640 
3641     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3642         return -TARGET_EFAULT;
3643     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3644         return -TARGET_EFAULT;
3645     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3646     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3647     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3648     unlock_user_struct(target_sd, target_addr, 0);
3649     return 0;
3650 }
3651 
3652 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3653                                                struct semid_ds *host_sd)
3654 {
3655     struct target_semid64_ds *target_sd;
3656 
3657     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3658         return -TARGET_EFAULT;
3659     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3660         return -TARGET_EFAULT;
3661     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3662     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3663     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3664     unlock_user_struct(target_sd, target_addr, 1);
3665     return 0;
3666 }
3667 
3668 struct target_seminfo {
3669     int semmap;
3670     int semmni;
3671     int semmns;
3672     int semmnu;
3673     int semmsl;
3674     int semopm;
3675     int semume;
3676     int semusz;
3677     int semvmx;
3678     int semaem;
3679 };
3680 
3681 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3682                                               struct seminfo *host_seminfo)
3683 {
3684     struct target_seminfo *target_seminfo;
3685     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3686         return -TARGET_EFAULT;
3687     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3688     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3689     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3690     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3691     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3692     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3693     __put_user(host_seminfo->semume, &target_seminfo->semume);
3694     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3695     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3696     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3697     unlock_user_struct(target_seminfo, target_addr, 1);
3698     return 0;
3699 }
3700 
3701 union semun {
3702     int val;
3703     struct semid_ds *buf;
3704     unsigned short *array;
3705     struct seminfo *__buf;
3706 };
3707 
3708 union target_semun {
3709     int val;
3710     abi_ulong buf;
3711     abi_ulong array;
3712     abi_ulong __buf;
3713 };
3714 
3715 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3716                                                abi_ulong target_addr)
3717 {
3718     int nsems;
3719     unsigned short *array;
3720     union semun semun;
3721     struct semid_ds semid_ds;
3722     int i, ret;
3723 
3724     semun.buf = &semid_ds;
3725 
3726     ret = semctl(semid, 0, IPC_STAT, semun);
3727     if (ret == -1)
3728         return get_errno(ret);
3729 
3730     nsems = semid_ds.sem_nsems;
3731 
3732     *host_array = g_try_new(unsigned short, nsems);
3733     if (!*host_array) {
3734         return -TARGET_ENOMEM;
3735     }
3736     array = lock_user(VERIFY_READ, target_addr,
3737                       nsems*sizeof(unsigned short), 1);
3738     if (!array) {
3739         g_free(*host_array);
3740         return -TARGET_EFAULT;
3741     }
3742 
3743     for (i = 0; i < nsems; i++) {
3744         __get_user((*host_array)[i], &array[i]);
3745     }
3746     unlock_user(array, target_addr, 0);
3747 
3748     return 0;
3749 }
3750 
3751 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3752                                                unsigned short **host_array)
3753 {
3754     int nsems;
3755     unsigned short *array;
3756     union semun semun;
3757     struct semid_ds semid_ds;
3758     int i, ret;
3759 
3760     semun.buf = &semid_ds;
3761 
3762     ret = semctl(semid, 0, IPC_STAT, semun);
3763     if (ret == -1)
3764         return get_errno(ret);
3765 
3766     nsems = semid_ds.sem_nsems;
3767 
3768     array = lock_user(VERIFY_WRITE, target_addr,
3769                       nsems*sizeof(unsigned short), 0);
3770     if (!array)
3771         return -TARGET_EFAULT;
3772 
3773     for (i = 0; i < nsems; i++) {
3774         __put_user((*host_array)[i], &array[i]);
3775     }
3776     g_free(*host_array);
3777     unlock_user(array, target_addr, 1);
3778 
3779     return 0;
3780 }
3781 
3782 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3783                                  abi_ulong target_arg)
3784 {
3785     union target_semun target_su = { .buf = target_arg };
3786     union semun arg;
3787     struct semid_ds dsarg;
3788     unsigned short *array = NULL;
3789     struct seminfo seminfo;
3790     abi_long ret = -TARGET_EINVAL;
3791     abi_long err;
3792     cmd &= 0xff;
3793 
3794     switch (cmd) {
3795     case GETVAL:
3796     case SETVAL:
3797         /* In 64 bit cross-endian situations, we will erroneously pick up
3798          * the wrong half of the union for the "val" element.  To rectify
3799          * this, the entire 8-byte structure is byteswapped, followed by
3800          * a swap of the 4 byte val field. In other cases, the data is
3801          * already in proper host byte order (worked example after this function). */
3802         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3803             target_su.buf = tswapal(target_su.buf);
3804             arg.val = tswap32(target_su.val);
3805         } else {
3806             arg.val = target_su.val;
3807         }
3808         ret = get_errno(semctl(semid, semnum, cmd, arg));
3809         break;
3810     case GETALL:
3811     case SETALL:
3812         err = target_to_host_semarray(semid, &array, target_su.array);
3813         if (err)
3814             return err;
3815         arg.array = array;
3816         ret = get_errno(semctl(semid, semnum, cmd, arg));
3817         err = host_to_target_semarray(semid, target_su.array, &array);
3818         if (err)
3819             return err;
3820         break;
3821     case IPC_STAT:
3822     case IPC_SET:
3823     case SEM_STAT:
3824         err = target_to_host_semid_ds(&dsarg, target_su.buf);
3825         if (err)
3826             return err;
3827         arg.buf = &dsarg;
3828         ret = get_errno(semctl(semid, semnum, cmd, arg));
3829         err = host_to_target_semid_ds(target_su.buf, &dsarg);
3830         if (err)
3831             return err;
3832         break;
3833     case IPC_INFO:
3834     case SEM_INFO:
3835         arg.__buf = &seminfo;
3836         ret = get_errno(semctl(semid, semnum, cmd, arg));
3837         err = host_to_target_seminfo(target_su.__buf, &seminfo);
3838         if (err)
3839             return err;
3840         break;
3841     case IPC_RMID:
3842     case GETPID:
3843     case GETNCNT:
3844     case GETZCNT:
3845         ret = get_errno(semctl(semid, semnum, cmd, NULL));
3846         break;
3847     }
3848 
3849     return ret;
3850 }
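/*
 * Worked example for the GETVAL/SETVAL byteswapping above (illustrative,
 * not part of the original file).  Take a big-endian guest with a 64-bit
 * abi_ulong running on a little-endian host, where the guest called
 * semctl(id, 0, SETVAL, (union semun){ .val = 5 }):
 *
 *     guest bytes of the 8-byte union:   00 00 00 05 xx xx xx xx
 *                                        (the 4-byte int sits at offset 0)
 *     after the usual tswapal() on the way in, target_su.buf holds
 *     0x00000005xxxxxxxx, which the little-endian host stores as
 *                                        xx xx xx xx 05 00 00 00
 *     so reading target_su.val naively yields the xx garbage half.
 *
 * Swapping the whole 8 bytes back (tswapal) puts 00 00 00 05 in front
 * again, and tswap32() of the 4-byte val then recovers 5.  When the two
 * union members have the same size (32-bit targets) no fixup is needed.
 */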
3851 
3852 struct target_sembuf {
3853     unsigned short sem_num;
3854     short sem_op;
3855     short sem_flg;
3856 };
3857 
3858 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3859                                              abi_ulong target_addr,
3860                                              unsigned nsops)
3861 {
3862     struct target_sembuf *target_sembuf;
3863     int i;
3864 
3865     target_sembuf = lock_user(VERIFY_READ, target_addr,
3866                               nsops*sizeof(struct target_sembuf), 1);
3867     if (!target_sembuf)
3868         return -TARGET_EFAULT;
3869 
3870     for (i = 0; i < nsops; i++) {
3871         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3872         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3873         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3874     }
3875 
3876     unlock_user(target_sembuf, target_addr, 0);
3877 
3878     return 0;
3879 }
3880 
3881 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3882 {
3883     struct sembuf sops[nsops];
3884     abi_long ret;
3885 
3886     if (target_to_host_sembuf(sops, ptr, nsops))
3887         return -TARGET_EFAULT;
3888 
3889     ret = -TARGET_ENOSYS;
3890 #ifdef __NR_semtimedop
3891     ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3892 #endif
3893 #ifdef __NR_ipc
3894     if (ret == -TARGET_ENOSYS) {
3895         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3896     }
3897 #endif
3898     return ret;
3899 }
3900 
3901 struct target_msqid_ds
3902 {
3903     struct target_ipc_perm msg_perm;
3904     abi_ulong msg_stime;
3905 #if TARGET_ABI_BITS == 32
3906     abi_ulong __unused1;
3907 #endif
3908     abi_ulong msg_rtime;
3909 #if TARGET_ABI_BITS == 32
3910     abi_ulong __unused2;
3911 #endif
3912     abi_ulong msg_ctime;
3913 #if TARGET_ABI_BITS == 32
3914     abi_ulong __unused3;
3915 #endif
3916     abi_ulong __msg_cbytes;
3917     abi_ulong msg_qnum;
3918     abi_ulong msg_qbytes;
3919     abi_ulong msg_lspid;
3920     abi_ulong msg_lrpid;
3921     abi_ulong __unused4;
3922     abi_ulong __unused5;
3923 };
3924 
3925 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3926                                                abi_ulong target_addr)
3927 {
3928     struct target_msqid_ds *target_md;
3929 
3930     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3931         return -TARGET_EFAULT;
3932     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3933         return -TARGET_EFAULT;
3934     host_md->msg_stime = tswapal(target_md->msg_stime);
3935     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3936     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3937     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3938     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3939     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3940     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3941     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3942     unlock_user_struct(target_md, target_addr, 0);
3943     return 0;
3944 }
3945 
3946 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3947                                                struct msqid_ds *host_md)
3948 {
3949     struct target_msqid_ds *target_md;
3950 
3951     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3952         return -TARGET_EFAULT;
3953     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3954         return -TARGET_EFAULT;
3955     target_md->msg_stime = tswapal(host_md->msg_stime);
3956     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3957     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3958     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3959     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3960     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3961     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3962     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3963     unlock_user_struct(target_md, target_addr, 1);
3964     return 0;
3965 }
3966 
3967 struct target_msginfo {
3968     int msgpool;
3969     int msgmap;
3970     int msgmax;
3971     int msgmnb;
3972     int msgmni;
3973     int msgssz;
3974     int msgtql;
3975     unsigned short int msgseg;
3976 };
3977 
3978 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3979                                               struct msginfo *host_msginfo)
3980 {
3981     struct target_msginfo *target_msginfo;
3982     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3983         return -TARGET_EFAULT;
3984     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3985     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3986     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3987     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3988     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3989     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3990     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3991     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3992     unlock_user_struct(target_msginfo, target_addr, 1);
3993     return 0;
3994 }
3995 
3996 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3997 {
3998     struct msqid_ds dsarg;
3999     struct msginfo msginfo;
4000     abi_long ret = -TARGET_EINVAL;
4001 
4002     cmd &= 0xff;
4003 
4004     switch (cmd) {
4005     case IPC_STAT:
4006     case IPC_SET:
4007     case MSG_STAT:
4008         if (target_to_host_msqid_ds(&dsarg,ptr))
4009             return -TARGET_EFAULT;
4010         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4011         if (host_to_target_msqid_ds(ptr,&dsarg))
4012             return -TARGET_EFAULT;
4013         break;
4014     case IPC_RMID:
4015         ret = get_errno(msgctl(msgid, cmd, NULL));
4016         break;
4017     case IPC_INFO:
4018     case MSG_INFO:
4019         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4020         if (host_to_target_msginfo(ptr, &msginfo))
4021             return -TARGET_EFAULT;
4022         break;
4023     }
4024 
4025     return ret;
4026 }
4027 
4028 struct target_msgbuf {
4029     abi_long mtype;
4030     char	mtext[1];
4031 };
4032 
4033 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4034                                  ssize_t msgsz, int msgflg)
4035 {
4036     struct target_msgbuf *target_mb;
4037     struct msgbuf *host_mb;
4038     abi_long ret = 0;
4039 
4040     if (msgsz < 0) {
4041         return -TARGET_EINVAL;
4042     }
4043 
4044     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4045         return -TARGET_EFAULT;
4046     host_mb = g_try_malloc(msgsz + sizeof(long));
4047     if (!host_mb) {
4048         unlock_user_struct(target_mb, msgp, 0);
4049         return -TARGET_ENOMEM;
4050     }
4051     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4052     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4053     ret = -TARGET_ENOSYS;
4054 #ifdef __NR_msgsnd
4055     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4056 #endif
4057 #ifdef __NR_ipc
4058     if (ret == -TARGET_ENOSYS) {
4059         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4060                                  host_mb, 0));
4061     }
4062 #endif
4063     g_free(host_mb);
4064     unlock_user_struct(target_mb, msgp, 0);
4065 
4066     return ret;
4067 }
4068 
4069 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4070                                  ssize_t msgsz, abi_long msgtyp,
4071                                  int msgflg)
4072 {
4073     struct target_msgbuf *target_mb;
4074     char *target_mtext;
4075     struct msgbuf *host_mb;
4076     abi_long ret = 0;
4077 
4078     if (msgsz < 0) {
4079         return -TARGET_EINVAL;
4080     }
4081 
4082     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4083         return -TARGET_EFAULT;
4084 
4085     host_mb = g_try_malloc(msgsz + sizeof(long));
4086     if (!host_mb) {
4087         ret = -TARGET_ENOMEM;
4088         goto end;
4089     }
4090     ret = -TARGET_ENOSYS;
4091 #ifdef __NR_msgrcv
4092     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4093 #endif
4094 #ifdef __NR_ipc
4095     if (ret == -TARGET_ENOSYS) {
4096         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4097                         msgflg, host_mb, msgtyp));
4098     }
4099 #endif
4100 
4101     if (ret > 0) {
4102         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4103         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4104         if (!target_mtext) {
4105             ret = -TARGET_EFAULT;
4106             goto end;
4107         }
4108         memcpy(target_mb->mtext, host_mb->mtext, ret);
4109         unlock_user(target_mtext, target_mtext_addr, ret);
4110     }
4111 
4112     target_mb->mtype = tswapal(host_mb->mtype);
4113 
4114 end:
4115     if (target_mb)
4116         unlock_user_struct(target_mb, msgp, 1);
4117     g_free(host_mb);
4118     return ret;
4119 }
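/*
 * Illustrative sketch, not from the original file: both helpers above copy
 * between the guest's view of struct msgbuf (an abi_long mtype followed by
 * the text, hence the msgp + sizeof(abi_ulong) offset) and the host's
 * struct msgbuf (a host long mtype, hence the msgsz + sizeof(long)
 * allocation).  The guest side is just the ordinary SysV API:
 *
 *     #include <sys/ipc.h>
 *     #include <sys/msg.h>
 *     #include <string.h>
 *
 *     int qid = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
 *     struct { long mtype; char mtext[64]; } m;
 *     m.mtype = 1;
 *     strcpy(m.mtext, "hello");
 *     msgsnd(qid, &m, sizeof(m.mtext), 0);        // handled by do_msgsnd()
 *     msgrcv(qid, &m, sizeof(m.mtext), 1, 0);     // handled by do_msgrcv()
 */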
4120 
4121 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4122                                                abi_ulong target_addr)
4123 {
4124     struct target_shmid_ds *target_sd;
4125 
4126     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4127         return -TARGET_EFAULT;
4128     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4129         return -TARGET_EFAULT;
4130     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4131     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4132     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4133     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4134     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4135     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4136     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4137     unlock_user_struct(target_sd, target_addr, 0);
4138     return 0;
4139 }
4140 
4141 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4142                                                struct shmid_ds *host_sd)
4143 {
4144     struct target_shmid_ds *target_sd;
4145 
4146     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4147         return -TARGET_EFAULT;
4148     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4149         return -TARGET_EFAULT;
4150     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4151     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4152     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4153     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4154     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4155     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4156     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4157     unlock_user_struct(target_sd, target_addr, 1);
4158     return 0;
4159 }
4160 
4161 struct  target_shminfo {
4162     abi_ulong shmmax;
4163     abi_ulong shmmin;
4164     abi_ulong shmmni;
4165     abi_ulong shmseg;
4166     abi_ulong shmall;
4167 };
4168 
4169 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4170                                               struct shminfo *host_shminfo)
4171 {
4172     struct target_shminfo *target_shminfo;
4173     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4174         return -TARGET_EFAULT;
4175     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4176     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4177     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4178     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4179     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4180     unlock_user_struct(target_shminfo, target_addr, 1);
4181     return 0;
4182 }
4183 
4184 struct target_shm_info {
4185     int used_ids;
4186     abi_ulong shm_tot;
4187     abi_ulong shm_rss;
4188     abi_ulong shm_swp;
4189     abi_ulong swap_attempts;
4190     abi_ulong swap_successes;
4191 };
4192 
4193 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4194                                                struct shm_info *host_shm_info)
4195 {
4196     struct target_shm_info *target_shm_info;
4197     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4198         return -TARGET_EFAULT;
4199     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4200     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4201     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4202     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4203     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4204     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4205     unlock_user_struct(target_shm_info, target_addr, 1);
4206     return 0;
4207 }
4208 
4209 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4210 {
4211     struct shmid_ds dsarg;
4212     struct shminfo shminfo;
4213     struct shm_info shm_info;
4214     abi_long ret = -TARGET_EINVAL;
4215 
4216     cmd &= 0xff;
4217 
4218     switch (cmd) {
4219     case IPC_STAT:
4220     case IPC_SET:
4221     case SHM_STAT:
4222         if (target_to_host_shmid_ds(&dsarg, buf))
4223             return -TARGET_EFAULT;
4224         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4225         if (host_to_target_shmid_ds(buf, &dsarg))
4226             return -TARGET_EFAULT;
4227         break;
4228     case IPC_INFO:
4229         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4230         if (host_to_target_shminfo(buf, &shminfo))
4231             return -TARGET_EFAULT;
4232         break;
4233     case SHM_INFO:
4234         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4235         if (host_to_target_shm_info(buf, &shm_info))
4236             return -TARGET_EFAULT;
4237         break;
4238     case IPC_RMID:
4239     case SHM_LOCK:
4240     case SHM_UNLOCK:
4241         ret = get_errno(shmctl(shmid, cmd, NULL));
4242         break;
4243     }
4244 
4245     return ret;
4246 }
4247 
4248 #ifndef TARGET_FORCE_SHMLBA
4249 /* For most architectures, SHMLBA is the same as the page size;
4250  * some architectures have larger values, in which case they should
4251  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4252  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4253  * and defining its own value for SHMLBA.
4254  *
4255  * The kernel also permits SHMLBA to be set by the architecture to a
4256  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4257  * this means that addresses are rounded to the large size if
4258  * SHM_RND is set but addresses not aligned to that size are not rejected
4259  * as long as they are at least page-aligned. Since the only architecture
4260  * which uses this is ia64 this code doesn't provide for that oddity.
4261  */
4262 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4263 {
4264     return TARGET_PAGE_SIZE;
4265 }
4266 #endif
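/*
 * Illustrative sketch, not part of the original file: an architecture whose
 * shared-memory attach addresses must be aligned more coarsely than a page
 * (for example to avoid cache aliasing) would define TARGET_FORCE_SHMLBA in
 * its target headers and provide its own helper instead of the default
 * above, along the lines of (the constant is a made-up example value):
 *
 *     #define TARGET_FORCE_SHMLBA 1
 *
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 0x40000;   // 256 KiB attach granularity, example only
 *     }
 *
 * do_shmat() below then rejects attach addresses not aligned to this value
 * unless SHM_RND is passed, in which case it rounds them down.
 */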
4267 
4268 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4269                                  int shmid, abi_ulong shmaddr, int shmflg)
4270 {
4271     abi_long raddr;
4272     void *host_raddr;
4273     struct shmid_ds shm_info;
4274     int i,ret;
4275     abi_ulong shmlba;
4276 
4277     /* find out the length of the shared memory segment */
4278     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4279     if (is_error(ret)) {
4280         /* can't get length, bail out */
4281         return ret;
4282     }
4283 
4284     shmlba = target_shmlba(cpu_env);
4285 
4286     if (shmaddr & (shmlba - 1)) {
4287         if (shmflg & SHM_RND) {
4288             shmaddr &= ~(shmlba - 1);
4289         } else {
4290             return -TARGET_EINVAL;
4291         }
4292     }
4293     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4294         return -TARGET_EINVAL;
4295     }
4296 
4297     mmap_lock();
4298 
4299     if (shmaddr)
4300         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4301     else {
4302         abi_ulong mmap_start;
4303 
4304         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4305         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4306 
4307         if (mmap_start == -1) {
4308             errno = ENOMEM;
4309             host_raddr = (void *)-1;
4310         } else
4311             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4312     }
4313 
4314     if (host_raddr == (void *)-1) {
4315         mmap_unlock();
4316         return get_errno((long)host_raddr);
4317     }
4318     raddr = h2g((unsigned long)host_raddr);
4319 
4320     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4321                    PAGE_VALID | PAGE_READ |
4322                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4323 
4324     for (i = 0; i < N_SHM_REGIONS; i++) {
4325         if (!shm_regions[i].in_use) {
4326             shm_regions[i].in_use = true;
4327             shm_regions[i].start = raddr;
4328             shm_regions[i].size = shm_info.shm_segsz;
4329             break;
4330         }
4331     }
4332 
4333     mmap_unlock();
4334     return raddr;
4335 
4336 }
4337 
4338 static inline abi_long do_shmdt(abi_ulong shmaddr)
4339 {
4340     int i;
4341     abi_long rv;
4342 
4343     mmap_lock();
4344 
4345     for (i = 0; i < N_SHM_REGIONS; ++i) {
4346         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4347             shm_regions[i].in_use = false;
4348             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4349             break;
4350         }
4351     }
4352     rv = get_errno(shmdt(g2h(shmaddr)));
4353 
4354     mmap_unlock();
4355 
4356     return rv;
4357 }
4358 
4359 #ifdef TARGET_NR_ipc
4360 /* ??? This only works with linear mappings.  */
4361 /* do_ipc() must return target values and target errnos. */
4362 static abi_long do_ipc(CPUArchState *cpu_env,
4363                        unsigned int call, abi_long first,
4364                        abi_long second, abi_long third,
4365                        abi_long ptr, abi_long fifth)
4366 {
4367     int version;
4368     abi_long ret = 0;
4369 
4370     version = call >> 16;
4371     call &= 0xffff;
4372 
4373     switch (call) {
4374     case IPCOP_semop:
4375         ret = do_semop(first, ptr, second);
4376         break;
4377 
4378     case IPCOP_semget:
4379         ret = get_errno(semget(first, second, third));
4380         break;
4381 
4382     case IPCOP_semctl: {
4383         /* The semun argument to semctl is passed by value, so dereference the
4384          * ptr argument. */
4385         abi_ulong atptr;
4386         get_user_ual(atptr, ptr);
4387         ret = do_semctl(first, second, third, atptr);
4388         break;
4389     }
4390 
4391     case IPCOP_msgget:
4392         ret = get_errno(msgget(first, second));
4393         break;
4394 
4395     case IPCOP_msgsnd:
4396         ret = do_msgsnd(first, ptr, second, third);
4397         break;
4398 
4399     case IPCOP_msgctl:
4400         ret = do_msgctl(first, second, ptr);
4401         break;
4402 
4403     case IPCOP_msgrcv:
4404         switch (version) {
4405         case 0:
4406             {
4407                 struct target_ipc_kludge {
4408                     abi_long msgp;
4409                     abi_long msgtyp;
4410                 } *tmp;
4411 
4412                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4413                     ret = -TARGET_EFAULT;
4414                     break;
4415                 }
4416 
4417                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4418 
4419                 unlock_user_struct(tmp, ptr, 0);
4420                 break;
4421             }
4422         default:
4423             ret = do_msgrcv(first, ptr, second, fifth, third);
4424         }
4425         break;
4426 
4427     case IPCOP_shmat:
4428         switch (version) {
4429         default:
4430         {
4431             abi_ulong raddr;
4432             raddr = do_shmat(cpu_env, first, ptr, second);
4433             if (is_error(raddr))
4434                 return get_errno(raddr);
4435             if (put_user_ual(raddr, third))
4436                 return -TARGET_EFAULT;
4437             break;
4438         }
4439         case 1:
4440             ret = -TARGET_EINVAL;
4441             break;
4442         }
4443         break;
4444     case IPCOP_shmdt:
4445         ret = do_shmdt(ptr);
4446         break;
4447 
4448     case IPCOP_shmget:
4449         /* IPC_* flag values are the same on all Linux platforms */
4450         ret = get_errno(shmget(first, second, third));
4451         break;
4452 
4453         /* IPC_* and SHM_* command values are the same on all Linux platforms */
4454     case IPCOP_shmctl:
4455         ret = do_shmctl(first, second, ptr);
4456         break;
4457     default:
4458         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4459                       call, version);
4460         ret = -TARGET_ENOSYS;
4461         break;
4462     }
4463     return ret;
4464 }
4465 #endif
4466 
4467 /* kernel structure types definitions */
4468 
4469 #define STRUCT(name, ...) STRUCT_ ## name,
4470 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4471 enum {
4472 #include "syscall_types.h"
4473 STRUCT_MAX
4474 };
4475 #undef STRUCT
4476 #undef STRUCT_SPECIAL
4477 
4478 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4479 #define STRUCT_SPECIAL(name)
4480 #include "syscall_types.h"
4481 #undef STRUCT
4482 #undef STRUCT_SPECIAL
4483 
4484 typedef struct IOCTLEntry IOCTLEntry;
4485 
4486 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4487                              int fd, int cmd, abi_long arg);
4488 
4489 struct IOCTLEntry {
4490     int target_cmd;
4491     unsigned int host_cmd;
4492     const char *name;
4493     int access;
4494     do_ioctl_fn *do_ioctl;
4495     const argtype arg_type[5];
4496 };
4497 
4498 #define IOC_R 0x0001
4499 #define IOC_W 0x0002
4500 #define IOC_RW (IOC_R | IOC_W)
4501 
4502 #define MAX_STRUCT_SIZE 4096
4503 
4504 #ifdef CONFIG_FIEMAP
4505 /* So fiemap access checks don't overflow on 32 bit systems.
4506  * This is very slightly smaller than the limit imposed by
4507  * the underlying kernel.
4508  */
4509 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4510                             / sizeof(struct fiemap_extent))
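/*
 * Rough arithmetic (illustrative, not from the original file): with the
 * usual UAPI layout, sizeof(struct fiemap) is 32 bytes and
 * sizeof(struct fiemap_extent) is 56 bytes, so on a host where UINT_MAX is
 * 0xffffffff this works out to
 *
 *     (0xffffffff - 32) / 56  ~=  76.7 million extents
 *
 * which keeps the outbufsz computation below,
 * sizeof(*fm) + fm_extent_count * sizeof(struct fiemap_extent),
 * safely below UINT_MAX and therefore free of 32-bit overflow.
 */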
4511 
4512 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4513                                        int fd, int cmd, abi_long arg)
4514 {
4515     /* The parameter for this ioctl is a struct fiemap followed
4516      * by an array of struct fiemap_extent whose size is set
4517      * in fiemap->fm_extent_count. The array is filled in by the
4518      * ioctl.
4519      */
4520     int target_size_in, target_size_out;
4521     struct fiemap *fm;
4522     const argtype *arg_type = ie->arg_type;
4523     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4524     void *argptr, *p;
4525     abi_long ret;
4526     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4527     uint32_t outbufsz;
4528     int free_fm = 0;
4529 
4530     assert(arg_type[0] == TYPE_PTR);
4531     assert(ie->access == IOC_RW);
4532     arg_type++;
4533     target_size_in = thunk_type_size(arg_type, 0);
4534     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4535     if (!argptr) {
4536         return -TARGET_EFAULT;
4537     }
4538     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4539     unlock_user(argptr, arg, 0);
4540     fm = (struct fiemap *)buf_temp;
4541     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4542         return -TARGET_EINVAL;
4543     }
4544 
4545     outbufsz = sizeof (*fm) +
4546         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4547 
4548     if (outbufsz > MAX_STRUCT_SIZE) {
4549         /* We can't fit all the extents into the fixed size buffer.
4550          * Allocate one that is large enough and use it instead.
4551          */
4552         fm = g_try_malloc(outbufsz);
4553         if (!fm) {
4554             return -TARGET_ENOMEM;
4555         }
4556         memcpy(fm, buf_temp, sizeof(struct fiemap));
4557         free_fm = 1;
4558     }
4559     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4560     if (!is_error(ret)) {
4561         target_size_out = target_size_in;
4562         /* An extent_count of 0 means we were only counting the extents
4563          * so there are no structs to copy
4564          */
4565         if (fm->fm_extent_count != 0) {
4566             target_size_out += fm->fm_mapped_extents * extent_size;
4567         }
4568         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4569         if (!argptr) {
4570             ret = -TARGET_EFAULT;
4571         } else {
4572             /* Convert the struct fiemap */
4573             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4574             if (fm->fm_extent_count != 0) {
4575                 p = argptr + target_size_in;
4576                 /* ...and then all the struct fiemap_extents */
4577                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4578                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4579                                   THUNK_TARGET);
4580                     p += extent_size;
4581                 }
4582             }
4583             unlock_user(argptr, arg, target_size_out);
4584         }
4585     }
4586     if (free_fm) {
4587         g_free(fm);
4588     }
4589     return ret;
4590 }
4591 #endif
4592 
4593 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4594                                 int fd, int cmd, abi_long arg)
4595 {
4596     const argtype *arg_type = ie->arg_type;
4597     int target_size;
4598     void *argptr;
4599     int ret;
4600     struct ifconf *host_ifconf;
4601     uint32_t outbufsz;
4602     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4603     int target_ifreq_size;
4604     int nb_ifreq;
4605     int free_buf = 0;
4606     int i;
4607     int target_ifc_len;
4608     abi_long target_ifc_buf;
4609     int host_ifc_len;
4610     char *host_ifc_buf;
4611 
4612     assert(arg_type[0] == TYPE_PTR);
4613     assert(ie->access == IOC_RW);
4614 
4615     arg_type++;
4616     target_size = thunk_type_size(arg_type, 0);
4617 
4618     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4619     if (!argptr)
4620         return -TARGET_EFAULT;
4621     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4622     unlock_user(argptr, arg, 0);
4623 
4624     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4625     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4626     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4627 
4628     if (target_ifc_buf != 0) {
4629         target_ifc_len = host_ifconf->ifc_len;
4630         nb_ifreq = target_ifc_len / target_ifreq_size;
4631         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4632 
4633         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4634         if (outbufsz > MAX_STRUCT_SIZE) {
4635             /*
4636              * We can't fit all the ifreq entries into the fixed size buffer.
4637              * Allocate one that is large enough and use it instead.
4638              */
4639             host_ifconf = malloc(outbufsz);
4640             if (!host_ifconf) {
4641                 return -TARGET_ENOMEM;
4642             }
4643             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4644             free_buf = 1;
4645         }
4646         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4647 
4648         host_ifconf->ifc_len = host_ifc_len;
4649     } else {
4650         host_ifc_buf = NULL;
4651     }
4652     host_ifconf->ifc_buf = host_ifc_buf;
4653 
4654     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4655     if (!is_error(ret)) {
4656         /* convert host ifc_len to target ifc_len */
4657 
4658         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4659         target_ifc_len = nb_ifreq * target_ifreq_size;
4660         host_ifconf->ifc_len = target_ifc_len;
4661 
4662         /* restore target ifc_buf */
4663 
4664         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4665 
4666         /* copy struct ifconf to target user */
4667 
4668         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4669         if (!argptr)
4670             return -TARGET_EFAULT;
4671         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4672         unlock_user(argptr, arg, target_size);
4673 
4674         if (target_ifc_buf != 0) {
4675             /* copy ifreq[] to target user */
4676             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4677             for (i = 0; i < nb_ifreq ; i++) {
4678                 thunk_convert(argptr + i * target_ifreq_size,
4679                               host_ifc_buf + i * sizeof(struct ifreq),
4680                               ifreq_arg_type, THUNK_TARGET);
4681             }
4682             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4683         }
4684     }
4685 
4686     if (free_buf) {
4687         free(host_ifconf);
4688     }
4689 
4690     return ret;
4691 }
4692 
4693 #if defined(CONFIG_USBFS)
4694 #if HOST_LONG_BITS > 64
4695 #error USBDEVFS thunks do not support >64 bit hosts yet.
4696 #endif
4697 struct live_urb {
4698     uint64_t target_urb_adr;
4699     uint64_t target_buf_adr;
4700     char *target_buf_ptr;
4701     struct usbdevfs_urb host_urb;
4702 };
4703 
4704 static GHashTable *usbdevfs_urb_hashtable(void)
4705 {
4706     static GHashTable *urb_hashtable;
4707 
4708     if (!urb_hashtable) {
4709         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4710     }
4711     return urb_hashtable;
4712 }
4713 
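/*
 * The table is keyed by the guest address of the URB: g_int64_hash()
 * dereferences the key pointer as a 64-bit value, and target_urb_adr is
 * the first field of struct live_urb, so the live_urb pointer itself can
 * serve as the key on insert while lookups pass &target_urb_adr.
 */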
4714 static void urb_hashtable_insert(struct live_urb *urb)
4715 {
4716     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4717     g_hash_table_insert(urb_hashtable, urb, urb);
4718 }
4719 
4720 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4721 {
4722     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4723     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4724 }
4725 
4726 static void urb_hashtable_remove(struct live_urb *urb)
4727 {
4728     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4729     g_hash_table_remove(urb_hashtable, urb);
4730 }
4731 
4732 static abi_long
4733 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4734                           int fd, int cmd, abi_long arg)
4735 {
4736     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4737     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4738     struct live_urb *lurb;
4739     void *argptr;
4740     uint64_t hurb;
4741     int target_size;
4742     uintptr_t target_urb_adr;
4743     abi_long ret;
4744 
4745     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4746 
4747     memset(buf_temp, 0, sizeof(uint64_t));
4748     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4749     if (is_error(ret)) {
4750         return ret;
4751     }
4752 
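    /*
     * The kernel returns the host address of the usbdevfs_urb that was
     * submitted.  That urb is embedded in a struct live_urb, so stepping
     * back by its offset recovers the wrapper holding the guest addresses
     * (a container_of()-style calculation).
     */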
4753     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4754     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4755     if (!lurb->target_urb_adr) {
4756         return -TARGET_EFAULT;
4757     }
4758     urb_hashtable_remove(lurb);
4759     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4760         lurb->host_urb.buffer_length);
4761     lurb->target_buf_ptr = NULL;
4762 
4763     /* restore the guest buffer pointer */
4764     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4765 
4766     /* update the guest urb struct */
4767     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4768     if (!argptr) {
4769         g_free(lurb);
4770         return -TARGET_EFAULT;
4771     }
4772     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4773     unlock_user(argptr, lurb->target_urb_adr, target_size);
4774 
4775     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4776     /* write back the urb handle */
4777     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4778     if (!argptr) {
4779         g_free(lurb);
4780         return -TARGET_EFAULT;
4781     }
4782 
4783     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4784     target_urb_adr = lurb->target_urb_adr;
4785     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4786     unlock_user(argptr, arg, target_size);
4787 
4788     g_free(lurb);
4789     return ret;
4790 }
4791 
4792 static abi_long
4793 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4794                              uint8_t *buf_temp __attribute__((unused)),
4795                              int fd, int cmd, abi_long arg)
4796 {
4797     struct live_urb *lurb;
4798 
4799     /* map target address back to host URB with metadata. */
4800     lurb = urb_hashtable_lookup(arg);
4801     if (!lurb) {
4802         return -TARGET_EFAULT;
4803     }
4804     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4805 }
4806 
4807 static abi_long
4808 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4809                             int fd, int cmd, abi_long arg)
4810 {
4811     const argtype *arg_type = ie->arg_type;
4812     int target_size;
4813     abi_long ret;
4814     void *argptr;
4815     int rw_dir;
4816     struct live_urb *lurb;
4817 
4818     /*
4819      * Each submitted URB needs to map to a unique ID for the
4820      * kernel, and that unique ID needs to be a pointer to host
4821      * memory.  Hence, we allocate a fresh tracking struct per URB.
4822      * Isochronous transfers have a variable-length struct.
4823      */
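    /*
     * The guest URB address and the guest data buffer address are
     * recorded in the live_urb so they can be found again at reap time;
     * the data buffer itself stays locked until the URB is reaped.
     */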
4824     arg_type++;
4825     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4826 
4827     /* construct host copy of urb and metadata */
4828     lurb = g_try_malloc0(sizeof(struct live_urb));
4829     if (!lurb) {
4830         return -TARGET_ENOMEM;
4831     }
4832 
4833     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4834     if (!argptr) {
4835         g_free(lurb);
4836         return -TARGET_EFAULT;
4837     }
4838     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4839     unlock_user(argptr, arg, 0);
4840 
4841     lurb->target_urb_adr = arg;
4842     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4843 
4844     /* buffer space used depends on endpoint type so lock the entire buffer */
4845     /* control type urbs should check the buffer contents for true direction */
4846     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4847     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4848         lurb->host_urb.buffer_length, 1);
4849     if (lurb->target_buf_ptr == NULL) {
4850         g_free(lurb);
4851         return -TARGET_EFAULT;
4852     }
4853 
4854     /* update buffer pointer in host copy */
4855     lurb->host_urb.buffer = lurb->target_buf_ptr;
4856 
4857     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4858     if (is_error(ret)) {
4859         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4860         g_free(lurb);
4861     } else {
4862         urb_hashtable_insert(lurb);
4863     }
4864 
4865     return ret;
4866 }
4867 #endif /* CONFIG_USBFS */
4868 
4869 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4870                             int cmd, abi_long arg)
4871 {
4872     void *argptr;
4873     struct dm_ioctl *host_dm;
4874     abi_long guest_data;
4875     uint32_t guest_data_size;
4876     int target_size;
4877     const argtype *arg_type = ie->arg_type;
4878     abi_long ret;
4879     void *big_buf = NULL;
4880     char *host_data;
4881 
4882     arg_type++;
4883     target_size = thunk_type_size(arg_type, 0);
4884     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4885     if (!argptr) {
4886         ret = -TARGET_EFAULT;
4887         goto out;
4888     }
4889     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4890     unlock_user(argptr, arg, 0);
4891 
4892     /* buf_temp is too small, so fetch things into a bigger buffer */
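    /*
     * data_size is the total buffer size the guest negotiated (header
     * plus payload); doubling it here presumably leaves headroom for the
     * host representation growing during conversion.
     */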
4893     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4894     memcpy(big_buf, buf_temp, target_size);
4895     buf_temp = big_buf;
4896     host_dm = big_buf;
4897 
4898     guest_data = arg + host_dm->data_start;
4899     if ((guest_data - arg) < 0) {
4900         ret = -TARGET_EINVAL;
4901         goto out;
4902     }
4903     guest_data_size = host_dm->data_size - host_dm->data_start;
4904     host_data = (char*)host_dm + host_dm->data_start;
4905 
4906     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4907     if (!argptr) {
4908         ret = -TARGET_EFAULT;
4909         goto out;
4910     }
4911 
4912     switch (ie->host_cmd) {
4913     case DM_REMOVE_ALL:
4914     case DM_LIST_DEVICES:
4915     case DM_DEV_CREATE:
4916     case DM_DEV_REMOVE:
4917     case DM_DEV_SUSPEND:
4918     case DM_DEV_STATUS:
4919     case DM_DEV_WAIT:
4920     case DM_TABLE_STATUS:
4921     case DM_TABLE_CLEAR:
4922     case DM_TABLE_DEPS:
4923     case DM_LIST_VERSIONS:
4924         /* no input data */
4925         break;
4926     case DM_DEV_RENAME:
4927     case DM_DEV_SET_GEOMETRY:
4928         /* data contains only strings */
4929         memcpy(host_data, argptr, guest_data_size);
4930         break;
4931     case DM_TARGET_MSG:
4932         memcpy(host_data, argptr, guest_data_size);
4933         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4934         break;
4935     case DM_TABLE_LOAD:
4936     {
4937         void *gspec = argptr;
4938         void *cur_data = host_data;
4939         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4940         int spec_size = thunk_type_size(arg_type, 0);
4941         int i;
4942 
4943         for (i = 0; i < host_dm->target_count; i++) {
4944             struct dm_target_spec *spec = cur_data;
4945             uint32_t next;
4946             int slen;
4947 
4948             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4949             slen = strlen((char*)gspec + spec_size) + 1;
4950             next = spec->next;
4951             spec->next = sizeof(*spec) + slen;
4952             strcpy((char*)&spec[1], gspec + spec_size);
4953             gspec += next;
4954             cur_data += spec->next;
4955         }
4956         break;
4957     }
4958     default:
4959         ret = -TARGET_EINVAL;
4960         unlock_user(argptr, guest_data, 0);
4961         goto out;
4962     }
4963     unlock_user(argptr, guest_data, 0);
4964 
4965     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4966     if (!is_error(ret)) {
4967         guest_data = arg + host_dm->data_start;
4968         guest_data_size = host_dm->data_size - host_dm->data_start;
4969         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4970         switch (ie->host_cmd) {
4971         case DM_REMOVE_ALL:
4972         case DM_DEV_CREATE:
4973         case DM_DEV_REMOVE:
4974         case DM_DEV_RENAME:
4975         case DM_DEV_SUSPEND:
4976         case DM_DEV_STATUS:
4977         case DM_TABLE_LOAD:
4978         case DM_TABLE_CLEAR:
4979         case DM_TARGET_MSG:
4980         case DM_DEV_SET_GEOMETRY:
4981             /* no return data */
4982             break;
4983         case DM_LIST_DEVICES:
4984         {
4985             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4986             uint32_t remaining_data = guest_data_size;
4987             void *cur_data = argptr;
4988             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
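            /*
             * struct dm_name_list starts with a 64-bit dev field, so the
             * padded struct size reported by the thunk machinery is
             * larger than the 12-byte offset at which name[] actually
             * begins; the raw offset is used instead.
             */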
4989             int nl_size = 12; /* can't use thunk_type_size() due to alignment */
4990 
4991             while (1) {
4992                 uint32_t next = nl->next;
4993                 if (next) {
4994                     nl->next = nl_size + (strlen(nl->name) + 1);
4995                 }
4996                 if (remaining_data < nl->next) {
4997                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4998                     break;
4999                 }
5000                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5001                 strcpy(cur_data + nl_size, nl->name);
5002                 cur_data += nl->next;
5003                 remaining_data -= nl->next;
5004                 if (!next) {
5005                     break;
5006                 }
5007                 nl = (void*)nl + next;
5008             }
5009             break;
5010         }
5011         case DM_DEV_WAIT:
5012         case DM_TABLE_STATUS:
5013         {
5014             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5015             void *cur_data = argptr;
5016             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5017             int spec_size = thunk_type_size(arg_type, 0);
5018             int i;
5019 
5020             for (i = 0; i < host_dm->target_count; i++) {
5021                 uint32_t next = spec->next;
5022                 int slen = strlen((char*)&spec[1]) + 1;
5023                 spec->next = (cur_data - argptr) + spec_size + slen;
5024                 if (guest_data_size < spec->next) {
5025                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5026                     break;
5027                 }
5028                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5029                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5030                 cur_data = argptr + spec->next;
5031                 spec = (void*)host_dm + host_dm->data_start + next;
5032             }
5033             break;
5034         }
5035         case DM_TABLE_DEPS:
5036         {
5037             void *hdata = (void*)host_dm + host_dm->data_start;
5038             int count = *(uint32_t*)hdata;
5039             uint64_t *hdev = hdata + 8;
5040             uint64_t *gdev = argptr + 8;
5041             int i;
5042 
5043             *(uint32_t*)argptr = tswap32(count);
5044             for (i = 0; i < count; i++) {
5045                 *gdev = tswap64(*hdev);
5046                 gdev++;
5047                 hdev++;
5048             }
5049             break;
5050         }
5051         case DM_LIST_VERSIONS:
5052         {
5053             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5054             uint32_t remaining_data = guest_data_size;
5055             void *cur_data = argptr;
5056             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5057             int vers_size = thunk_type_size(arg_type, 0);
5058 
5059             while (1) {
5060                 uint32_t next = vers->next;
5061                 if (next) {
5062                     vers->next = vers_size + (strlen(vers->name) + 1);
5063                 }
5064                 if (remaining_data < vers->next) {
5065                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5066                     break;
5067                 }
5068                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5069                 strcpy(cur_data + vers_size, vers->name);
5070                 cur_data += vers->next;
5071                 remaining_data -= vers->next;
5072                 if (!next) {
5073                     break;
5074                 }
5075                 vers = (void*)vers + next;
5076             }
5077             break;
5078         }
5079         default:
5080             unlock_user(argptr, guest_data, 0);
5081             ret = -TARGET_EINVAL;
5082             goto out;
5083         }
5084         unlock_user(argptr, guest_data, guest_data_size);
5085 
5086         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5087         if (!argptr) {
5088             ret = -TARGET_EFAULT;
5089             goto out;
5090         }
5091         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5092         unlock_user(argptr, arg, target_size);
5093     }
5094 out:
5095     g_free(big_buf);
5096     return ret;
5097 }
5098 
5099 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5100                                int cmd, abi_long arg)
5101 {
5102     void *argptr;
5103     int target_size;
5104     const argtype *arg_type = ie->arg_type;
5105     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5106     abi_long ret;
5107 
5108     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5109     struct blkpg_partition host_part;
5110 
5111     /* Read and convert blkpg */
5112     arg_type++;
5113     target_size = thunk_type_size(arg_type, 0);
5114     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5115     if (!argptr) {
5116         ret = -TARGET_EFAULT;
5117         goto out;
5118     }
5119     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5120     unlock_user(argptr, arg, 0);
5121 
5122     switch (host_blkpg->op) {
5123     case BLKPG_ADD_PARTITION:
5124     case BLKPG_DEL_PARTITION:
5125         /* payload is struct blkpg_partition */
5126         break;
5127     default:
5128         /* Unknown opcode */
5129         ret = -TARGET_EINVAL;
5130         goto out;
5131     }
5132 
5133     /* Read and convert blkpg->data */
5134     arg = (abi_long)(uintptr_t)host_blkpg->data;
5135     target_size = thunk_type_size(part_arg_type, 0);
5136     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5137     if (!argptr) {
5138         ret = -TARGET_EFAULT;
5139         goto out;
5140     }
5141     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5142     unlock_user(argptr, arg, 0);
5143 
5144     /* Swizzle the data pointer to our local copy and call! */
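    /*
     * host_part is a stack-local copy; that is safe here because the
     * BLKPG ioctl is synchronous and the kernel is expected to copy the
     * partition record in during the call rather than keep the pointer.
     */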
5145     host_blkpg->data = &host_part;
5146     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5147 
5148 out:
5149     return ret;
5150 }
5151 
5152 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5153                                 int fd, int cmd, abi_long arg)
5154 {
5155     const argtype *arg_type = ie->arg_type;
5156     const StructEntry *se;
5157     const argtype *field_types;
5158     const int *dst_offsets, *src_offsets;
5159     int target_size;
5160     void *argptr;
5161     abi_ulong *target_rt_dev_ptr = NULL;
5162     unsigned long *host_rt_dev_ptr = NULL;
5163     abi_long ret;
5164     int i;
5165 
5166     assert(ie->access == IOC_W);
5167     assert(*arg_type == TYPE_PTR);
5168     arg_type++;
5169     assert(*arg_type == TYPE_STRUCT);
5170     target_size = thunk_type_size(arg_type, 0);
5171     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5172     if (!argptr) {
5173         return -TARGET_EFAULT;
5174     }
5175     arg_type++;
5176     assert(*arg_type == (int)STRUCT_rtentry);
5177     se = struct_entries + *arg_type++;
5178     assert(se->convert[0] == NULL);
5179     /* convert struct here to be able to catch rt_dev string */
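    /*
     * rt_dev is a pointer to a device-name string in guest memory rather
     * than inline data, so the generic thunk conversion cannot handle it;
     * the struct is converted field by field and rt_dev is swapped for a
     * locked host copy of the string (unlocked again after the ioctl).
     */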
5180     field_types = se->field_types;
5181     dst_offsets = se->field_offsets[THUNK_HOST];
5182     src_offsets = se->field_offsets[THUNK_TARGET];
5183     for (i = 0; i < se->nb_fields; i++) {
5184         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5185             assert(*field_types == TYPE_PTRVOID);
5186             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5187             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5188             if (*target_rt_dev_ptr != 0) {
5189                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5190                                                   tswapal(*target_rt_dev_ptr));
5191                 if (!*host_rt_dev_ptr) {
5192                     unlock_user(argptr, arg, 0);
5193                     return -TARGET_EFAULT;
5194                 }
5195             } else {
5196                 *host_rt_dev_ptr = 0;
5197             }
5198             field_types++;
5199             continue;
5200         }
5201         field_types = thunk_convert(buf_temp + dst_offsets[i],
5202                                     argptr + src_offsets[i],
5203                                     field_types, THUNK_HOST);
5204     }
5205     unlock_user(argptr, arg, 0);
5206 
5207     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5208 
5209     assert(host_rt_dev_ptr != NULL);
5210     assert(target_rt_dev_ptr != NULL);
5211     if (*host_rt_dev_ptr != 0) {
5212         unlock_user((void *)*host_rt_dev_ptr,
5213                     *target_rt_dev_ptr, 0);
5214     }
5215     return ret;
5216 }
5217 
5218 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5219                                      int fd, int cmd, abi_long arg)
5220 {
5221     int sig = target_to_host_signal(arg);
5222     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5223 }
5224 
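/*
 * SIOCGSTAMP/SIOCGSTAMPNS exist in an _OLD variant using the target's
 * traditional timeval/timespec layout and a newer variant with 64-bit
 * times; both handlers issue the host ioctl and copy the result out in
 * whichever format the guest requested.
 */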
5225 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5226                                     int fd, int cmd, abi_long arg)
5227 {
5228     struct timeval tv;
5229     abi_long ret;
5230 
5231     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5232     if (is_error(ret)) {
5233         return ret;
5234     }
5235 
5236     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5237         if (copy_to_user_timeval(arg, &tv)) {
5238             return -TARGET_EFAULT;
5239         }
5240     } else {
5241         if (copy_to_user_timeval64(arg, &tv)) {
5242             return -TARGET_EFAULT;
5243         }
5244     }
5245 
5246     return ret;
5247 }
5248 
5249 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5250                                       int fd, int cmd, abi_long arg)
5251 {
5252     struct timespec ts;
5253     abi_long ret;
5254 
5255     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5256     if (is_error(ret)) {
5257         return ret;
5258     }
5259 
5260     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5261         if (host_to_target_timespec(arg, &ts)) {
5262             return -TARGET_EFAULT;
5263         }
5264     } else {
5265         if (host_to_target_timespec64(arg, &ts)) {
5266             return -TARGET_EFAULT;
5267         }
5268     }
5269 
5270     return ret;
5271 }
5272 
5273 #ifdef TIOCGPTPEER
5274 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5275                                      int fd, int cmd, abi_long arg)
5276 {
5277     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5278     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5279 }
5280 #endif
5281 
5282 #ifdef HAVE_DRM_H
5283 
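/*
 * DRM_IOCTL_VERSION returns variable-length name/date/desc strings into
 * caller-supplied buffers.  These helpers lock the guest buffers, point
 * the host drm_version at them for the ioctl, and copy the lengths back
 * to the guest afterwards.
 */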
5284 static void unlock_drm_version(struct drm_version *host_ver,
5285                                struct target_drm_version *target_ver,
5286                                bool copy)
5287 {
5288     unlock_user(host_ver->name, target_ver->name,
5289                                 copy ? host_ver->name_len : 0);
5290     unlock_user(host_ver->date, target_ver->date,
5291                                 copy ? host_ver->date_len : 0);
5292     unlock_user(host_ver->desc, target_ver->desc,
5293                                 copy ? host_ver->desc_len : 0);
5294 }
5295 
5296 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5297                                           struct target_drm_version *target_ver)
5298 {
5299     memset(host_ver, 0, sizeof(*host_ver));
5300 
5301     __get_user(host_ver->name_len, &target_ver->name_len);
5302     if (host_ver->name_len) {
5303         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5304                                    target_ver->name_len, 0);
5305         if (!host_ver->name) {
5306             return -EFAULT;
5307         }
5308     }
5309 
5310     __get_user(host_ver->date_len, &target_ver->date_len);
5311     if (host_ver->date_len) {
5312         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5313                                    target_ver->date_len, 0);
5314         if (!host_ver->date) {
5315             goto err;
5316         }
5317     }
5318 
5319     __get_user(host_ver->desc_len, &target_ver->desc_len);
5320     if (host_ver->desc_len) {
5321         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5322                                    target_ver->desc_len, 0);
5323         if (!host_ver->desc) {
5324             goto err;
5325         }
5326     }
5327 
5328     return 0;
5329 err:
5330     unlock_drm_version(host_ver, target_ver, false);
5331     return -EFAULT;
5332 }
5333 
5334 static inline void host_to_target_drmversion(
5335                                           struct target_drm_version *target_ver,
5336                                           struct drm_version *host_ver)
5337 {
5338     __put_user(host_ver->version_major, &target_ver->version_major);
5339     __put_user(host_ver->version_minor, &target_ver->version_minor);
5340     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5341     __put_user(host_ver->name_len, &target_ver->name_len);
5342     __put_user(host_ver->date_len, &target_ver->date_len);
5343     __put_user(host_ver->desc_len, &target_ver->desc_len);
5344     unlock_drm_version(host_ver, target_ver, true);
5345 }
5346 
5347 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5348                              int fd, int cmd, abi_long arg)
5349 {
5350     struct drm_version *ver;
5351     struct target_drm_version *target_ver;
5352     abi_long ret;
5353 
5354     switch (ie->host_cmd) {
5355     case DRM_IOCTL_VERSION:
5356         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5357             return -TARGET_EFAULT;
5358         }
5359         ver = (struct drm_version *)buf_temp;
5360         ret = target_to_host_drmversion(ver, target_ver);
5361         if (!is_error(ret)) {
5362             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5363             if (is_error(ret)) {
5364                 unlock_drm_version(ver, target_ver, false);
5365             } else {
5366                 host_to_target_drmversion(target_ver, ver);
5367             }
5368         }
5369         unlock_user_struct(target_ver, arg, 0);
5370         return ret;
5371     }
5372     return -TARGET_ENOSYS;
5373 }
5374 
5375 #endif
5376 
5377 static IOCTLEntry ioctl_entries[] = {
5378 #define IOCTL(cmd, access, ...) \
5379     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5380 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5381     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5382 #define IOCTL_IGNORE(cmd) \
5383     { TARGET_ ## cmd, 0, #cmd },
5384 #include "ioctls.h"
5385     { 0, 0, },
5386 };
5387 
5388 /* ??? Implement proper locking for ioctls.  */
5389 /* do_ioctl() must return target values and target errnos. */
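/*
 * Dispatch: the table above is scanned linearly for a matching target
 * command.  Entries declared with IOCTL_SPECIAL carry their own handler;
 * IOCTL_IGNORE entries have host_cmd == 0 and fail with ENOSYS; everything
 * else is converted generically from arg_type and the IOC_R/W/RW access
 * mode using the thunk machinery.
 */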
5390 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5391 {
5392     const IOCTLEntry *ie;
5393     const argtype *arg_type;
5394     abi_long ret;
5395     uint8_t buf_temp[MAX_STRUCT_SIZE];
5396     int target_size;
5397     void *argptr;
5398 
5399     ie = ioctl_entries;
5400     for(;;) {
5401         if (ie->target_cmd == 0) {
5402             qemu_log_mask(
5403                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5404             return -TARGET_ENOSYS;
5405         }
5406         if (ie->target_cmd == cmd)
5407             break;
5408         ie++;
5409     }
5410     arg_type = ie->arg_type;
5411     if (ie->do_ioctl) {
5412         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5413     } else if (!ie->host_cmd) {
5414         /* Some architectures define BSD ioctls in their headers
5415            that are not implemented in Linux.  */
5416         return -TARGET_ENOSYS;
5417     }
5418 
5419     switch(arg_type[0]) {
5420     case TYPE_NULL:
5421         /* no argument */
5422         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5423         break;
5424     case TYPE_PTRVOID:
5425     case TYPE_INT:
5426     case TYPE_LONG:
5427     case TYPE_ULONG:
5428         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5429         break;
5430     case TYPE_PTR:
5431         arg_type++;
5432         target_size = thunk_type_size(arg_type, 0);
5433         switch(ie->access) {
5434         case IOC_R:
5435             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5436             if (!is_error(ret)) {
5437                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5438                 if (!argptr)
5439                     return -TARGET_EFAULT;
5440                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5441                 unlock_user(argptr, arg, target_size);
5442             }
5443             break;
5444         case IOC_W:
5445             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5446             if (!argptr)
5447                 return -TARGET_EFAULT;
5448             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5449             unlock_user(argptr, arg, 0);
5450             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5451             break;
5452         default:
5453         case IOC_RW:
5454             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5455             if (!argptr)
5456                 return -TARGET_EFAULT;
5457             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5458             unlock_user(argptr, arg, 0);
5459             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5460             if (!is_error(ret)) {
5461                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5462                 if (!argptr)
5463                     return -TARGET_EFAULT;
5464                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5465                 unlock_user(argptr, arg, target_size);
5466             }
5467             break;
5468         }
5469         break;
5470     default:
5471         qemu_log_mask(LOG_UNIMP,
5472                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5473                       (long)cmd, arg_type[0]);
5474         ret = -TARGET_ENOSYS;
5475         break;
5476     }
5477     return ret;
5478 }
5479 
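/*
 * Each bitmask_transtbl row maps a (mask, value) pair on the target to a
 * (mask, value) pair on the host.  Simple single-bit flags use the flag
 * as both mask and value; multi-bit fields such as NLDLY or CBAUD get one
 * row per possible value.
 */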
5480 static const bitmask_transtbl iflag_tbl[] = {
5481         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5482         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5483         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5484         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5485         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5486         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5487         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5488         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5489         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5490         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5491         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5492         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5493         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5494         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5495         { 0, 0, 0, 0 }
5496 };
5497 
5498 static const bitmask_transtbl oflag_tbl[] = {
5499 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5500 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5501 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5502 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5503 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5504 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5505 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5506 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5507 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5508 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5509 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5510 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5511 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5512 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5513 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5514 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5515 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5516 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5517 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5518 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5519 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5520 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5521 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5522 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5523 	{ 0, 0, 0, 0 }
5524 };
5525 
5526 static const bitmask_transtbl cflag_tbl[] = {
5527 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5528 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5529 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5530 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5531 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5532 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5533 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5534 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5535 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5536 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5537 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5538 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5539 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5540 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5541 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5542 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5543 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5544 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5545 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5546 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5547 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5548 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5549 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5550 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5551 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5552 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5553 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5554 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5555 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5556 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5557 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5558 	{ 0, 0, 0, 0 }
5559 };
5560 
5561 static const bitmask_transtbl lflag_tbl[] = {
5562 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5563 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5564 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5565 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5566 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5567 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5568 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5569 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5570 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5571 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5572 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5573 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5574 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5575 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5576 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5577 	{ 0, 0, 0, 0 }
5578 };
5579 
5580 static void target_to_host_termios (void *dst, const void *src)
5581 {
5582     struct host_termios *host = dst;
5583     const struct target_termios *target = src;
5584 
5585     host->c_iflag =
5586         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5587     host->c_oflag =
5588         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5589     host->c_cflag =
5590         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5591     host->c_lflag =
5592         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5593     host->c_line = target->c_line;
5594 
5595     memset(host->c_cc, 0, sizeof(host->c_cc));
5596     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5597     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5598     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5599     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5600     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5601     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5602     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5603     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5604     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5605     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5606     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5607     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5608     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5609     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5610     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5611     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5612     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5613 }
5614 
5615 static void host_to_target_termios (void *dst, const void *src)
5616 {
5617     struct target_termios *target = dst;
5618     const struct host_termios *host = src;
5619 
5620     target->c_iflag =
5621         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5622     target->c_oflag =
5623         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5624     target->c_cflag =
5625         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5626     target->c_lflag =
5627         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5628     target->c_line = host->c_line;
5629 
5630     memset(target->c_cc, 0, sizeof(target->c_cc));
5631     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5632     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5633     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5634     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5635     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5636     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5637     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5638     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5639     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5640     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5641     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5642     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5643     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5644     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5645     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5646     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5647     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5648 }
5649 
5650 static const StructEntry struct_termios_def = {
5651     .convert = { host_to_target_termios, target_to_host_termios },
5652     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5653     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5654 };
5655 
5656 static bitmask_transtbl mmap_flags_tbl[] = {
5657     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5658     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5659     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5660     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5661       MAP_ANONYMOUS, MAP_ANONYMOUS },
5662     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5663       MAP_GROWSDOWN, MAP_GROWSDOWN },
5664     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5665       MAP_DENYWRITE, MAP_DENYWRITE },
5666     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5667       MAP_EXECUTABLE, MAP_EXECUTABLE },
5668     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5669     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5670       MAP_NORESERVE, MAP_NORESERVE },
5671     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5672     /* MAP_STACK had been ignored by the kernel for quite some time.
5673        Recognize it for the target insofar as we do not want to pass
5674        it through to the host.  */
5675     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5676     { 0, 0, 0, 0 }
5677 };
5678 
5679 /*
5680  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5681  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5682  */
5683 #if defined(TARGET_I386)
5684 
5685 /* NOTE: there is really one LDT for all the threads */
5686 static uint8_t *ldt_table;
5687 
5688 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5689 {
5690     int size;
5691     void *p;
5692 
5693     if (!ldt_table)
5694         return 0;
5695     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5696     if (size > bytecount)
5697         size = bytecount;
5698     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5699     if (!p)
5700         return -TARGET_EFAULT;
5701     /* ??? Should this be byteswapped?  */
5702     memcpy(p, ldt_table, size);
5703     unlock_user(p, ptr, size);
5704     return size;
5705 }
5706 
5707 /* XXX: add locking support */
5708 static abi_long write_ldt(CPUX86State *env,
5709                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5710 {
5711     struct target_modify_ldt_ldt_s ldt_info;
5712     struct target_modify_ldt_ldt_s *target_ldt_info;
5713     int seg_32bit, contents, read_exec_only, limit_in_pages;
5714     int seg_not_present, useable, lm;
5715     uint32_t *lp, entry_1, entry_2;
5716 
5717     if (bytecount != sizeof(ldt_info))
5718         return -TARGET_EINVAL;
5719     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5720         return -TARGET_EFAULT;
5721     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5722     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5723     ldt_info.limit = tswap32(target_ldt_info->limit);
5724     ldt_info.flags = tswap32(target_ldt_info->flags);
5725     unlock_user_struct(target_ldt_info, ptr, 0);
5726 
5727     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5728         return -TARGET_EINVAL;
5729     seg_32bit = ldt_info.flags & 1;
5730     contents = (ldt_info.flags >> 1) & 3;
5731     read_exec_only = (ldt_info.flags >> 3) & 1;
5732     limit_in_pages = (ldt_info.flags >> 4) & 1;
5733     seg_not_present = (ldt_info.flags >> 5) & 1;
5734     useable = (ldt_info.flags >> 6) & 1;
5735 #ifdef TARGET_ABI32
5736     lm = 0;
5737 #else
5738     lm = (ldt_info.flags >> 7) & 1;
5739 #endif
5740     if (contents == 3) {
5741         if (oldmode)
5742             return -TARGET_EINVAL;
5743         if (seg_not_present == 0)
5744             return -TARGET_EINVAL;
5745     }
5746     /* allocate the LDT */
5747     if (!ldt_table) {
5748         env->ldt.base = target_mmap(0,
5749                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5750                                     PROT_READ|PROT_WRITE,
5751                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5752         if (env->ldt.base == -1)
5753             return -TARGET_ENOMEM;
5754         memset(g2h(env->ldt.base), 0,
5755                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5756         env->ldt.limit = 0xffff;
5757         ldt_table = g2h(env->ldt.base);
5758     }
5759 
5760     /* NOTE: same code as Linux kernel */
5761     /* Allow LDTs to be cleared by the user. */
5762     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5763         if (oldmode ||
5764             (contents == 0		&&
5765              read_exec_only == 1	&&
5766              seg_32bit == 0		&&
5767              limit_in_pages == 0	&&
5768              seg_not_present == 1	&&
5769              useable == 0 )) {
5770             entry_1 = 0;
5771             entry_2 = 0;
5772             goto install;
5773         }
5774     }
5775 
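    /*
     * Pack the two descriptor words the way the kernel's LDT code does:
     * entry_1 holds base[15:0] in its upper half and limit[15:0] in its
     * lower half; entry_2 holds base[31:24] and base[23:16], limit[19:16],
     * the type/present bits and the granularity, size, long-mode and AVL
     * flags, with the constant 0x7000 selecting a DPL-3 code/data
     * (non-system) descriptor.
     */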
5776     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5777         (ldt_info.limit & 0x0ffff);
5778     entry_2 = (ldt_info.base_addr & 0xff000000) |
5779         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5780         (ldt_info.limit & 0xf0000) |
5781         ((read_exec_only ^ 1) << 9) |
5782         (contents << 10) |
5783         ((seg_not_present ^ 1) << 15) |
5784         (seg_32bit << 22) |
5785         (limit_in_pages << 23) |
5786         (lm << 21) |
5787         0x7000;
5788     if (!oldmode)
5789         entry_2 |= (useable << 20);
5790 
5791     /* Install the new entry ...  */
5792 install:
5793     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5794     lp[0] = tswap32(entry_1);
5795     lp[1] = tswap32(entry_2);
5796     return 0;
5797 }
5798 
5799 /* specific and weird i386 syscalls */
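/*
 * func follows the kernel's modify_ldt interface: 0 reads the LDT,
 * 1 writes an entry using the legacy ("oldmode") semantics, and 0x11
 * writes an entry with the modern flag layout.
 */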
5800 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5801                               unsigned long bytecount)
5802 {
5803     abi_long ret;
5804 
5805     switch (func) {
5806     case 0:
5807         ret = read_ldt(ptr, bytecount);
5808         break;
5809     case 1:
5810         ret = write_ldt(env, ptr, bytecount, 1);
5811         break;
5812     case 0x11:
5813         ret = write_ldt(env, ptr, bytecount, 0);
5814         break;
5815     default:
5816         ret = -TARGET_ENOSYS;
5817         break;
5818     }
5819     return ret;
5820 }
5821 
5822 #if defined(TARGET_ABI32)
5823 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5824 {
5825     uint64_t *gdt_table = g2h(env->gdt.base);
5826     struct target_modify_ldt_ldt_s ldt_info;
5827     struct target_modify_ldt_ldt_s *target_ldt_info;
5828     int seg_32bit, contents, read_exec_only, limit_in_pages;
5829     int seg_not_present, useable, lm;
5830     uint32_t *lp, entry_1, entry_2;
5831     int i;
5832 
5833     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5834     if (!target_ldt_info)
5835         return -TARGET_EFAULT;
5836     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5837     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5838     ldt_info.limit = tswap32(target_ldt_info->limit);
5839     ldt_info.flags = tswap32(target_ldt_info->flags);
5840     if (ldt_info.entry_number == -1) {
5841         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5842             if (gdt_table[i] == 0) {
5843                 ldt_info.entry_number = i;
5844                 target_ldt_info->entry_number = tswap32(i);
5845                 break;
5846             }
5847         }
5848     }
5849     unlock_user_struct(target_ldt_info, ptr, 1);
5850 
5851     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5852         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5853            return -TARGET_EINVAL;
5854     seg_32bit = ldt_info.flags & 1;
5855     contents = (ldt_info.flags >> 1) & 3;
5856     read_exec_only = (ldt_info.flags >> 3) & 1;
5857     limit_in_pages = (ldt_info.flags >> 4) & 1;
5858     seg_not_present = (ldt_info.flags >> 5) & 1;
5859     useable = (ldt_info.flags >> 6) & 1;
5860 #ifdef TARGET_ABI32
5861     lm = 0;
5862 #else
5863     lm = (ldt_info.flags >> 7) & 1;
5864 #endif
5865 
5866     if (contents == 3) {
5867         if (seg_not_present == 0)
5868             return -TARGET_EINVAL;
5869     }
5870 
5871     /* NOTE: same code as Linux kernel */
5872     /* Allow LDTs to be cleared by the user. */
5873     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5874         if ((contents == 0             &&
5875              read_exec_only == 1       &&
5876              seg_32bit == 0            &&
5877              limit_in_pages == 0       &&
5878              seg_not_present == 1      &&
5879              useable == 0 )) {
5880             entry_1 = 0;
5881             entry_2 = 0;
5882             goto install;
5883         }
5884     }
5885 
5886     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5887         (ldt_info.limit & 0x0ffff);
5888     entry_2 = (ldt_info.base_addr & 0xff000000) |
5889         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5890         (ldt_info.limit & 0xf0000) |
5891         ((read_exec_only ^ 1) << 9) |
5892         (contents << 10) |
5893         ((seg_not_present ^ 1) << 15) |
5894         (seg_32bit << 22) |
5895         (limit_in_pages << 23) |
5896         (useable << 20) |
5897         (lm << 21) |
5898         0x7000;
5899 
5900     /* Install the new entry ...  */
5901 install:
5902     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5903     lp[0] = tswap32(entry_1);
5904     lp[1] = tswap32(entry_2);
5905     return 0;
5906 }
5907 
5908 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5909 {
5910     struct target_modify_ldt_ldt_s *target_ldt_info;
5911     uint64_t *gdt_table = g2h(env->gdt.base);
5912     uint32_t base_addr, limit, flags;
5913     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5914     int seg_not_present, useable, lm;
5915     uint32_t *lp, entry_1, entry_2;
5916 
5917     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5918     if (!target_ldt_info)
5919         return -TARGET_EFAULT;
5920     idx = tswap32(target_ldt_info->entry_number);
5921     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5922         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5923         unlock_user_struct(target_ldt_info, ptr, 1);
5924         return -TARGET_EINVAL;
5925     }
5926     lp = (uint32_t *)(gdt_table + idx);
5927     entry_1 = tswap32(lp[0]);
5928     entry_2 = tswap32(lp[1]);
5929 
5930     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5931     contents = (entry_2 >> 10) & 3;
5932     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5933     seg_32bit = (entry_2 >> 22) & 1;
5934     limit_in_pages = (entry_2 >> 23) & 1;
5935     useable = (entry_2 >> 20) & 1;
5936 #ifdef TARGET_ABI32
5937     lm = 0;
5938 #else
5939     lm = (entry_2 >> 21) & 1;
5940 #endif
5941     flags = (seg_32bit << 0) | (contents << 1) |
5942         (read_exec_only << 3) | (limit_in_pages << 4) |
5943         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5944     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5945     base_addr = (entry_1 >> 16) |
5946         (entry_2 & 0xff000000) |
5947         ((entry_2 & 0xff) << 16);
5948     target_ldt_info->base_addr = tswapal(base_addr);
5949     target_ldt_info->limit = tswap32(limit);
5950     target_ldt_info->flags = tswap32(flags);
5951     unlock_user_struct(target_ldt_info, ptr, 1);
5952     return 0;
5953 }
5954 
5955 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5956 {
5957     return -TARGET_ENOSYS;
5958 }
5959 #else
5960 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5961 {
5962     abi_long ret = 0;
5963     abi_ulong val;
5964     int idx;
5965 
5966     switch(code) {
5967     case TARGET_ARCH_SET_GS:
5968     case TARGET_ARCH_SET_FS:
5969         if (code == TARGET_ARCH_SET_GS)
5970             idx = R_GS;
5971         else
5972             idx = R_FS;
5973         cpu_x86_load_seg(env, idx, 0);
5974         env->segs[idx].base = addr;
5975         break;
5976     case TARGET_ARCH_GET_GS:
5977     case TARGET_ARCH_GET_FS:
5978         if (code == TARGET_ARCH_GET_GS)
5979             idx = R_GS;
5980         else
5981             idx = R_FS;
5982         val = env->segs[idx].base;
5983         if (put_user(val, addr, abi_ulong))
5984             ret = -TARGET_EFAULT;
5985         break;
5986     default:
5987         ret = -TARGET_EINVAL;
5988         break;
5989     }
5990     return ret;
5991 }
5992 #endif /* defined(TARGET_ABI32) */
5993 
5994 #endif /* defined(TARGET_I386) */
5995 
5996 #define NEW_STACK_SIZE 0x40000
5997 
5998 
5999 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6000 typedef struct {
6001     CPUArchState *env;
6002     pthread_mutex_t mutex;
6003     pthread_cond_t cond;
6004     pthread_t thread;
6005     uint32_t tid;
6006     abi_ulong child_tidptr;
6007     abi_ulong parent_tidptr;
6008     sigset_t sigmask;
6009 } new_thread_info;
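/*
 * Thread-creation handshake: the parent holds clone_lock while it copies
 * the CPU state and sets up the TLS for the child, the child signals
 * info.cond once it is running and has its tid, and then briefly takes
 * clone_lock itself so it does not enter cpu_loop() before the parent has
 * finished initializing.
 */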
6010 
6011 static void *clone_func(void *arg)
6012 {
6013     new_thread_info *info = arg;
6014     CPUArchState *env;
6015     CPUState *cpu;
6016     TaskState *ts;
6017 
6018     rcu_register_thread();
6019     tcg_register_thread();
6020     env = info->env;
6021     cpu = env_cpu(env);
6022     thread_cpu = cpu;
6023     ts = (TaskState *)cpu->opaque;
6024     info->tid = sys_gettid();
6025     task_settid(ts);
6026     if (info->child_tidptr)
6027         put_user_u32(info->tid, info->child_tidptr);
6028     if (info->parent_tidptr)
6029         put_user_u32(info->tid, info->parent_tidptr);
6030     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6031     /* Enable signals.  */
6032     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6033     /* Signal to the parent that we're ready.  */
6034     pthread_mutex_lock(&info->mutex);
6035     pthread_cond_broadcast(&info->cond);
6036     pthread_mutex_unlock(&info->mutex);
6037     /* Wait until the parent has finished initializing the tls state.  */
6038     pthread_mutex_lock(&clone_lock);
6039     pthread_mutex_unlock(&clone_lock);
6040     cpu_loop(env);
6041     /* never exits */
6042     return NULL;
6043 }
6044 
6045 /* do_fork() must return host values and target errnos (unlike most
6046    do_*() functions). */
6047 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6048                    abi_ulong parent_tidptr, target_ulong newtls,
6049                    abi_ulong child_tidptr)
6050 {
6051     CPUState *cpu = env_cpu(env);
6052     int ret;
6053     TaskState *ts;
6054     CPUState *new_cpu;
6055     CPUArchState *new_env;
6056     sigset_t sigmask;
6057 
6058     flags &= ~CLONE_IGNORED_FLAGS;
6059 
6060     /* Emulate vfork() with fork() */
6061     if (flags & CLONE_VFORK)
6062         flags &= ~(CLONE_VFORK | CLONE_VM);
6063 
6064     if (flags & CLONE_VM) {
6065         TaskState *parent_ts = (TaskState *)cpu->opaque;
6066         new_thread_info info;
6067         pthread_attr_t attr;
6068 
6069         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6070             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6071             return -TARGET_EINVAL;
6072         }
6073 
6074         ts = g_new0(TaskState, 1);
6075         init_task_state(ts);
6076 
6077         /* Grab a mutex so that thread setup appears atomic.  */
6078         pthread_mutex_lock(&clone_lock);
6079 
6080         /* we create a new CPU instance. */
6081         new_env = cpu_copy(env);
6082         /* Init regs that differ from the parent.  */
6083         cpu_clone_regs_child(new_env, newsp, flags);
6084         cpu_clone_regs_parent(env, flags);
6085         new_cpu = env_cpu(new_env);
6086         new_cpu->opaque = ts;
6087         ts->bprm = parent_ts->bprm;
6088         ts->info = parent_ts->info;
6089         ts->signal_mask = parent_ts->signal_mask;
6090 
6091         if (flags & CLONE_CHILD_CLEARTID) {
6092             ts->child_tidptr = child_tidptr;
6093         }
6094 
6095         if (flags & CLONE_SETTLS) {
6096             cpu_set_tls (new_env, newtls);
6097         }
6098 
6099         memset(&info, 0, sizeof(info));
6100         pthread_mutex_init(&info.mutex, NULL);
6101         pthread_mutex_lock(&info.mutex);
6102         pthread_cond_init(&info.cond, NULL);
6103         info.env = new_env;
6104         if (flags & CLONE_CHILD_SETTID) {
6105             info.child_tidptr = child_tidptr;
6106         }
6107         if (flags & CLONE_PARENT_SETTID) {
6108             info.parent_tidptr = parent_tidptr;
6109         }
6110 
6111         ret = pthread_attr_init(&attr);
6112         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6113         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6114         /* It is not safe to deliver signals until the child has finished
6115            initializing, so temporarily block all signals.  */
6116         sigfillset(&sigmask);
6117         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6118         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6119 
6120         /* If this is our first additional thread, we need to ensure we
6121          * generate code for parallel execution and flush old translations.
6122          */
6123         if (!parallel_cpus) {
6124             parallel_cpus = true;
6125             tb_flush(cpu);
6126         }
6127 
6128         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6129         /* TODO: Free new CPU state if thread creation failed.  */
6130 
6131         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6132         pthread_attr_destroy(&attr);
6133         if (ret == 0) {
6134             /* Wait for the child to initialize.  */
6135             pthread_cond_wait(&info.cond, &info.mutex);
6136             ret = info.tid;
6137         } else {
6138             ret = -1;
6139         }
6140         pthread_mutex_unlock(&info.mutex);
6141         pthread_cond_destroy(&info.cond);
6142         pthread_mutex_destroy(&info.mutex);
6143         pthread_mutex_unlock(&clone_lock);
6144     } else {
6145         /* without CLONE_VM, we treat the request as a plain fork */
6146         if (flags & CLONE_INVALID_FORK_FLAGS) {
6147             return -TARGET_EINVAL;
6148         }
6149 
6150         /* We can't support custom termination signals */
6151         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6152             return -TARGET_EINVAL;
6153         }
6154 
6155         if (block_signals()) {
6156             return -TARGET_ERESTARTSYS;
6157         }
6158 
6159         fork_start();
6160         ret = fork();
6161         if (ret == 0) {
6162             /* Child Process.  */
6163             cpu_clone_regs_child(env, newsp, flags);
6164             fork_end(1);
6165             /* There is a race condition here.  The parent process could
6166                theoretically read the TID in the child process before the
6167                child tid is set.  Avoiding this would require either ptrace
6168                (not implemented) or having *_tidptr point at a shared memory
6169                mapping.  We can't repeat the spinlock hack used above because
6170                the child process gets its own copy of the lock.  */
6171             if (flags & CLONE_CHILD_SETTID)
6172                 put_user_u32(sys_gettid(), child_tidptr);
6173             if (flags & CLONE_PARENT_SETTID)
6174                 put_user_u32(sys_gettid(), parent_tidptr);
6175             ts = (TaskState *)cpu->opaque;
6176             if (flags & CLONE_SETTLS)
6177                 cpu_set_tls(env, newtls);
6178             if (flags & CLONE_CHILD_CLEARTID)
6179                 ts->child_tidptr = child_tidptr;
6180         } else {
6181             cpu_clone_regs_parent(env, flags);
6182             fork_end(0);
6183         }
6184     }
6185     return ret;
6186 }
6187 
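/*
 * Illustrative sketch, not part of the original source: a tiny helper showing
 * how the flag tests in do_fork() above classify a guest clone() request.  It
 * reuses the CLONE_* masks that do_fork() already relies on; the helper name
 * itself is hypothetical and nothing calls it.
 */
static inline const char *clone_request_kind(unsigned int flags)
{
    flags &= ~CLONE_IGNORED_FLAGS;
    if (flags & CLONE_VFORK) {
        /* vfork() is emulated with fork(), exactly as do_fork() does */
        flags &= ~(CLONE_VFORK | CLONE_VM);
    }
    if (flags & CLONE_VM) {
        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return "unsupported thread-style clone";
        }
        return "new thread via pthread_create";
    }
    if ((flags & CLONE_INVALID_FORK_FLAGS) ||
        (flags & CSIGNAL) != TARGET_SIGCHLD) {
        return "unsupported fork-style clone";
    }
    return "fork";
}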
6188 /* warning: doesn't handle Linux-specific flags... */
6189 static int target_to_host_fcntl_cmd(int cmd)
6190 {
6191     int ret;
6192 
6193     switch(cmd) {
6194     case TARGET_F_DUPFD:
6195     case TARGET_F_GETFD:
6196     case TARGET_F_SETFD:
6197     case TARGET_F_GETFL:
6198     case TARGET_F_SETFL:
6199     case TARGET_F_OFD_GETLK:
6200     case TARGET_F_OFD_SETLK:
6201     case TARGET_F_OFD_SETLKW:
6202         ret = cmd;
6203         break;
6204     case TARGET_F_GETLK:
6205         ret = F_GETLK64;
6206         break;
6207     case TARGET_F_SETLK:
6208         ret = F_SETLK64;
6209         break;
6210     case TARGET_F_SETLKW:
6211         ret = F_SETLKW64;
6212         break;
6213     case TARGET_F_GETOWN:
6214         ret = F_GETOWN;
6215         break;
6216     case TARGET_F_SETOWN:
6217         ret = F_SETOWN;
6218         break;
6219     case TARGET_F_GETSIG:
6220         ret = F_GETSIG;
6221         break;
6222     case TARGET_F_SETSIG:
6223         ret = F_SETSIG;
6224         break;
6225 #if TARGET_ABI_BITS == 32
6226     case TARGET_F_GETLK64:
6227         ret = F_GETLK64;
6228         break;
6229     case TARGET_F_SETLK64:
6230         ret = F_SETLK64;
6231         break;
6232     case TARGET_F_SETLKW64:
6233         ret = F_SETLKW64;
6234         break;
6235 #endif
6236     case TARGET_F_SETLEASE:
6237         ret = F_SETLEASE;
6238         break;
6239     case TARGET_F_GETLEASE:
6240         ret = F_GETLEASE;
6241         break;
6242 #ifdef F_DUPFD_CLOEXEC
6243     case TARGET_F_DUPFD_CLOEXEC:
6244         ret = F_DUPFD_CLOEXEC;
6245         break;
6246 #endif
6247     case TARGET_F_NOTIFY:
6248         ret = F_NOTIFY;
6249         break;
6250 #ifdef F_GETOWN_EX
6251     case TARGET_F_GETOWN_EX:
6252         ret = F_GETOWN_EX;
6253         break;
6254 #endif
6255 #ifdef F_SETOWN_EX
6256     case TARGET_F_SETOWN_EX:
6257         ret = F_SETOWN_EX;
6258         break;
6259 #endif
6260 #ifdef F_SETPIPE_SZ
6261     case TARGET_F_SETPIPE_SZ:
6262         ret = F_SETPIPE_SZ;
6263         break;
6264     case TARGET_F_GETPIPE_SZ:
6265         ret = F_GETPIPE_SZ;
6266         break;
6267 #endif
6268     default:
6269         ret = -TARGET_EINVAL;
6270         break;
6271     }
6272 
6273 #if defined(__powerpc64__)
6274     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and
6275      * 14, which the kernel does not accept. The glibc fcntl() wrapper
6276      * adjusts them to 5, 6 and 7 before making the syscall. Since we make
6277      * the syscall directly, adjust to what the kernel supports.
6278      */
6279     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6280         ret -= F_GETLK64 - 5;
6281     }
6282 #endif
6283 
6284     return ret;
6285 }
6286 
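/*
 * Illustrative sketch, not part of the original source: the intended usage of
 * target_to_host_fcntl_cmd().  The guest command is translated first and the
 * host fcntl() is issued only if the translation succeeded; do_fcntl() below
 * follows this pattern and additionally converts struct arguments.  The
 * helper name is hypothetical and only covers commands whose argument can be
 * passed through unchanged (e.g. F_GETFD/F_SETFD).
 */
static inline abi_long fcntl_passthrough_example(int fd, int target_cmd,
                                                 abi_ulong arg)
{
    int host_cmd = target_to_host_fcntl_cmd(target_cmd);

    if (host_cmd == -TARGET_EINVAL) {
        return host_cmd;   /* unknown or unsupported guest command */
    }
    return get_errno(safe_fcntl(fd, host_cmd, arg));
}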
6287 #define FLOCK_TRANSTBL \
6288     switch (type) { \
6289     TRANSTBL_CONVERT(F_RDLCK); \
6290     TRANSTBL_CONVERT(F_WRLCK); \
6291     TRANSTBL_CONVERT(F_UNLCK); \
6292     TRANSTBL_CONVERT(F_EXLCK); \
6293     TRANSTBL_CONVERT(F_SHLCK); \
6294     }
6295 
6296 static int target_to_host_flock(int type)
6297 {
6298 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6299     FLOCK_TRANSTBL
6300 #undef  TRANSTBL_CONVERT
6301     return -TARGET_EINVAL;
6302 }
6303 
6304 static int host_to_target_flock(int type)
6305 {
6306 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6307     FLOCK_TRANSTBL
6308 #undef  TRANSTBL_CONVERT
6309     /* if we don't know how to convert the value coming
6310      * from the host, copy it to the target field as-is
6311      */
6312     return type;
6313 }
6314 
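/*
 * For reference (illustrative, not part of the original source): with the
 * TRANSTBL_CONVERT definition used in target_to_host_flock(), FLOCK_TRANSTBL
 * expands to a plain switch of the form
 *
 *     case TARGET_F_RDLCK: return F_RDLCK;
 *     case TARGET_F_WRLCK: return F_WRLCK;
 *     ...
 *
 * and host_to_target_flock() gets the same switch with the case labels and
 * return values swapped.  A minimal round-trip check (hypothetical helper,
 * nothing calls it):
 */
static inline void flock_type_roundtrip_example(void)
{
    int host_type = target_to_host_flock(TARGET_F_RDLCK);   /* -> F_RDLCK */
    g_assert(host_to_target_flock(host_type) == TARGET_F_RDLCK);
}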
6315 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6316                                             abi_ulong target_flock_addr)
6317 {
6318     struct target_flock *target_fl;
6319     int l_type;
6320 
6321     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6322         return -TARGET_EFAULT;
6323     }
6324 
6325     __get_user(l_type, &target_fl->l_type);
6326     l_type = target_to_host_flock(l_type);
6327     if (l_type < 0) {
        unlock_user_struct(target_fl, target_flock_addr, 0);
6328         return l_type;
6329     }
6330     fl->l_type = l_type;
6331     __get_user(fl->l_whence, &target_fl->l_whence);
6332     __get_user(fl->l_start, &target_fl->l_start);
6333     __get_user(fl->l_len, &target_fl->l_len);
6334     __get_user(fl->l_pid, &target_fl->l_pid);
6335     unlock_user_struct(target_fl, target_flock_addr, 0);
6336     return 0;
6337 }
6338 
6339 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6340                                           const struct flock64 *fl)
6341 {
6342     struct target_flock *target_fl;
6343     short l_type;
6344 
6345     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6346         return -TARGET_EFAULT;
6347     }
6348 
6349     l_type = host_to_target_flock(fl->l_type);
6350     __put_user(l_type, &target_fl->l_type);
6351     __put_user(fl->l_whence, &target_fl->l_whence);
6352     __put_user(fl->l_start, &target_fl->l_start);
6353     __put_user(fl->l_len, &target_fl->l_len);
6354     __put_user(fl->l_pid, &target_fl->l_pid);
6355     unlock_user_struct(target_fl, target_flock_addr, 1);
6356     return 0;
6357 }
6358 
6359 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6360 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6361 
6362 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6363 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6364                                                    abi_ulong target_flock_addr)
6365 {
6366     struct target_oabi_flock64 *target_fl;
6367     int l_type;
6368 
6369     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6370         return -TARGET_EFAULT;
6371     }
6372 
6373     __get_user(l_type, &target_fl->l_type);
6374     l_type = target_to_host_flock(l_type);
6375     if (l_type < 0) {
        unlock_user_struct(target_fl, target_flock_addr, 0);
6376         return l_type;
6377     }
6378     fl->l_type = l_type;
6379     __get_user(fl->l_whence, &target_fl->l_whence);
6380     __get_user(fl->l_start, &target_fl->l_start);
6381     __get_user(fl->l_len, &target_fl->l_len);
6382     __get_user(fl->l_pid, &target_fl->l_pid);
6383     unlock_user_struct(target_fl, target_flock_addr, 0);
6384     return 0;
6385 }
6386 
6387 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6388                                                  const struct flock64 *fl)
6389 {
6390     struct target_oabi_flock64 *target_fl;
6391     short l_type;
6392 
6393     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6394         return -TARGET_EFAULT;
6395     }
6396 
6397     l_type = host_to_target_flock(fl->l_type);
6398     __put_user(l_type, &target_fl->l_type);
6399     __put_user(fl->l_whence, &target_fl->l_whence);
6400     __put_user(fl->l_start, &target_fl->l_start);
6401     __put_user(fl->l_len, &target_fl->l_len);
6402     __put_user(fl->l_pid, &target_fl->l_pid);
6403     unlock_user_struct(target_fl, target_flock_addr, 1);
6404     return 0;
6405 }
6406 #endif
6407 
6408 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6409                                               abi_ulong target_flock_addr)
6410 {
6411     struct target_flock64 *target_fl;
6412     int l_type;
6413 
6414     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6415         return -TARGET_EFAULT;
6416     }
6417 
6418     __get_user(l_type, &target_fl->l_type);
6419     l_type = target_to_host_flock(l_type);
6420     if (l_type < 0) {
        unlock_user_struct(target_fl, target_flock_addr, 0);
6421         return l_type;
6422     }
6423     fl->l_type = l_type;
6424     __get_user(fl->l_whence, &target_fl->l_whence);
6425     __get_user(fl->l_start, &target_fl->l_start);
6426     __get_user(fl->l_len, &target_fl->l_len);
6427     __get_user(fl->l_pid, &target_fl->l_pid);
6428     unlock_user_struct(target_fl, target_flock_addr, 0);
6429     return 0;
6430 }
6431 
6432 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6433                                             const struct flock64 *fl)
6434 {
6435     struct target_flock64 *target_fl;
6436     short l_type;
6437 
6438     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6439         return -TARGET_EFAULT;
6440     }
6441 
6442     l_type = host_to_target_flock(fl->l_type);
6443     __put_user(l_type, &target_fl->l_type);
6444     __put_user(fl->l_whence, &target_fl->l_whence);
6445     __put_user(fl->l_start, &target_fl->l_start);
6446     __put_user(fl->l_len, &target_fl->l_len);
6447     __put_user(fl->l_pid, &target_fl->l_pid);
6448     unlock_user_struct(target_fl, target_flock_addr, 1);
6449     return 0;
6450 }
6451 
6452 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6453 {
6454     struct flock64 fl64;
6455 #ifdef F_GETOWN_EX
6456     struct f_owner_ex fox;
6457     struct target_f_owner_ex *target_fox;
6458 #endif
6459     abi_long ret;
6460     int host_cmd = target_to_host_fcntl_cmd(cmd);
6461 
6462     if (host_cmd == -TARGET_EINVAL)
6463         return host_cmd;
6464 
6465     switch(cmd) {
6466     case TARGET_F_GETLK:
6467         ret = copy_from_user_flock(&fl64, arg);
6468         if (ret) {
6469             return ret;
6470         }
6471         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6472         if (ret == 0) {
6473             ret = copy_to_user_flock(arg, &fl64);
6474         }
6475         break;
6476 
6477     case TARGET_F_SETLK:
6478     case TARGET_F_SETLKW:
6479         ret = copy_from_user_flock(&fl64, arg);
6480         if (ret) {
6481             return ret;
6482         }
6483         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6484         break;
6485 
6486     case TARGET_F_GETLK64:
6487     case TARGET_F_OFD_GETLK:
6488         ret = copy_from_user_flock64(&fl64, arg);
6489         if (ret) {
6490             return ret;
6491         }
6492         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6493         if (ret == 0) {
6494             ret = copy_to_user_flock64(arg, &fl64);
6495         }
6496         break;
6497     case TARGET_F_SETLK64:
6498     case TARGET_F_SETLKW64:
6499     case TARGET_F_OFD_SETLK:
6500     case TARGET_F_OFD_SETLKW:
6501         ret = copy_from_user_flock64(&fl64, arg);
6502         if (ret) {
6503             return ret;
6504         }
6505         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6506         break;
6507 
6508     case TARGET_F_GETFL:
6509         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6510         if (ret >= 0) {
6511             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6512         }
6513         break;
6514 
6515     case TARGET_F_SETFL:
6516         ret = get_errno(safe_fcntl(fd, host_cmd,
6517                                    target_to_host_bitmask(arg,
6518                                                           fcntl_flags_tbl)));
6519         break;
6520 
6521 #ifdef F_GETOWN_EX
6522     case TARGET_F_GETOWN_EX:
6523         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6524         if (ret >= 0) {
6525             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6526                 return -TARGET_EFAULT;
6527             target_fox->type = tswap32(fox.type);
6528             target_fox->pid = tswap32(fox.pid);
6529             unlock_user_struct(target_fox, arg, 1);
6530         }
6531         break;
6532 #endif
6533 
6534 #ifdef F_SETOWN_EX
6535     case TARGET_F_SETOWN_EX:
6536         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6537             return -TARGET_EFAULT;
6538         fox.type = tswap32(target_fox->type);
6539         fox.pid = tswap32(target_fox->pid);
6540         unlock_user_struct(target_fox, arg, 0);
6541         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6542         break;
6543 #endif
6544 
6545     case TARGET_F_SETOWN:
6546     case TARGET_F_GETOWN:
6547     case TARGET_F_SETSIG:
6548     case TARGET_F_GETSIG:
6549     case TARGET_F_SETLEASE:
6550     case TARGET_F_GETLEASE:
6551     case TARGET_F_SETPIPE_SZ:
6552     case TARGET_F_GETPIPE_SZ:
6553         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6554         break;
6555 
6556     default:
6557         ret = get_errno(safe_fcntl(fd, cmd, arg));
6558         break;
6559     }
6560     return ret;
6561 }
6562 
6563 #ifdef USE_UID16
6564 
6565 static inline int high2lowuid(int uid)
6566 {
6567     if (uid > 65535)
6568         return 65534;
6569     else
6570         return uid;
6571 }
6572 
6573 static inline int high2lowgid(int gid)
6574 {
6575     if (gid > 65535)
6576         return 65534;
6577     else
6578         return gid;
6579 }
6580 
6581 static inline int low2highuid(int uid)
6582 {
6583     if ((int16_t)uid == -1)
6584         return -1;
6585     else
6586         return uid;
6587 }
6588 
6589 static inline int low2highgid(int gid)
6590 {
6591     if ((int16_t)gid == -1)
6592         return -1;
6593     else
6594         return gid;
6595 }
6596 static inline int tswapid(int id)
6597 {
6598     return tswap16(id);
6599 }
6600 
6601 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6602 
6603 #else /* !USE_UID16 */
6604 static inline int high2lowuid(int uid)
6605 {
6606     return uid;
6607 }
6608 static inline int high2lowgid(int gid)
6609 {
6610     return gid;
6611 }
6612 static inline int low2highuid(int uid)
6613 {
6614     return uid;
6615 }
6616 static inline int low2highgid(int gid)
6617 {
6618     return gid;
6619 }
6620 static inline int tswapid(int id)
6621 {
6622     return tswap32(id);
6623 }
6624 
6625 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6626 
6627 #endif /* USE_UID16 */
6628 
6629 /* We must do direct syscalls for setting UID/GID, because we want to
6630  * implement the Linux system call semantics of "change only for this thread",
6631  * not the libc/POSIX semantics of "change for all threads in process".
6632  * (See http://ewontfix.com/17/ for more details.)
6633  * We use the 32-bit version of the syscalls if present; if it is not
6634  * then either the host architecture supports 32-bit UIDs natively with
6635  * the standard syscall, or the 16-bit UID is the best we can do.
6636  */
6637 #ifdef __NR_setuid32
6638 #define __NR_sys_setuid __NR_setuid32
6639 #else
6640 #define __NR_sys_setuid __NR_setuid
6641 #endif
6642 #ifdef __NR_setgid32
6643 #define __NR_sys_setgid __NR_setgid32
6644 #else
6645 #define __NR_sys_setgid __NR_setgid
6646 #endif
6647 #ifdef __NR_setresuid32
6648 #define __NR_sys_setresuid __NR_setresuid32
6649 #else
6650 #define __NR_sys_setresuid __NR_setresuid
6651 #endif
6652 #ifdef __NR_setresgid32
6653 #define __NR_sys_setresgid __NR_setresgid32
6654 #else
6655 #define __NR_sys_setresgid __NR_setresgid
6656 #endif
6657 
6658 _syscall1(int, sys_setuid, uid_t, uid)
6659 _syscall1(int, sys_setgid, gid_t, gid)
6660 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6661 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6662 
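/*
 * Illustrative sketch, not part of the original source: how the raw wrappers
 * above are meant to be used when emulating a guest setuid().  On targets
 * with 16-bit UIDs the guest value is widened with low2highuid(); elsewhere
 * it passes through unchanged.  Because this is the raw syscall, only the
 * calling thread's credentials change, matching the kernel semantics the
 * comment above describes.  The helper name is hypothetical.
 */
static inline abi_long emulate_guest_setuid_example(abi_ulong target_uid)
{
    return get_errno(sys_setuid(low2highuid(target_uid)));
}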
6663 void syscall_init(void)
6664 {
6665     IOCTLEntry *ie;
6666     const argtype *arg_type;
6667     int size;
6668     int i;
6669 
6670     thunk_init(STRUCT_MAX);
6671 
6672 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6673 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6674 #include "syscall_types.h"
6675 #undef STRUCT
6676 #undef STRUCT_SPECIAL
6677 
6678     /* Build target_to_host_errno_table[] from
6679      * host_to_target_errno_table[]. */
6680     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6681         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6682     }
6683 
6684     /* we patch the ioctl size if necessary. We rely on the fact that
6685        no ioctl has all the bits at '1' in the size field */
6686     ie = ioctl_entries;
6687     while (ie->target_cmd != 0) {
6688         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6689             TARGET_IOC_SIZEMASK) {
6690             arg_type = ie->arg_type;
6691             if (arg_type[0] != TYPE_PTR) {
6692                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6693                         ie->target_cmd);
6694                 exit(1);
6695             }
6696             arg_type++;
6697             size = thunk_type_size(arg_type, 0);
6698             ie->target_cmd = (ie->target_cmd &
6699                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6700                 (size << TARGET_IOC_SIZESHIFT);
6701         }
6702 
6703         /* automatic consistency check if same arch */
6704 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6705     (defined(__x86_64__) && defined(TARGET_X86_64))
6706         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6707             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6708                     ie->name, ie->target_cmd, ie->host_cmd);
6709         }
6710 #endif
6711         ie++;
6712     }
6713 }
6714 
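/*
 * Illustrative sketch, not part of the original source: the effect of the
 * size patching performed in syscall_init() on a single table entry.  An
 * entry registered with the size field set to all ones has the real size of
 * its pointed-to structure substituted, so later comparisons against the
 * guest's ioctl request number match.  Hypothetical helper, mirroring the
 * code above; nothing calls it.
 */
static inline unsigned int ioctl_patch_size_example(unsigned int target_cmd,
                                                    int real_size)
{
    target_cmd &= ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT);
    target_cmd |= (unsigned int)real_size << TARGET_IOC_SIZESHIFT;
    return target_cmd;
}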
6715 #if TARGET_ABI_BITS == 32
6716 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6717 {
6718 #ifdef TARGET_WORDS_BIGENDIAN
6719     return ((uint64_t)word0 << 32) | word1;
6720 #else
6721     return ((uint64_t)word1 << 32) | word0;
6722 #endif
6723 }
6724 #else /* TARGET_ABI_BITS == 32 */
6725 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6726 {
6727     return word0;
6728 }
6729 #endif /* TARGET_ABI_BITS != 32 */
6730 
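/*
 * Illustrative example, not part of the original source: a 32-bit
 * little-endian guest splits the 64-bit offset 0x0000000123456789 into the
 * register pair (word0, word1) = (0x23456789, 0x00000001); a big-endian
 * guest passes the halves in the opposite order.  The hypothetical helper
 * below only demonstrates the reassembly done by target_offset64().
 */
#if TARGET_ABI_BITS == 32 && !defined(TARGET_WORDS_BIGENDIAN)
static inline void target_offset64_example(void)
{
    g_assert(target_offset64(0x23456789u, 0x00000001u) ==
             0x0000000123456789ULL);
}
#endif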
6731 #ifdef TARGET_NR_truncate64
6732 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6733                                          abi_long arg2,
6734                                          abi_long arg3,
6735                                          abi_long arg4)
6736 {
6737     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6738         arg2 = arg3;
6739         arg3 = arg4;
6740     }
6741     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6742 }
6743 #endif
6744 
6745 #ifdef TARGET_NR_ftruncate64
6746 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6747                                           abi_long arg2,
6748                                           abi_long arg3,
6749                                           abi_long arg4)
6750 {
6751     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6752         arg2 = arg3;
6753         arg3 = arg4;
6754     }
6755     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6756 }
6757 #endif
6758 
6759 #if defined(TARGET_NR_timer_settime) || \
6760     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
6761 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6762                                                  abi_ulong target_addr)
6763 {
6764     struct target_itimerspec *target_itspec;
6765 
6766     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6767         return -TARGET_EFAULT;
6768     }
6769 
6770     host_itspec->it_interval.tv_sec =
6771                             tswapal(target_itspec->it_interval.tv_sec);
6772     host_itspec->it_interval.tv_nsec =
6773                             tswapal(target_itspec->it_interval.tv_nsec);
6774     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6775     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6776 
6777     unlock_user_struct(target_itspec, target_addr, 1);
6778     return 0;
6779 }
6780 #endif
6781 
6782 #if ((defined(TARGET_NR_timerfd_gettime) || \
6783       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
6784     defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
6785 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6786                                                struct itimerspec *host_its)
6787 {
6788     struct target_itimerspec *target_itspec;
6789 
6790     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6791         return -TARGET_EFAULT;
6792     }
6793 
6794     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6795     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6796 
6797     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6798     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6799 
6800     unlock_user_struct(target_itspec, target_addr, 0);
6801     return 0;
6802 }
6803 #endif
6804 
6805 #if defined(TARGET_NR_adjtimex) || \
6806     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6807 static inline abi_long target_to_host_timex(struct timex *host_tx,
6808                                             abi_long target_addr)
6809 {
6810     struct target_timex *target_tx;
6811 
6812     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6813         return -TARGET_EFAULT;
6814     }
6815 
6816     __get_user(host_tx->modes, &target_tx->modes);
6817     __get_user(host_tx->offset, &target_tx->offset);
6818     __get_user(host_tx->freq, &target_tx->freq);
6819     __get_user(host_tx->maxerror, &target_tx->maxerror);
6820     __get_user(host_tx->esterror, &target_tx->esterror);
6821     __get_user(host_tx->status, &target_tx->status);
6822     __get_user(host_tx->constant, &target_tx->constant);
6823     __get_user(host_tx->precision, &target_tx->precision);
6824     __get_user(host_tx->tolerance, &target_tx->tolerance);
6825     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6826     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6827     __get_user(host_tx->tick, &target_tx->tick);
6828     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6829     __get_user(host_tx->jitter, &target_tx->jitter);
6830     __get_user(host_tx->shift, &target_tx->shift);
6831     __get_user(host_tx->stabil, &target_tx->stabil);
6832     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6833     __get_user(host_tx->calcnt, &target_tx->calcnt);
6834     __get_user(host_tx->errcnt, &target_tx->errcnt);
6835     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6836     __get_user(host_tx->tai, &target_tx->tai);
6837 
6838     unlock_user_struct(target_tx, target_addr, 0);
6839     return 0;
6840 }
6841 
6842 static inline abi_long host_to_target_timex(abi_long target_addr,
6843                                             struct timex *host_tx)
6844 {
6845     struct target_timex *target_tx;
6846 
6847     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6848         return -TARGET_EFAULT;
6849     }
6850 
6851     __put_user(host_tx->modes, &target_tx->modes);
6852     __put_user(host_tx->offset, &target_tx->offset);
6853     __put_user(host_tx->freq, &target_tx->freq);
6854     __put_user(host_tx->maxerror, &target_tx->maxerror);
6855     __put_user(host_tx->esterror, &target_tx->esterror);
6856     __put_user(host_tx->status, &target_tx->status);
6857     __put_user(host_tx->constant, &target_tx->constant);
6858     __put_user(host_tx->precision, &target_tx->precision);
6859     __put_user(host_tx->tolerance, &target_tx->tolerance);
6860     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6861     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6862     __put_user(host_tx->tick, &target_tx->tick);
6863     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6864     __put_user(host_tx->jitter, &target_tx->jitter);
6865     __put_user(host_tx->shift, &target_tx->shift);
6866     __put_user(host_tx->stabil, &target_tx->stabil);
6867     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6868     __put_user(host_tx->calcnt, &target_tx->calcnt);
6869     __put_user(host_tx->errcnt, &target_tx->errcnt);
6870     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6871     __put_user(host_tx->tai, &target_tx->tai);
6872 
6873     unlock_user_struct(target_tx, target_addr, 1);
6874     return 0;
6875 }
6876 #endif
6877 
6878 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6879                                                abi_ulong target_addr)
6880 {
6881     struct target_sigevent *target_sevp;
6882 
6883     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6884         return -TARGET_EFAULT;
6885     }
6886 
6887     /* This union is awkward on 64 bit systems because it has a 32 bit
6888      * integer and a pointer in it; we follow the conversion approach
6889      * used for handling sigval types in signal.c so the guest should get
6890      * the correct value back even if we did a 64 bit byteswap and it's
6891      * using the 32 bit integer.
6892      */
6893     host_sevp->sigev_value.sival_ptr =
6894         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6895     host_sevp->sigev_signo =
6896         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6897     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6898     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6899 
6900     unlock_user_struct(target_sevp, target_addr, 1);
6901     return 0;
6902 }
6903 
6904 #if defined(TARGET_NR_mlockall)
6905 static inline int target_to_host_mlockall_arg(int arg)
6906 {
6907     int result = 0;
6908 
6909     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6910         result |= MCL_CURRENT;
6911     }
6912     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6913         result |= MCL_FUTURE;
6914     }
6915     return result;
6916 }
6917 #endif
6918 
6919 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6920      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6921      defined(TARGET_NR_newfstatat))
6922 static inline abi_long host_to_target_stat64(void *cpu_env,
6923                                              abi_ulong target_addr,
6924                                              struct stat *host_st)
6925 {
6926 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6927     if (((CPUARMState *)cpu_env)->eabi) {
6928         struct target_eabi_stat64 *target_st;
6929 
6930         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6931             return -TARGET_EFAULT;
6932         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6933         __put_user(host_st->st_dev, &target_st->st_dev);
6934         __put_user(host_st->st_ino, &target_st->st_ino);
6935 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6936         __put_user(host_st->st_ino, &target_st->__st_ino);
6937 #endif
6938         __put_user(host_st->st_mode, &target_st->st_mode);
6939         __put_user(host_st->st_nlink, &target_st->st_nlink);
6940         __put_user(host_st->st_uid, &target_st->st_uid);
6941         __put_user(host_st->st_gid, &target_st->st_gid);
6942         __put_user(host_st->st_rdev, &target_st->st_rdev);
6943         __put_user(host_st->st_size, &target_st->st_size);
6944         __put_user(host_st->st_blksize, &target_st->st_blksize);
6945         __put_user(host_st->st_blocks, &target_st->st_blocks);
6946         __put_user(host_st->st_atime, &target_st->target_st_atime);
6947         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6948         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6949 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6950         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6951         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6952         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6953 #endif
6954         unlock_user_struct(target_st, target_addr, 1);
6955     } else
6956 #endif
6957     {
6958 #if defined(TARGET_HAS_STRUCT_STAT64)
6959         struct target_stat64 *target_st;
6960 #else
6961         struct target_stat *target_st;
6962 #endif
6963 
6964         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6965             return -TARGET_EFAULT;
6966         memset(target_st, 0, sizeof(*target_st));
6967         __put_user(host_st->st_dev, &target_st->st_dev);
6968         __put_user(host_st->st_ino, &target_st->st_ino);
6969 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6970         __put_user(host_st->st_ino, &target_st->__st_ino);
6971 #endif
6972         __put_user(host_st->st_mode, &target_st->st_mode);
6973         __put_user(host_st->st_nlink, &target_st->st_nlink);
6974         __put_user(host_st->st_uid, &target_st->st_uid);
6975         __put_user(host_st->st_gid, &target_st->st_gid);
6976         __put_user(host_st->st_rdev, &target_st->st_rdev);
6977         /* XXX: better use of kernel struct */
6978         __put_user(host_st->st_size, &target_st->st_size);
6979         __put_user(host_st->st_blksize, &target_st->st_blksize);
6980         __put_user(host_st->st_blocks, &target_st->st_blocks);
6981         __put_user(host_st->st_atime, &target_st->target_st_atime);
6982         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6983         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6984 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6985         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6986         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6987         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6988 #endif
6989         unlock_user_struct(target_st, target_addr, 1);
6990     }
6991 
6992     return 0;
6993 }
6994 #endif
6995 
6996 #if defined(TARGET_NR_statx) && defined(__NR_statx)
6997 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
6998                                             abi_ulong target_addr)
6999 {
7000     struct target_statx *target_stx;
7001 
7002     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7003         return -TARGET_EFAULT;
7004     }
7005     memset(target_stx, 0, sizeof(*target_stx));
7006 
7007     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7008     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7009     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7010     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7011     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7012     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7013     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7014     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7015     __put_user(host_stx->stx_size, &target_stx->stx_size);
7016     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7017     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7018     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7019     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7020     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7021     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7022     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7023     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7024     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7025     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7026     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7027     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7028     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7029     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7030 
7031     unlock_user_struct(target_stx, target_addr, 1);
7032 
7033     return 0;
7034 }
7035 #endif
7036 
7037 static int do_sys_futex(int *uaddr, int op, int val,
7038                          const struct timespec *timeout, int *uaddr2,
7039                          int val3)
7040 {
7041 #if HOST_LONG_BITS == 64
7042 #if defined(__NR_futex)
7043     /* A 64-bit host has a 64-bit time_t and no _time64 variant. */
7044     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7045 
7046 #endif
7047 #else /* HOST_LONG_BITS == 64 */
7048 #if defined(__NR_futex_time64)
7049     if (sizeof(timeout->tv_sec) == 8) {
7050         /* _time64 function on 32bit arch */
7051         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7052     }
7053 #endif
7054 #if defined(__NR_futex)
7055     /* old function on 32bit arch */
7056     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7057 #endif
7058 #endif /* HOST_LONG_BITS == 64 */
7059     g_assert_not_reached();
7060 }
7061 
7062 static int do_safe_futex(int *uaddr, int op, int val,
7063                          const struct timespec *timeout, int *uaddr2,
7064                          int val3)
7065 {
7066 #if HOST_LONG_BITS == 64
7067 #if defined(__NR_futex)
7068     /* A 64-bit host has a 64-bit time_t and no _time64 variant. */
7069     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7070 #endif
7071 #else /* HOST_LONG_BITS == 64 */
7072 #if defined(__NR_futex_time64)
7073     if (sizeof(timeout->tv_sec) == 8) {
7074         /* _time64 function on 32bit arch */
7075         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7076                                            val3));
7077     }
7078 #endif
7079 #if defined(__NR_futex)
7080     /* old function on 32bit arch */
7081     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7082 #endif
7083 #endif /* HOST_LONG_BITS == 64 */
7084     return -TARGET_ENOSYS;
7085 }
7086 
7087 /* ??? Using host futex calls even when target atomic operations
7088    are not really atomic probably breaks things.  However, implementing
7089    futexes locally would make futexes shared between multiple processes
7090    tricky.  They are probably useless in that case anyway, because guest
7091    atomic operations won't work either.  */
7092 #if defined(TARGET_NR_futex)
7093 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7094                     target_ulong uaddr2, int val3)
7095 {
7096     struct timespec ts, *pts;
7097     int base_op;
7098 
7099     /* ??? We assume FUTEX_* constants are the same on both host
7100        and target.  */
7101 #ifdef FUTEX_CMD_MASK
7102     base_op = op & FUTEX_CMD_MASK;
7103 #else
7104     base_op = op;
7105 #endif
7106     switch (base_op) {
7107     case FUTEX_WAIT:
7108     case FUTEX_WAIT_BITSET:
7109         if (timeout) {
7110             pts = &ts;
7111             target_to_host_timespec(pts, timeout);
7112         } else {
7113             pts = NULL;
7114         }
7115         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7116     case FUTEX_WAKE:
7117         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7118     case FUTEX_FD:
7119         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7120     case FUTEX_REQUEUE:
7121     case FUTEX_CMP_REQUEUE:
7122     case FUTEX_WAKE_OP:
7123         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7124            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7125            But the prototype takes a `struct timespec *'; insert casts
7126            to satisfy the compiler.  We do not need to tswap TIMEOUT
7127            since it's not compared to guest memory.  */
7128         pts = (struct timespec *)(uintptr_t) timeout;
7129         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7130                              (base_op == FUTEX_CMP_REQUEUE
7131                                       ? tswap32(val3)
7132                                       : val3));
7133     default:
7134         return -TARGET_ENOSYS;
7135     }
7136 }
7137 #endif
7138 
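/*
 * Illustrative sketch, not part of the original source: waking every waiter
 * on a futex word that lives in guest memory, much as the
 * CLONE_CHILD_CLEARTID handling on the thread-exit path below does.  The
 * guest address is translated with g2h(); the wakeup count is not guest
 * data, so it needs no byte swap.  The helper name is hypothetical.
 */
static inline void futex_wake_all_example(abi_ulong guest_addr)
{
    do_safe_futex(g2h(guest_addr), FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
}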
7139 #if defined(TARGET_NR_futex_time64)
7140 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7141                            target_ulong uaddr2, int val3)
7142 {
7143     struct timespec ts, *pts;
7144     int base_op;
7145 
7146     /* ??? We assume FUTEX_* constants are the same on both host
7147        and target.  */
7148 #ifdef FUTEX_CMD_MASK
7149     base_op = op & FUTEX_CMD_MASK;
7150 #else
7151     base_op = op;
7152 #endif
7153     switch (base_op) {
7154     case FUTEX_WAIT:
7155     case FUTEX_WAIT_BITSET:
7156         if (timeout) {
7157             pts = &ts;
7158             target_to_host_timespec64(pts, timeout);
7159         } else {
7160             pts = NULL;
7161         }
7162         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7163     case FUTEX_WAKE:
7164         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7165     case FUTEX_FD:
7166         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7167     case FUTEX_REQUEUE:
7168     case FUTEX_CMP_REQUEUE:
7169     case FUTEX_WAKE_OP:
7170         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7171            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7172            But the prototype takes a `struct timespec *'; insert casts
7173            to satisfy the compiler.  We do not need to tswap TIMEOUT
7174            since it's not compared to guest memory.  */
7175         pts = (struct timespec *)(uintptr_t) timeout;
7176         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7177                              (base_op == FUTEX_CMP_REQUEUE
7178                                       ? tswap32(val3)
7179                                       : val3));
7180     default:
7181         return -TARGET_ENOSYS;
7182     }
7183 }
7184 #endif
7185 
7186 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7187 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7188                                      abi_long handle, abi_long mount_id,
7189                                      abi_long flags)
7190 {
7191     struct file_handle *target_fh;
7192     struct file_handle *fh;
7193     int mid = 0;
7194     abi_long ret;
7195     char *name;
7196     unsigned int size, total_size;
7197 
7198     if (get_user_s32(size, handle)) {
7199         return -TARGET_EFAULT;
7200     }
7201 
7202     name = lock_user_string(pathname);
7203     if (!name) {
7204         return -TARGET_EFAULT;
7205     }
7206 
7207     total_size = sizeof(struct file_handle) + size;
7208     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7209     if (!target_fh) {
7210         unlock_user(name, pathname, 0);
7211         return -TARGET_EFAULT;
7212     }
7213 
7214     fh = g_malloc0(total_size);
7215     fh->handle_bytes = size;
7216 
7217     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7218     unlock_user(name, pathname, 0);
7219 
7220     /* man name_to_handle_at(2):
7221      * Other than the use of the handle_bytes field, the caller should treat
7222      * the file_handle structure as an opaque data type
7223      */
7224 
7225     memcpy(target_fh, fh, total_size);
7226     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7227     target_fh->handle_type = tswap32(fh->handle_type);
7228     g_free(fh);
7229     unlock_user(target_fh, handle, total_size);
7230 
7231     if (put_user_s32(mid, mount_id)) {
7232         return -TARGET_EFAULT;
7233     }
7234 
7235     return ret;
7236 
7237 }
7238 #endif
7239 
7240 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7241 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7242                                      abi_long flags)
7243 {
7244     struct file_handle *target_fh;
7245     struct file_handle *fh;
7246     unsigned int size, total_size;
7247     abi_long ret;
7248 
7249     if (get_user_s32(size, handle)) {
7250         return -TARGET_EFAULT;
7251     }
7252 
7253     total_size = sizeof(struct file_handle) + size;
7254     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7255     if (!target_fh) {
7256         return -TARGET_EFAULT;
7257     }
7258 
7259     fh = g_memdup(target_fh, total_size);
7260     fh->handle_bytes = size;
7261     fh->handle_type = tswap32(target_fh->handle_type);
7262 
7263     ret = get_errno(open_by_handle_at(mount_fd, fh,
7264                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7265 
7266     g_free(fh);
7267 
7268     unlock_user(target_fh, handle, total_size);
7269 
7270     return ret;
7271 }
7272 #endif
7273 
7274 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7275 
7276 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7277 {
7278     int host_flags;
7279     target_sigset_t *target_mask;
7280     sigset_t host_mask;
7281     abi_long ret;
7282 
7283     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7284         return -TARGET_EINVAL;
7285     }
7286     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7287         return -TARGET_EFAULT;
7288     }
7289 
7290     target_to_host_sigset(&host_mask, target_mask);
7291 
7292     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7293 
7294     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7295     if (ret >= 0) {
7296         fd_trans_register(ret, &target_signalfd_trans);
7297     }
7298 
7299     unlock_user_struct(target_mask, mask, 0);
7300 
7301     return ret;
7302 }
7303 #endif
7304 
7305 /* Map host to target signal numbers for the wait family of syscalls.
7306    Assume all other status bits are the same.  */
7307 int host_to_target_waitstatus(int status)
7308 {
7309     if (WIFSIGNALED(status)) {
7310         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7311     }
7312     if (WIFSTOPPED(status)) {
7313         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7314                | (status & 0xff);
7315     }
7316     return status;
7317 }
7318 
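/*
 * Illustrative example, not part of the original source: extracting the
 * (already remapped) terminating signal from a converted wait status.
 * host_to_target_waitstatus() only rewrites the embedded signal number, so
 * the WIFSIGNALED()/WIFSTOPPED() tests behave the same on the converted
 * value.  The helper name is hypothetical.
 */
static inline int target_termsig_example(int host_status)
{
    int target_status = host_to_target_waitstatus(host_status);

    return WIFSIGNALED(target_status) ? (target_status & 0x7f) : 0;
}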
7319 static int open_self_cmdline(void *cpu_env, int fd)
7320 {
7321     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7322     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7323     int i;
7324 
7325     for (i = 0; i < bprm->argc; i++) {
7326         size_t len = strlen(bprm->argv[i]) + 1;
7327 
7328         if (write(fd, bprm->argv[i], len) != len) {
7329             return -1;
7330         }
7331     }
7332 
7333     return 0;
7334 }
7335 
7336 static int open_self_maps(void *cpu_env, int fd)
7337 {
7338     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7339     TaskState *ts = cpu->opaque;
7340     GSList *map_info = read_self_maps();
7341     GSList *s;
7342     int count;
7343 
7344     for (s = map_info; s; s = g_slist_next(s)) {
7345         MapInfo *e = (MapInfo *) s->data;
7346 
7347         if (h2g_valid(e->start)) {
7348             unsigned long min = e->start;
7349             unsigned long max = e->end;
7350             int flags = page_get_flags(h2g(min));
7351             const char *path;
7352 
7353             max = h2g_valid(max - 1) ?
7354                 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
7355 
7356             if (page_check_range(h2g(min), max - min, flags) == -1) {
7357                 continue;
7358             }
7359 
7360             if (h2g(min) == ts->info->stack_limit) {
7361                 path = "[stack]";
7362             } else {
7363                 path = e->path;
7364             }
7365 
7366             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7367                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7368                             h2g(min), h2g(max - 1) + 1,
7369                             e->is_read ? 'r' : '-',
7370                             e->is_write ? 'w' : '-',
7371                             e->is_exec ? 'x' : '-',
7372                             e->is_priv ? 'p' : '-',
7373                             (uint64_t) e->offset, e->dev, e->inode);
7374             if (path) {
7375                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7376             } else {
7377                 dprintf(fd, "\n");
7378             }
7379         }
7380     }
7381 
7382     free_self_maps(map_info);
7383 
7384 #ifdef TARGET_VSYSCALL_PAGE
7385     /*
7386      * We only support execution from the vsyscall page.
7387      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7388      */
7389     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7390                     " --xp 00000000 00:00 0",
7391                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7392     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7393 #endif
7394 
7395     return 0;
7396 }
7397 
7398 static int open_self_stat(void *cpu_env, int fd)
7399 {
7400     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7401     TaskState *ts = cpu->opaque;
7402     g_autoptr(GString) buf = g_string_new(NULL);
7403     int i;
7404 
7405     for (i = 0; i < 44; i++) {
7406         if (i == 0) {
7407             /* pid */
7408             g_string_printf(buf, FMT_pid " ", getpid());
7409         } else if (i == 1) {
7410             /* app name */
7411             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7412             bin = bin ? bin + 1 : ts->bprm->argv[0];
7413             g_string_printf(buf, "(%.15s) ", bin);
7414         } else if (i == 27) {
7415             /* stack bottom */
7416             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7417         } else {
7418             /* for the rest, there is MasterCard */
7419             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7420         }
7421 
7422         if (write(fd, buf->str, buf->len) != buf->len) {
7423             return -1;
7424         }
7425     }
7426 
7427     return 0;
7428 }
7429 
7430 static int open_self_auxv(void *cpu_env, int fd)
7431 {
7432     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7433     TaskState *ts = cpu->opaque;
7434     abi_ulong auxv = ts->info->saved_auxv;
7435     abi_ulong len = ts->info->auxv_len;
7436     char *ptr;
7437 
7438     /*
7439      * The auxiliary vector is stored on the target process stack.
7440      * Read the whole auxv vector and copy it to the file.
7441      */
7442     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7443     if (ptr != NULL) {
7444         while (len > 0) {
7445             ssize_t r;
7446             r = write(fd, ptr, len);
7447             if (r <= 0) {
7448                 break;
7449             }
7450             len -= r;
7451             ptr += r;
7452         }
7453         lseek(fd, 0, SEEK_SET);
7454         unlock_user(ptr, auxv, len);
7455     }
7456 
7457     return 0;
7458 }
7459 
7460 static int is_proc_myself(const char *filename, const char *entry)
7461 {
7462     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7463         filename += strlen("/proc/");
7464         if (!strncmp(filename, "self/", strlen("self/"))) {
7465             filename += strlen("self/");
7466         } else if (*filename >= '1' && *filename <= '9') {
7467             char myself[80];
7468             snprintf(myself, sizeof(myself), "%d/", getpid());
7469             if (!strncmp(filename, myself, strlen(myself))) {
7470                 filename += strlen(myself);
7471             } else {
7472                 return 0;
7473             }
7474         } else {
7475             return 0;
7476         }
7477         if (!strcmp(filename, entry)) {
7478             return 1;
7479         }
7480     }
7481     return 0;
7482 }
7483 
7484 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7485     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7486 static int is_proc(const char *filename, const char *entry)
7487 {
7488     return strcmp(filename, entry) == 0;
7489 }
7490 #endif
7491 
7492 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7493 static int open_net_route(void *cpu_env, int fd)
7494 {
7495     FILE *fp;
7496     char *line = NULL;
7497     size_t len = 0;
7498     ssize_t read;
7499 
7500     fp = fopen("/proc/net/route", "r");
7501     if (fp == NULL) {
7502         return -1;
7503     }
7504 
7505     /* read header */
7506 
7507     read = getline(&line, &len, fp);
7508     dprintf(fd, "%s", line);
7509 
7510     /* read routes */
7511 
7512     while ((read = getline(&line, &len, fp)) != -1) {
7513         char iface[16];
7514         uint32_t dest, gw, mask;
7515         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7516         int fields;
7517 
7518         fields = sscanf(line,
7519                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7520                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7521                         &mask, &mtu, &window, &irtt);
7522         if (fields != 11) {
7523             continue;
7524         }
7525         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7526                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7527                 metric, tswap32(mask), mtu, window, irtt);
7528     }
7529 
7530     free(line);
7531     fclose(fp);
7532 
7533     return 0;
7534 }
7535 #endif
7536 
7537 #if defined(TARGET_SPARC)
7538 static int open_cpuinfo(void *cpu_env, int fd)
7539 {
7540     dprintf(fd, "type\t\t: sun4u\n");
7541     return 0;
7542 }
7543 #endif
7544 
7545 #if defined(TARGET_HPPA)
7546 static int open_cpuinfo(void *cpu_env, int fd)
7547 {
7548     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7549     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7550     dprintf(fd, "capabilities\t: os32\n");
7551     dprintf(fd, "model\t\t: 9000/778/B160L\n");
7552     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7553     return 0;
7554 }
7555 #endif
7556 
7557 #if defined(TARGET_M68K)
7558 static int open_hardware(void *cpu_env, int fd)
7559 {
7560     dprintf(fd, "Model:\t\tqemu-m68k\n");
7561     return 0;
7562 }
7563 #endif
7564 
7565 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7566 {
7567     struct fake_open {
7568         const char *filename;
7569         int (*fill)(void *cpu_env, int fd);
7570         int (*cmp)(const char *s1, const char *s2);
7571     };
7572     const struct fake_open *fake_open;
7573     static const struct fake_open fakes[] = {
7574         { "maps", open_self_maps, is_proc_myself },
7575         { "stat", open_self_stat, is_proc_myself },
7576         { "auxv", open_self_auxv, is_proc_myself },
7577         { "cmdline", open_self_cmdline, is_proc_myself },
7578 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7579         { "/proc/net/route", open_net_route, is_proc },
7580 #endif
7581 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
7582         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7583 #endif
7584 #if defined(TARGET_M68K)
7585         { "/proc/hardware", open_hardware, is_proc },
7586 #endif
7587         { NULL, NULL, NULL }
7588     };
7589 
7590     if (is_proc_myself(pathname, "exe")) {
7591         int execfd = qemu_getauxval(AT_EXECFD);
7592         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7593     }
7594 
7595     for (fake_open = fakes; fake_open->filename; fake_open++) {
7596         if (fake_open->cmp(pathname, fake_open->filename)) {
7597             break;
7598         }
7599     }
7600 
7601     if (fake_open->filename) {
7602         const char *tmpdir;
7603         char filename[PATH_MAX];
7604         int fd, r;
7605 
7606         /* create temporary file to map stat to */
7607         /* create a temporary file to hold the synthesized contents */
7608         if (!tmpdir)
7609             tmpdir = "/tmp";
7610         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7611         fd = mkstemp(filename);
7612         if (fd < 0) {
7613             return fd;
7614         }
7615         unlink(filename);
7616 
7617         if ((r = fake_open->fill(cpu_env, fd))) {
7618             int e = errno;
7619             close(fd);
7620             errno = e;
7621             return r;
7622         }
7623         lseek(fd, 0, SEEK_SET);
7624 
7625         return fd;
7626     }
7627 
7628     return safe_openat(dirfd, path(pathname), flags, mode);
7629 }
7630 
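/*
 * Illustrative sketch (hypothetical, not part of the original source):
 * another synthesized /proc file would follow the same pattern as the
 * fakes[] table in do_openat() above -- a fill callback that writes the
 * desired contents to the temporary fd, paired with is_proc_myself() or
 * is_proc() as the matcher.  open_self_example is an invented name and is
 * not wired into the table.
 */
static inline int open_self_example(void *cpu_env, int fd)
{
    dprintf(fd, "synthetic /proc contents would go here\n");
    return 0;
}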
7631 #define TIMER_MAGIC 0x0caf0000
7632 #define TIMER_MAGIC_MASK 0xffff0000
7633 
7634 /* Convert QEMU provided timer ID back to internal 16bit index format */
7635 static target_timer_t get_timer_id(abi_long arg)
7636 {
7637     target_timer_t timerid = arg;
7638 
7639     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7640         return -TARGET_EINVAL;
7641     }
7642 
7643     timerid &= 0xffff;
7644 
7645     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7646         return -TARGET_EINVAL;
7647     }
7648 
7649     return timerid;
7650 }
7651 
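/*
 * Illustrative example, not part of the original source: the guest-visible
 * id for timer slot 0 is (TIMER_MAGIC | 0); get_timer_id() checks the magic
 * and recovers the index.  Hypothetical helper, nothing calls it.
 */
static inline void timer_id_roundtrip_example(void)
{
    g_assert(get_timer_id(TIMER_MAGIC | 0) == 0);
}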
7652 static int target_to_host_cpu_mask(unsigned long *host_mask,
7653                                    size_t host_size,
7654                                    abi_ulong target_addr,
7655                                    size_t target_size)
7656 {
7657     unsigned target_bits = sizeof(abi_ulong) * 8;
7658     unsigned host_bits = sizeof(*host_mask) * 8;
7659     abi_ulong *target_mask;
7660     unsigned i, j;
7661 
7662     assert(host_size >= target_size);
7663 
7664     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7665     if (!target_mask) {
7666         return -TARGET_EFAULT;
7667     }
7668     memset(host_mask, 0, host_size);
7669 
7670     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7671         unsigned bit = i * target_bits;
7672         abi_ulong val;
7673 
7674         __get_user(val, &target_mask[i]);
7675         for (j = 0; j < target_bits; j++, bit++) {
7676             if (val & (1UL << j)) {
7677                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7678             }
7679         }
7680     }
7681 
7682     unlock_user(target_mask, target_addr, 0);
7683     return 0;
7684 }
7685 
7686 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7687                                    size_t host_size,
7688                                    abi_ulong target_addr,
7689                                    size_t target_size)
7690 {
7691     unsigned target_bits = sizeof(abi_ulong) * 8;
7692     unsigned host_bits = sizeof(*host_mask) * 8;
7693     abi_ulong *target_mask;
7694     unsigned i, j;
7695 
7696     assert(host_size >= target_size);
7697 
7698     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7699     if (!target_mask) {
7700         return -TARGET_EFAULT;
7701     }
7702 
7703     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7704         unsigned bit = i * target_bits;
7705         abi_ulong val = 0;
7706 
7707         for (j = 0; j < target_bits; j++, bit++) {
7708             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7709                 val |= 1UL << j;
7710             }
7711         }
7712         __put_user(val, &target_mask[i]);
7713     }
7714 
7715     unlock_user(target_mask, target_addr, target_size);
7716     return 0;
7717 }
7718 
7719 /* This is an internal helper for do_syscall so that it is easier
7720  * to have a single return point, so that actions, such as logging
7721  * of syscall results, can be performed.
7722  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7723  */
7724 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7725                             abi_long arg2, abi_long arg3, abi_long arg4,
7726                             abi_long arg5, abi_long arg6, abi_long arg7,
7727                             abi_long arg8)
7728 {
7729     CPUState *cpu = env_cpu(cpu_env);
7730     abi_long ret;
7731 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7732     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7733     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7734     || defined(TARGET_NR_statx)
7735     struct stat st;
7736 #endif
7737 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7738     || defined(TARGET_NR_fstatfs)
7739     struct statfs stfs;
7740 #endif
7741     void *p;
7742 
7743     switch(num) {
7744     case TARGET_NR_exit:
7745         /* In old applications this may be used to implement _exit(2).
7746            However in threaded applications it is used for thread termination,
7747            and _exit_group is used for application termination.
7748            Do thread termination if we have more than one thread.  */
7749 
7750         if (block_signals()) {
7751             return -TARGET_ERESTARTSYS;
7752         }
7753 
7754         pthread_mutex_lock(&clone_lock);
7755 
7756         if (CPU_NEXT(first_cpu)) {
7757             TaskState *ts = cpu->opaque;
7758 
7759             object_property_set_bool(OBJECT(cpu), false, "realized", NULL);
7760             object_unref(OBJECT(cpu));
7761             /*
7762              * At this point the CPU should be unrealized and removed
7763              * from cpu lists. We can clean-up the rest of the thread
7764              * data without the lock held.
7765              */
7766 
7767             pthread_mutex_unlock(&clone_lock);
7768 
7769             if (ts->child_tidptr) {
7770                 put_user_u32(0, ts->child_tidptr);
7771                 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7772                           NULL, NULL, 0);
7773             }
7774             thread_cpu = NULL;
7775             g_free(ts);
7776             rcu_unregister_thread();
7777             pthread_exit(NULL);
7778         }
7779 
7780         pthread_mutex_unlock(&clone_lock);
7781         preexit_cleanup(cpu_env, arg1);
7782         _exit(arg1);
7783         return 0; /* avoid warning */
7784     case TARGET_NR_read:
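             /*
              * A zero-length read with a NULL buffer is valid, but lock_user()
              * would reject it, so that case is passed straight to the host.
              */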
7785         if (arg2 == 0 && arg3 == 0) {
7786             return get_errno(safe_read(arg1, 0, 0));
7787         } else {
7788             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7789                 return -TARGET_EFAULT;
7790             ret = get_errno(safe_read(arg1, p, arg3));
7791             if (ret >= 0 &&
7792                 fd_trans_host_to_target_data(arg1)) {
7793                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7794             }
7795             unlock_user(p, arg2, ret);
7796         }
7797         return ret;
7798     case TARGET_NR_write:
7799         if (arg2 == 0 && arg3 == 0) {
7800             return get_errno(safe_write(arg1, 0, 0));
7801         }
7802         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7803             return -TARGET_EFAULT;
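             /*
              * If this fd has a registered data translator, the translation
              * may rewrite the buffer, so run it on a private copy rather
              * than on the guest's read-locked pages.
              */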
7804         if (fd_trans_target_to_host_data(arg1)) {
7805             void *copy = g_malloc(arg3);
7806             memcpy(copy, p, arg3);
7807             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7808             if (ret >= 0) {
7809                 ret = get_errno(safe_write(arg1, copy, ret));
7810             }
7811             g_free(copy);
7812         } else {
7813             ret = get_errno(safe_write(arg1, p, arg3));
7814         }
7815         unlock_user(p, arg2, 0);
7816         return ret;
7817 
7818 #ifdef TARGET_NR_open
7819     case TARGET_NR_open:
7820         if (!(p = lock_user_string(arg1)))
7821             return -TARGET_EFAULT;
7822         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7823                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7824                                   arg3));
7825         fd_trans_unregister(ret);
7826         unlock_user(p, arg1, 0);
7827         return ret;
7828 #endif
7829     case TARGET_NR_openat:
7830         if (!(p = lock_user_string(arg2)))
7831             return -TARGET_EFAULT;
7832         ret = get_errno(do_openat(cpu_env, arg1, p,
7833                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7834                                   arg4));
7835         fd_trans_unregister(ret);
7836         unlock_user(p, arg2, 0);
7837         return ret;
7838 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7839     case TARGET_NR_name_to_handle_at:
7840         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7841         return ret;
7842 #endif
7843 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7844     case TARGET_NR_open_by_handle_at:
7845         ret = do_open_by_handle_at(arg1, arg2, arg3);
7846         fd_trans_unregister(ret);
7847         return ret;
7848 #endif
7849     case TARGET_NR_close:
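             /* Drop any fd translator registered for this descriptor first. */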
7850         fd_trans_unregister(arg1);
7851         return get_errno(close(arg1));
7852 
7853     case TARGET_NR_brk:
7854         return do_brk(arg1);
7855 #ifdef TARGET_NR_fork
7856     case TARGET_NR_fork:
7857         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7858 #endif
7859 #ifdef TARGET_NR_waitpid
7860     case TARGET_NR_waitpid:
7861         {
7862             int status;
7863             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7864             if (!is_error(ret) && arg2 && ret
7865                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7866                 return -TARGET_EFAULT;
7867         }
7868         return ret;
7869 #endif
7870 #ifdef TARGET_NR_waitid
7871     case TARGET_NR_waitid:
7872         {
7873             siginfo_t info;
7874             info.si_pid = 0;
7875             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7876             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7877                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7878                     return -TARGET_EFAULT;
7879                 host_to_target_siginfo(p, &info);
7880                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7881             }
7882         }
7883         return ret;
7884 #endif
7885 #ifdef TARGET_NR_creat /* not on alpha */
7886     case TARGET_NR_creat:
7887         if (!(p = lock_user_string(arg1)))
7888             return -TARGET_EFAULT;
7889         ret = get_errno(creat(p, arg2));
7890         fd_trans_unregister(ret);
7891         unlock_user(p, arg1, 0);
7892         return ret;
7893 #endif
7894 #ifdef TARGET_NR_link
7895     case TARGET_NR_link:
7896         {
7897             void * p2;
7898             p = lock_user_string(arg1);
7899             p2 = lock_user_string(arg2);
7900             if (!p || !p2)
7901                 ret = -TARGET_EFAULT;
7902             else
7903                 ret = get_errno(link(p, p2));
7904             unlock_user(p2, arg2, 0);
7905             unlock_user(p, arg1, 0);
7906         }
7907         return ret;
7908 #endif
7909 #if defined(TARGET_NR_linkat)
7910     case TARGET_NR_linkat:
7911         {
7912             void * p2 = NULL;
7913             if (!arg2 || !arg4)
7914                 return -TARGET_EFAULT;
7915             p  = lock_user_string(arg2);
7916             p2 = lock_user_string(arg4);
7917             if (!p || !p2)
7918                 ret = -TARGET_EFAULT;
7919             else
7920                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7921             unlock_user(p, arg2, 0);
7922             unlock_user(p2, arg4, 0);
7923         }
7924         return ret;
7925 #endif
7926 #ifdef TARGET_NR_unlink
7927     case TARGET_NR_unlink:
7928         if (!(p = lock_user_string(arg1)))
7929             return -TARGET_EFAULT;
7930         ret = get_errno(unlink(p));
7931         unlock_user(p, arg1, 0);
7932         return ret;
7933 #endif
7934 #if defined(TARGET_NR_unlinkat)
7935     case TARGET_NR_unlinkat:
7936         if (!(p = lock_user_string(arg2)))
7937             return -TARGET_EFAULT;
7938         ret = get_errno(unlinkat(arg1, p, arg3));
7939         unlock_user(p, arg2, 0);
7940         return ret;
7941 #endif
7942     case TARGET_NR_execve:
7943         {
7944             char **argp, **envp;
7945             int argc, envc;
7946             abi_ulong gp;
7947             abi_ulong guest_argp;
7948             abi_ulong guest_envp;
7949             abi_ulong addr;
7950             char **q;
7951             int total_size = 0;
7952 
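                 /*
                  * First pass: count the guest argv[] and envp[] entries so
                  * that the host pointer arrays can be sized before the
                  * strings are locked and copied.
                  */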
7953             argc = 0;
7954             guest_argp = arg2;
7955             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7956                 if (get_user_ual(addr, gp))
7957                     return -TARGET_EFAULT;
7958                 if (!addr)
7959                     break;
7960                 argc++;
7961             }
7962             envc = 0;
7963             guest_envp = arg3;
7964             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7965                 if (get_user_ual(addr, gp))
7966                     return -TARGET_EFAULT;
7967                 if (!addr)
7968                     break;
7969                 envc++;
7970             }
7971 
7972             argp = g_new0(char *, argc + 1);
7973             envp = g_new0(char *, envc + 1);
7974 
7975             for (gp = guest_argp, q = argp; gp;
7976                   gp += sizeof(abi_ulong), q++) {
7977                 if (get_user_ual(addr, gp))
7978                     goto execve_efault;
7979                 if (!addr)
7980                     break;
7981                 if (!(*q = lock_user_string(addr)))
7982                     goto execve_efault;
7983                 total_size += strlen(*q) + 1;
7984             }
7985             *q = NULL;
7986 
7987             for (gp = guest_envp, q = envp; gp;
7988                   gp += sizeof(abi_ulong), q++) {
7989                 if (get_user_ual(addr, gp))
7990                     goto execve_efault;
7991                 if (!addr)
7992                     break;
7993                 if (!(*q = lock_user_string(addr)))
7994                     goto execve_efault;
7995                 total_size += strlen(*q) + 1;
7996             }
7997             *q = NULL;
7998 
7999             if (!(p = lock_user_string(arg1)))
8000                 goto execve_efault;
8001             /* Although execve() is not an interruptible syscall it is
8002              * a special case where we must use the safe_syscall wrapper:
8003              * if we allow a signal to happen before we make the host
8004              * syscall then we will 'lose' it, because at the point of
8005              * execve the process leaves QEMU's control. So we use the
8006              * safe syscall wrapper to ensure that we either take the
8007              * signal as a guest signal, or else it does not happen
8008              * before the execve completes and makes it the other
8009              * program's problem.
8010              */
8011             ret = get_errno(safe_execve(p, argp, envp));
8012             unlock_user(p, arg1, 0);
8013 
8014             goto execve_end;
8015 
8016         execve_efault:
8017             ret = -TARGET_EFAULT;
8018 
8019         execve_end:
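                 /*
                  * Unlock every guest string locked above; the loops stop at
                  * the first NULL host pointer, which marks how far locking got.
                  */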
8020             for (gp = guest_argp, q = argp; *q;
8021                   gp += sizeof(abi_ulong), q++) {
8022                 if (get_user_ual(addr, gp)
8023                     || !addr)
8024                     break;
8025                 unlock_user(*q, addr, 0);
8026             }
8027             for (gp = guest_envp, q = envp; *q;
8028                   gp += sizeof(abi_ulong), q++) {
8029                 if (get_user_ual(addr, gp)
8030                     || !addr)
8031                     break;
8032                 unlock_user(*q, addr, 0);
8033             }
8034 
8035             g_free(argp);
8036             g_free(envp);
8037         }
8038         return ret;
8039     case TARGET_NR_chdir:
8040         if (!(p = lock_user_string(arg1)))
8041             return -TARGET_EFAULT;
8042         ret = get_errno(chdir(p));
8043         unlock_user(p, arg1, 0);
8044         return ret;
8045 #ifdef TARGET_NR_time
8046     case TARGET_NR_time:
8047         {
8048             time_t host_time;
8049             ret = get_errno(time(&host_time));
8050             if (!is_error(ret)
8051                 && arg1
8052                 && put_user_sal(host_time, arg1))
8053                 return -TARGET_EFAULT;
8054         }
8055         return ret;
8056 #endif
8057 #ifdef TARGET_NR_mknod
8058     case TARGET_NR_mknod:
8059         if (!(p = lock_user_string(arg1)))
8060             return -TARGET_EFAULT;
8061         ret = get_errno(mknod(p, arg2, arg3));
8062         unlock_user(p, arg1, 0);
8063         return ret;
8064 #endif
8065 #if defined(TARGET_NR_mknodat)
8066     case TARGET_NR_mknodat:
8067         if (!(p = lock_user_string(arg2)))
8068             return -TARGET_EFAULT;
8069         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8070         unlock_user(p, arg2, 0);
8071         return ret;
8072 #endif
8073 #ifdef TARGET_NR_chmod
8074     case TARGET_NR_chmod:
8075         if (!(p = lock_user_string(arg1)))
8076             return -TARGET_EFAULT;
8077         ret = get_errno(chmod(p, arg2));
8078         unlock_user(p, arg1, 0);
8079         return ret;
8080 #endif
8081 #ifdef TARGET_NR_lseek
8082     case TARGET_NR_lseek:
8083         return get_errno(lseek(arg1, arg2, arg3));
8084 #endif
8085 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8086     /* Alpha specific */
8087     case TARGET_NR_getxpid:
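             /* getxpid returns the current pid in v0 and the parent pid in a4. */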
8088         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8089         return get_errno(getpid());
8090 #endif
8091 #ifdef TARGET_NR_getpid
8092     case TARGET_NR_getpid:
8093         return get_errno(getpid());
8094 #endif
8095     case TARGET_NR_mount:
8096         {
8097             /* need to look at the data field */
8098             void *p2, *p3;
8099 
8100             if (arg1) {
8101                 p = lock_user_string(arg1);
8102                 if (!p) {
8103                     return -TARGET_EFAULT;
8104                 }
8105             } else {
8106                 p = NULL;
8107             }
8108 
8109             p2 = lock_user_string(arg2);
8110             if (!p2) {
8111                 if (arg1) {
8112                     unlock_user(p, arg1, 0);
8113                 }
8114                 return -TARGET_EFAULT;
8115             }
8116 
8117             if (arg3) {
8118                 p3 = lock_user_string(arg3);
8119                 if (!p3) {
8120                     if (arg1) {
8121                         unlock_user(p, arg1, 0);
8122                     }
8123                     unlock_user(p2, arg2, 0);
8124                     return -TARGET_EFAULT;
8125                 }
8126             } else {
8127                 p3 = NULL;
8128             }
8129 
8130             /* FIXME - arg5 should be locked, but it isn't clear how to
8131              * do that since it's not guaranteed to be a NULL-terminated
8132              * string.
8133              */
8134             if (!arg5) {
8135                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8136             } else {
8137                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8138             }
8139             ret = get_errno(ret);
8140 
8141             if (arg1) {
8142                 unlock_user(p, arg1, 0);
8143             }
8144             unlock_user(p2, arg2, 0);
8145             if (arg3) {
8146                 unlock_user(p3, arg3, 0);
8147             }
8148         }
8149         return ret;
8150 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8151 #if defined(TARGET_NR_umount)
8152     case TARGET_NR_umount:
8153 #endif
8154 #if defined(TARGET_NR_oldumount)
8155     case TARGET_NR_oldumount:
8156 #endif
8157         if (!(p = lock_user_string(arg1)))
8158             return -TARGET_EFAULT;
8159         ret = get_errno(umount(p));
8160         unlock_user(p, arg1, 0);
8161         return ret;
8162 #endif
8163 #ifdef TARGET_NR_stime /* not on alpha */
8164     case TARGET_NR_stime:
8165         {
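                 /*
                  * Implemented with clock_settime(CLOCK_REALTIME) so we do not
                  * depend on the host providing the obsolete stime() call.
                  */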
8166             struct timespec ts;
8167             ts.tv_nsec = 0;
8168             if (get_user_sal(ts.tv_sec, arg1)) {
8169                 return -TARGET_EFAULT;
8170             }
8171             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8172         }
8173 #endif
8174 #ifdef TARGET_NR_alarm /* not on alpha */
8175     case TARGET_NR_alarm:
8176         return alarm(arg1);
8177 #endif
8178 #ifdef TARGET_NR_pause /* not on alpha */
8179     case TARGET_NR_pause:
8180         if (!block_signals()) {
8181             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8182         }
8183         return -TARGET_EINTR;
8184 #endif
8185 #ifdef TARGET_NR_utime
8186     case TARGET_NR_utime:
8187         {
8188             struct utimbuf tbuf, *host_tbuf;
8189             struct target_utimbuf *target_tbuf;
8190             if (arg2) {
8191                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8192                     return -TARGET_EFAULT;
8193                 tbuf.actime = tswapal(target_tbuf->actime);
8194                 tbuf.modtime = tswapal(target_tbuf->modtime);
8195                 unlock_user_struct(target_tbuf, arg2, 0);
8196                 host_tbuf = &tbuf;
8197             } else {
8198                 host_tbuf = NULL;
8199             }
8200             if (!(p = lock_user_string(arg1)))
8201                 return -TARGET_EFAULT;
8202             ret = get_errno(utime(p, host_tbuf));
8203             unlock_user(p, arg1, 0);
8204         }
8205         return ret;
8206 #endif
8207 #ifdef TARGET_NR_utimes
8208     case TARGET_NR_utimes:
8209         {
8210             struct timeval *tvp, tv[2];
8211             if (arg2) {
8212                 if (copy_from_user_timeval(&tv[0], arg2)
8213                     || copy_from_user_timeval(&tv[1],
8214                                               arg2 + sizeof(struct target_timeval)))
8215                     return -TARGET_EFAULT;
8216                 tvp = tv;
8217             } else {
8218                 tvp = NULL;
8219             }
8220             if (!(p = lock_user_string(arg1)))
8221                 return -TARGET_EFAULT;
8222             ret = get_errno(utimes(p, tvp));
8223             unlock_user(p, arg1, 0);
8224         }
8225         return ret;
8226 #endif
8227 #if defined(TARGET_NR_futimesat)
8228     case TARGET_NR_futimesat:
8229         {
8230             struct timeval *tvp, tv[2];
8231             if (arg3) {
8232                 if (copy_from_user_timeval(&tv[0], arg3)
8233                     || copy_from_user_timeval(&tv[1],
8234                                               arg3 + sizeof(struct target_timeval)))
8235                     return -TARGET_EFAULT;
8236                 tvp = tv;
8237             } else {
8238                 tvp = NULL;
8239             }
8240             if (!(p = lock_user_string(arg2))) {
8241                 return -TARGET_EFAULT;
8242             }
8243             ret = get_errno(futimesat(arg1, path(p), tvp));
8244             unlock_user(p, arg2, 0);
8245         }
8246         return ret;
8247 #endif
8248 #ifdef TARGET_NR_access
8249     case TARGET_NR_access:
8250         if (!(p = lock_user_string(arg1))) {
8251             return -TARGET_EFAULT;
8252         }
8253         ret = get_errno(access(path(p), arg2));
8254         unlock_user(p, arg1, 0);
8255         return ret;
8256 #endif
8257 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8258     case TARGET_NR_faccessat:
8259         if (!(p = lock_user_string(arg2))) {
8260             return -TARGET_EFAULT;
8261         }
8262         ret = get_errno(faccessat(arg1, p, arg3, 0));
8263         unlock_user(p, arg2, 0);
8264         return ret;
8265 #endif
8266 #ifdef TARGET_NR_nice /* not on alpha */
8267     case TARGET_NR_nice:
8268         return get_errno(nice(arg1));
8269 #endif
8270     case TARGET_NR_sync:
8271         sync();
8272         return 0;
8273 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8274     case TARGET_NR_syncfs:
8275         return get_errno(syncfs(arg1));
8276 #endif
8277     case TARGET_NR_kill:
8278         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8279 #ifdef TARGET_NR_rename
8280     case TARGET_NR_rename:
8281         {
8282             void *p2;
8283             p = lock_user_string(arg1);
8284             p2 = lock_user_string(arg2);
8285             if (!p || !p2)
8286                 ret = -TARGET_EFAULT;
8287             else
8288                 ret = get_errno(rename(p, p2));
8289             unlock_user(p2, arg2, 0);
8290             unlock_user(p, arg1, 0);
8291         }
8292         return ret;
8293 #endif
8294 #if defined(TARGET_NR_renameat)
8295     case TARGET_NR_renameat:
8296         {
8297             void *p2;
8298             p  = lock_user_string(arg2);
8299             p2 = lock_user_string(arg4);
8300             if (!p || !p2)
8301                 ret = -TARGET_EFAULT;
8302             else
8303                 ret = get_errno(renameat(arg1, p, arg3, p2));
8304             unlock_user(p2, arg4, 0);
8305             unlock_user(p, arg2, 0);
8306         }
8307         return ret;
8308 #endif
8309 #if defined(TARGET_NR_renameat2)
8310     case TARGET_NR_renameat2:
8311         {
8312             void *p2;
8313             p  = lock_user_string(arg2);
8314             p2 = lock_user_string(arg4);
8315             if (!p || !p2) {
8316                 ret = -TARGET_EFAULT;
8317             } else {
8318                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8319             }
8320             unlock_user(p2, arg4, 0);
8321             unlock_user(p, arg2, 0);
8322         }
8323         return ret;
8324 #endif
8325 #ifdef TARGET_NR_mkdir
8326     case TARGET_NR_mkdir:
8327         if (!(p = lock_user_string(arg1)))
8328             return -TARGET_EFAULT;
8329         ret = get_errno(mkdir(p, arg2));
8330         unlock_user(p, arg1, 0);
8331         return ret;
8332 #endif
8333 #if defined(TARGET_NR_mkdirat)
8334     case TARGET_NR_mkdirat:
8335         if (!(p = lock_user_string(arg2)))
8336             return -TARGET_EFAULT;
8337         ret = get_errno(mkdirat(arg1, p, arg3));
8338         unlock_user(p, arg2, 0);
8339         return ret;
8340 #endif
8341 #ifdef TARGET_NR_rmdir
8342     case TARGET_NR_rmdir:
8343         if (!(p = lock_user_string(arg1)))
8344             return -TARGET_EFAULT;
8345         ret = get_errno(rmdir(p));
8346         unlock_user(p, arg1, 0);
8347         return ret;
8348 #endif
8349     case TARGET_NR_dup:
8350         ret = get_errno(dup(arg1));
8351         if (ret >= 0) {
8352             fd_trans_dup(arg1, ret);
8353         }
8354         return ret;
8355 #ifdef TARGET_NR_pipe
8356     case TARGET_NR_pipe:
8357         return do_pipe(cpu_env, arg1, 0, 0);
8358 #endif
8359 #ifdef TARGET_NR_pipe2
8360     case TARGET_NR_pipe2:
8361         return do_pipe(cpu_env, arg1,
8362                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8363 #endif
8364     case TARGET_NR_times:
8365         {
8366             struct target_tms *tmsp;
8367             struct tms tms;
8368             ret = get_errno(times(&tms));
8369             if (arg1) {
8370                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8371                 if (!tmsp)
8372                     return -TARGET_EFAULT;
8373                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8374                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8375                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8376                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8377             }
8378             if (!is_error(ret))
8379                 ret = host_to_target_clock_t(ret);
8380         }
8381         return ret;
8382     case TARGET_NR_acct:
8383         if (arg1 == 0) {
8384             ret = get_errno(acct(NULL));
8385         } else {
8386             if (!(p = lock_user_string(arg1))) {
8387                 return -TARGET_EFAULT;
8388             }
8389             ret = get_errno(acct(path(p)));
8390             unlock_user(p, arg1, 0);
8391         }
8392         return ret;
8393 #ifdef TARGET_NR_umount2
8394     case TARGET_NR_umount2:
8395         if (!(p = lock_user_string(arg1)))
8396             return -TARGET_EFAULT;
8397         ret = get_errno(umount2(p, arg2));
8398         unlock_user(p, arg1, 0);
8399         return ret;
8400 #endif
8401     case TARGET_NR_ioctl:
8402         return do_ioctl(arg1, arg2, arg3);
8403 #ifdef TARGET_NR_fcntl
8404     case TARGET_NR_fcntl:
8405         return do_fcntl(arg1, arg2, arg3);
8406 #endif
8407     case TARGET_NR_setpgid:
8408         return get_errno(setpgid(arg1, arg2));
8409     case TARGET_NR_umask:
8410         return get_errno(umask(arg1));
8411     case TARGET_NR_chroot:
8412         if (!(p = lock_user_string(arg1)))
8413             return -TARGET_EFAULT;
8414         ret = get_errno(chroot(p));
8415         unlock_user(p, arg1, 0);
8416         return ret;
8417 #ifdef TARGET_NR_dup2
8418     case TARGET_NR_dup2:
8419         ret = get_errno(dup2(arg1, arg2));
8420         if (ret >= 0) {
8421             fd_trans_dup(arg1, arg2);
8422         }
8423         return ret;
8424 #endif
8425 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8426     case TARGET_NR_dup3:
8427     {
8428         int host_flags;
8429 
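             /* dup3() accepts only O_CLOEXEC; reject any other flag bits. */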
8430         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8431             return -TARGET_EINVAL;
8432         }
8433         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8434         ret = get_errno(dup3(arg1, arg2, host_flags));
8435         if (ret >= 0) {
8436             fd_trans_dup(arg1, arg2);
8437         }
8438         return ret;
8439     }
8440 #endif
8441 #ifdef TARGET_NR_getppid /* not on alpha */
8442     case TARGET_NR_getppid:
8443         return get_errno(getppid());
8444 #endif
8445 #ifdef TARGET_NR_getpgrp
8446     case TARGET_NR_getpgrp:
8447         return get_errno(getpgrp());
8448 #endif
8449     case TARGET_NR_setsid:
8450         return get_errno(setsid());
8451 #ifdef TARGET_NR_sigaction
8452     case TARGET_NR_sigaction:
8453         {
8454 #if defined(TARGET_ALPHA)
8455             struct target_sigaction act, oact, *pact = 0;
8456             struct target_old_sigaction *old_act;
8457             if (arg2) {
8458                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8459                     return -TARGET_EFAULT;
8460                 act._sa_handler = old_act->_sa_handler;
8461                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8462                 act.sa_flags = old_act->sa_flags;
8463                 act.sa_restorer = 0;
8464                 unlock_user_struct(old_act, arg2, 0);
8465                 pact = &act;
8466             }
8467             ret = get_errno(do_sigaction(arg1, pact, &oact));
8468             if (!is_error(ret) && arg3) {
8469                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8470                     return -TARGET_EFAULT;
8471                 old_act->_sa_handler = oact._sa_handler;
8472                 old_act->sa_mask = oact.sa_mask.sig[0];
8473                 old_act->sa_flags = oact.sa_flags;
8474                 unlock_user_struct(old_act, arg3, 1);
8475             }
8476 #elif defined(TARGET_MIPS)
8477             struct target_sigaction act, oact, *pact, *old_act;
8478 
8479             if (arg2) {
8480                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8481                     return -TARGET_EFAULT;
8482                 act._sa_handler = old_act->_sa_handler;
8483                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8484                 act.sa_flags = old_act->sa_flags;
8485                 unlock_user_struct(old_act, arg2, 0);
8486                 pact = &act;
8487             } else {
8488                 pact = NULL;
8489             }
8490 
8491             ret = get_errno(do_sigaction(arg1, pact, &oact));
8492 
8493             if (!is_error(ret) && arg3) {
8494                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8495                     return -TARGET_EFAULT;
8496                 old_act->_sa_handler = oact._sa_handler;
8497                 old_act->sa_flags = oact.sa_flags;
8498                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8499                 old_act->sa_mask.sig[1] = 0;
8500                 old_act->sa_mask.sig[2] = 0;
8501                 old_act->sa_mask.sig[3] = 0;
8502                 unlock_user_struct(old_act, arg3, 1);
8503             }
8504 #else
8505             struct target_old_sigaction *old_act;
8506             struct target_sigaction act, oact, *pact;
8507             if (arg2) {
8508                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8509                     return -TARGET_EFAULT;
8510                 act._sa_handler = old_act->_sa_handler;
8511                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8512                 act.sa_flags = old_act->sa_flags;
8513                 act.sa_restorer = old_act->sa_restorer;
8514 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8515                 act.ka_restorer = 0;
8516 #endif
8517                 unlock_user_struct(old_act, arg2, 0);
8518                 pact = &act;
8519             } else {
8520                 pact = NULL;
8521             }
8522             ret = get_errno(do_sigaction(arg1, pact, &oact));
8523             if (!is_error(ret) && arg3) {
8524                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8525                     return -TARGET_EFAULT;
8526                 old_act->_sa_handler = oact._sa_handler;
8527                 old_act->sa_mask = oact.sa_mask.sig[0];
8528                 old_act->sa_flags = oact.sa_flags;
8529                 old_act->sa_restorer = oact.sa_restorer;
8530                 unlock_user_struct(old_act, arg3, 1);
8531             }
8532 #endif
8533         }
8534         return ret;
8535 #endif
8536     case TARGET_NR_rt_sigaction:
8537         {
8538 #if defined(TARGET_ALPHA)
8539             /* For Alpha and SPARC this is a 5 argument syscall, with
8540              * a 'restorer' parameter which must be copied into the
8541              * sa_restorer field of the sigaction struct.
8542              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8543              * and arg5 is the sigsetsize.
8544              * Alpha also has a separate rt_sigaction struct that it uses
8545              * here; SPARC uses the usual sigaction struct.
8546              */
8547             struct target_rt_sigaction *rt_act;
8548             struct target_sigaction act, oact, *pact = 0;
8549 
8550             if (arg4 != sizeof(target_sigset_t)) {
8551                 return -TARGET_EINVAL;
8552             }
8553             if (arg2) {
8554                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8555                     return -TARGET_EFAULT;
8556                 act._sa_handler = rt_act->_sa_handler;
8557                 act.sa_mask = rt_act->sa_mask;
8558                 act.sa_flags = rt_act->sa_flags;
8559                 act.sa_restorer = arg5;
8560                 unlock_user_struct(rt_act, arg2, 0);
8561                 pact = &act;
8562             }
8563             ret = get_errno(do_sigaction(arg1, pact, &oact));
8564             if (!is_error(ret) && arg3) {
8565                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8566                     return -TARGET_EFAULT;
8567                 rt_act->_sa_handler = oact._sa_handler;
8568                 rt_act->sa_mask = oact.sa_mask;
8569                 rt_act->sa_flags = oact.sa_flags;
8570                 unlock_user_struct(rt_act, arg3, 1);
8571             }
8572 #else
8573 #ifdef TARGET_SPARC
8574             target_ulong restorer = arg4;
8575             target_ulong sigsetsize = arg5;
8576 #else
8577             target_ulong sigsetsize = arg4;
8578 #endif
8579             struct target_sigaction *act;
8580             struct target_sigaction *oact;
8581 
8582             if (sigsetsize != sizeof(target_sigset_t)) {
8583                 return -TARGET_EINVAL;
8584             }
8585             if (arg2) {
8586                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8587                     return -TARGET_EFAULT;
8588                 }
8589 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8590                 act->ka_restorer = restorer;
8591 #endif
8592             } else {
8593                 act = NULL;
8594             }
8595             if (arg3) {
8596                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8597                     ret = -TARGET_EFAULT;
8598                     goto rt_sigaction_fail;
8599                 }
8600             } else
8601                 oact = NULL;
8602             ret = get_errno(do_sigaction(arg1, act, oact));
8603         rt_sigaction_fail:
8604             if (act)
8605                 unlock_user_struct(act, arg2, 0);
8606             if (oact)
8607                 unlock_user_struct(oact, arg3, 1);
8608 #endif
8609         }
8610         return ret;
8611 #ifdef TARGET_NR_sgetmask /* not on alpha */
8612     case TARGET_NR_sgetmask:
8613         {
8614             sigset_t cur_set;
8615             abi_ulong target_set;
8616             ret = do_sigprocmask(0, NULL, &cur_set);
8617             if (!ret) {
8618                 host_to_target_old_sigset(&target_set, &cur_set);
8619                 ret = target_set;
8620             }
8621         }
8622         return ret;
8623 #endif
8624 #ifdef TARGET_NR_ssetmask /* not on alpha */
8625     case TARGET_NR_ssetmask:
8626         {
8627             sigset_t set, oset;
8628             abi_ulong target_set = arg1;
8629             target_to_host_old_sigset(&set, &target_set);
8630             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8631             if (!ret) {
8632                 host_to_target_old_sigset(&target_set, &oset);
8633                 ret = target_set;
8634             }
8635         }
8636         return ret;
8637 #endif
8638 #ifdef TARGET_NR_sigprocmask
8639     case TARGET_NR_sigprocmask:
8640         {
8641 #if defined(TARGET_ALPHA)
8642             sigset_t set, oldset;
8643             abi_ulong mask;
8644             int how;
8645 
8646             switch (arg1) {
8647             case TARGET_SIG_BLOCK:
8648                 how = SIG_BLOCK;
8649                 break;
8650             case TARGET_SIG_UNBLOCK:
8651                 how = SIG_UNBLOCK;
8652                 break;
8653             case TARGET_SIG_SETMASK:
8654                 how = SIG_SETMASK;
8655                 break;
8656             default:
8657                 return -TARGET_EINVAL;
8658             }
8659             mask = arg2;
8660             target_to_host_old_sigset(&set, &mask);
8661 
8662             ret = do_sigprocmask(how, &set, &oldset);
8663             if (!is_error(ret)) {
8664                 host_to_target_old_sigset(&mask, &oldset);
8665                 ret = mask;
8666                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8667             }
8668 #else
8669             sigset_t set, oldset, *set_ptr;
8670             int how;
8671 
8672             if (arg2) {
8673                 switch (arg1) {
8674                 case TARGET_SIG_BLOCK:
8675                     how = SIG_BLOCK;
8676                     break;
8677                 case TARGET_SIG_UNBLOCK:
8678                     how = SIG_UNBLOCK;
8679                     break;
8680                 case TARGET_SIG_SETMASK:
8681                     how = SIG_SETMASK;
8682                     break;
8683                 default:
8684                     return -TARGET_EINVAL;
8685                 }
8686                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8687                     return -TARGET_EFAULT;
8688                 target_to_host_old_sigset(&set, p);
8689                 unlock_user(p, arg2, 0);
8690                 set_ptr = &set;
8691             } else {
8692                 how = 0;
8693                 set_ptr = NULL;
8694             }
8695             ret = do_sigprocmask(how, set_ptr, &oldset);
8696             if (!is_error(ret) && arg3) {
8697                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8698                     return -TARGET_EFAULT;
8699                 host_to_target_old_sigset(p, &oldset);
8700                 unlock_user(p, arg3, sizeof(target_sigset_t));
8701             }
8702 #endif
8703         }
8704         return ret;
8705 #endif
8706     case TARGET_NR_rt_sigprocmask:
8707         {
8708             int how = arg1;
8709             sigset_t set, oldset, *set_ptr;
8710 
8711             if (arg4 != sizeof(target_sigset_t)) {
8712                 return -TARGET_EINVAL;
8713             }
8714 
8715             if (arg2) {
8716                 switch(how) {
8717                 case TARGET_SIG_BLOCK:
8718                     how = SIG_BLOCK;
8719                     break;
8720                 case TARGET_SIG_UNBLOCK:
8721                     how = SIG_UNBLOCK;
8722                     break;
8723                 case TARGET_SIG_SETMASK:
8724                     how = SIG_SETMASK;
8725                     break;
8726                 default:
8727                     return -TARGET_EINVAL;
8728                 }
8729                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8730                     return -TARGET_EFAULT;
8731                 target_to_host_sigset(&set, p);
8732                 unlock_user(p, arg2, 0);
8733                 set_ptr = &set;
8734             } else {
8735                 how = 0;
8736                 set_ptr = NULL;
8737             }
8738             ret = do_sigprocmask(how, set_ptr, &oldset);
8739             if (!is_error(ret) && arg3) {
8740                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8741                     return -TARGET_EFAULT;
8742                 host_to_target_sigset(p, &oldset);
8743                 unlock_user(p, arg3, sizeof(target_sigset_t));
8744             }
8745         }
8746         return ret;
8747 #ifdef TARGET_NR_sigpending
8748     case TARGET_NR_sigpending:
8749         {
8750             sigset_t set;
8751             ret = get_errno(sigpending(&set));
8752             if (!is_error(ret)) {
8753                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8754                     return -TARGET_EFAULT;
8755                 host_to_target_old_sigset(p, &set);
8756                 unlock_user(p, arg1, sizeof(target_sigset_t));
8757             }
8758         }
8759         return ret;
8760 #endif
8761     case TARGET_NR_rt_sigpending:
8762         {
8763             sigset_t set;
8764 
8765             /* Yes, this check is >, not != like most. We follow the kernel's
8766              * logic and it does it like this because it implements
8767              * NR_sigpending through the same code path, and in that case
8768              * the old_sigset_t is smaller in size.
8769              */
8770             if (arg2 > sizeof(target_sigset_t)) {
8771                 return -TARGET_EINVAL;
8772             }
8773 
8774             ret = get_errno(sigpending(&set));
8775             if (!is_error(ret)) {
8776                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8777                     return -TARGET_EFAULT;
8778                 host_to_target_sigset(p, &set);
8779                 unlock_user(p, arg1, sizeof(target_sigset_t));
8780             }
8781         }
8782         return ret;
8783 #ifdef TARGET_NR_sigsuspend
8784     case TARGET_NR_sigsuspend:
8785         {
8786             TaskState *ts = cpu->opaque;
8787 #if defined(TARGET_ALPHA)
8788             abi_ulong mask = arg1;
8789             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8790 #else
8791             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8792                 return -TARGET_EFAULT;
8793             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8794             unlock_user(p, arg1, 0);
8795 #endif
8796             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8797                                                SIGSET_T_SIZE));
8798             if (ret != -TARGET_ERESTARTSYS) {
8799                 ts->in_sigsuspend = 1;
8800             }
8801         }
8802         return ret;
8803 #endif
8804     case TARGET_NR_rt_sigsuspend:
8805         {
8806             TaskState *ts = cpu->opaque;
8807 
8808             if (arg2 != sizeof(target_sigset_t)) {
8809                 return -TARGET_EINVAL;
8810             }
8811             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8812                 return -TARGET_EFAULT;
8813             target_to_host_sigset(&ts->sigsuspend_mask, p);
8814             unlock_user(p, arg1, 0);
8815             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8816                                                SIGSET_T_SIZE));
8817             if (ret != -TARGET_ERESTARTSYS) {
8818                 ts->in_sigsuspend = 1;
8819             }
8820         }
8821         return ret;
8822 #ifdef TARGET_NR_rt_sigtimedwait
8823     case TARGET_NR_rt_sigtimedwait:
8824         {
8825             sigset_t set;
8826             struct timespec uts, *puts;
8827             siginfo_t uinfo;
8828 
8829             if (arg4 != sizeof(target_sigset_t)) {
8830                 return -TARGET_EINVAL;
8831             }
8832 
8833             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8834                 return -TARGET_EFAULT;
8835             target_to_host_sigset(&set, p);
8836             unlock_user(p, arg1, 0);
8837             if (arg3) {
8838                 puts = &uts;
8839                 if (target_to_host_timespec(puts, arg3)) {
                         return -TARGET_EFAULT;
                     }
8840             } else {
8841                 puts = NULL;
8842             }
8843             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8844                                                  SIGSET_T_SIZE));
8845             if (!is_error(ret)) {
8846                 if (arg2) {
8847                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8848                                   0);
8849                     if (!p) {
8850                         return -TARGET_EFAULT;
8851                     }
8852                     host_to_target_siginfo(p, &uinfo);
8853                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8854                 }
8855                 ret = host_to_target_signal(ret);
8856             }
8857         }
8858         return ret;
8859 #endif
8860     case TARGET_NR_rt_sigqueueinfo:
8861         {
8862             siginfo_t uinfo;
8863 
8864             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8865             if (!p) {
8866                 return -TARGET_EFAULT;
8867             }
8868             target_to_host_siginfo(&uinfo, p);
8869             unlock_user(p, arg3, 0);
8870             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8871         }
8872         return ret;
8873     case TARGET_NR_rt_tgsigqueueinfo:
8874         {
8875             siginfo_t uinfo;
8876 
8877             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8878             if (!p) {
8879                 return -TARGET_EFAULT;
8880             }
8881             target_to_host_siginfo(&uinfo, p);
8882             unlock_user(p, arg4, 0);
8883             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8884         }
8885         return ret;
8886 #ifdef TARGET_NR_sigreturn
8887     case TARGET_NR_sigreturn:
8888         if (block_signals()) {
8889             return -TARGET_ERESTARTSYS;
8890         }
8891         return do_sigreturn(cpu_env);
8892 #endif
8893     case TARGET_NR_rt_sigreturn:
8894         if (block_signals()) {
8895             return -TARGET_ERESTARTSYS;
8896         }
8897         return do_rt_sigreturn(cpu_env);
8898     case TARGET_NR_sethostname:
8899         if (!(p = lock_user_string(arg1)))
8900             return -TARGET_EFAULT;
8901         ret = get_errno(sethostname(p, arg2));
8902         unlock_user(p, arg1, 0);
8903         return ret;
8904 #ifdef TARGET_NR_setrlimit
8905     case TARGET_NR_setrlimit:
8906         {
8907             int resource = target_to_host_resource(arg1);
8908             struct target_rlimit *target_rlim;
8909             struct rlimit rlim;
8910             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8911                 return -TARGET_EFAULT;
8912             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8913             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8914             unlock_user_struct(target_rlim, arg2, 0);
8915             /*
8916              * If we just passed through resource limit settings for memory then
8917              * they would also apply to QEMU's own allocations, and QEMU will
8918              * crash or hang or die if its allocations fail. Ideally we would
8919              * track the guest allocations in QEMU and apply the limits ourselves.
8920              * For now, just tell the guest the call succeeded but don't actually
8921              * limit anything.
8922              */
8923             if (resource != RLIMIT_AS &&
8924                 resource != RLIMIT_DATA &&
8925                 resource != RLIMIT_STACK) {
8926                 return get_errno(setrlimit(resource, &rlim));
8927             } else {
8928                 return 0;
8929             }
8930         }
8931 #endif
8932 #ifdef TARGET_NR_getrlimit
8933     case TARGET_NR_getrlimit:
8934         {
8935             int resource = target_to_host_resource(arg1);
8936             struct target_rlimit *target_rlim;
8937             struct rlimit rlim;
8938 
8939             ret = get_errno(getrlimit(resource, &rlim));
8940             if (!is_error(ret)) {
8941                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8942                     return -TARGET_EFAULT;
8943                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8944                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8945                 unlock_user_struct(target_rlim, arg2, 1);
8946             }
8947         }
8948         return ret;
8949 #endif
8950     case TARGET_NR_getrusage:
8951         {
8952             struct rusage rusage;
8953             ret = get_errno(getrusage(arg1, &rusage));
8954             if (!is_error(ret)) {
8955                 ret = host_to_target_rusage(arg2, &rusage);
8956             }
8957         }
8958         return ret;
8959 #if defined(TARGET_NR_gettimeofday)
8960     case TARGET_NR_gettimeofday:
8961         {
8962             struct timeval tv;
8963             struct timezone tz;
8964 
8965             ret = get_errno(gettimeofday(&tv, &tz));
8966             if (!is_error(ret)) {
8967                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
8968                     return -TARGET_EFAULT;
8969                 }
8970                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
8971                     return -TARGET_EFAULT;
8972                 }
8973             }
8974         }
8975         return ret;
8976 #endif
8977 #if defined(TARGET_NR_settimeofday)
8978     case TARGET_NR_settimeofday:
8979         {
8980             struct timeval tv, *ptv = NULL;
8981             struct timezone tz, *ptz = NULL;
8982 
8983             if (arg1) {
8984                 if (copy_from_user_timeval(&tv, arg1)) {
8985                     return -TARGET_EFAULT;
8986                 }
8987                 ptv = &tv;
8988             }
8989 
8990             if (arg2) {
8991                 if (copy_from_user_timezone(&tz, arg2)) {
8992                     return -TARGET_EFAULT;
8993                 }
8994                 ptz = &tz;
8995             }
8996 
8997             return get_errno(settimeofday(ptv, ptz));
8998         }
8999 #endif
9000 #if defined(TARGET_NR_select)
9001     case TARGET_NR_select:
9002 #if defined(TARGET_WANT_NI_OLD_SELECT)
9003         /* Some architectures used to have old_select here,
9004          * but now return ENOSYS for it.
9005          */
9006         ret = -TARGET_ENOSYS;
9007 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9008         ret = do_old_select(arg1);
9009 #else
9010         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9011 #endif
9012         return ret;
9013 #endif
9014 #ifdef TARGET_NR_pselect6
9015     case TARGET_NR_pselect6:
9016         {
9017             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9018             fd_set rfds, wfds, efds;
9019             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9020             struct timespec ts, *ts_ptr;
9021 
9022             /*
9023              * The 6th arg is actually two args smashed together,
9024              * so we cannot use the C library.
9025              */
9026             sigset_t set;
9027             struct {
9028                 sigset_t *set;
9029                 size_t size;
9030             } sig, *sig_ptr;
9031 
9032             abi_ulong arg_sigset, arg_sigsize, *arg7;
9033             target_sigset_t *target_sigset;
9034 
9035             n = arg1;
9036             rfd_addr = arg2;
9037             wfd_addr = arg3;
9038             efd_addr = arg4;
9039             ts_addr = arg5;
9040 
9041             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9042             if (ret) {
9043                 return ret;
9044             }
9045             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9046             if (ret) {
9047                 return ret;
9048             }
9049             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9050             if (ret) {
9051                 return ret;
9052             }
9053 
9054             /*
9055              * This takes a timespec, and not a timeval, so we cannot
9056              * use the do_select() helper ...
9057              */
9058             if (ts_addr) {
9059                 if (target_to_host_timespec(&ts, ts_addr)) {
9060                     return -TARGET_EFAULT;
9061                 }
9062                 ts_ptr = &ts;
9063             } else {
9064                 ts_ptr = NULL;
9065             }
9066 
9067             /* Extract the two packed args for the sigset */
9068             if (arg6) {
9069                 sig_ptr = &sig;
9070                 sig.size = SIGSET_T_SIZE;
9071 
9072                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9073                 if (!arg7) {
9074                     return -TARGET_EFAULT;
9075                 }
9076                 arg_sigset = tswapal(arg7[0]);
9077                 arg_sigsize = tswapal(arg7[1]);
9078                 unlock_user(arg7, arg6, 0);
9079 
9080                 if (arg_sigset) {
9081                     sig.set = &set;
9082                     if (arg_sigsize != sizeof(*target_sigset)) {
9083                         /* Like the kernel, we enforce correct size sigsets */
9084                         return -TARGET_EINVAL;
9085                     }
9086                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
9087                                               sizeof(*target_sigset), 1);
9088                     if (!target_sigset) {
9089                         return -TARGET_EFAULT;
9090                     }
9091                     target_to_host_sigset(&set, target_sigset);
9092                     unlock_user(target_sigset, arg_sigset, 0);
9093                 } else {
9094                     sig.set = NULL;
9095                 }
9096             } else {
9097                 sig_ptr = NULL;
9098             }
9099 
9100             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9101                                           ts_ptr, sig_ptr));
9102 
9103             if (!is_error(ret)) {
9104                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9105                     return -TARGET_EFAULT;
9106                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9107                     return -TARGET_EFAULT;
9108                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9109                     return -TARGET_EFAULT;
9110 
9111                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9112                     return -TARGET_EFAULT;
9113             }
9114         }
9115         return ret;
9116 #endif
9117 #ifdef TARGET_NR_symlink
9118     case TARGET_NR_symlink:
9119         {
9120             void *p2;
9121             p = lock_user_string(arg1);
9122             p2 = lock_user_string(arg2);
9123             if (!p || !p2)
9124                 ret = -TARGET_EFAULT;
9125             else
9126                 ret = get_errno(symlink(p, p2));
9127             unlock_user(p2, arg2, 0);
9128             unlock_user(p, arg1, 0);
9129         }
9130         return ret;
9131 #endif
9132 #if defined(TARGET_NR_symlinkat)
9133     case TARGET_NR_symlinkat:
9134         {
9135             void *p2;
9136             p  = lock_user_string(arg1);
9137             p2 = lock_user_string(arg3);
9138             if (!p || !p2)
9139                 ret = -TARGET_EFAULT;
9140             else
9141                 ret = get_errno(symlinkat(p, arg2, p2));
9142             unlock_user(p2, arg3, 0);
9143             unlock_user(p, arg1, 0);
9144         }
9145         return ret;
9146 #endif
9147 #ifdef TARGET_NR_readlink
9148     case TARGET_NR_readlink:
9149         {
9150             void *p2;
9151             p = lock_user_string(arg1);
9152             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9153             if (!p || !p2) {
9154                 ret = -TARGET_EFAULT;
9155             } else if (!arg3) {
9156                 /* Short circuit this for the magic exe check. */
9157                 ret = -TARGET_EINVAL;
9158             } else if (is_proc_myself((const char *)p, "exe")) {
9159                 char real[PATH_MAX], *temp;
9160                 temp = realpath(exec_path, real);
9161                 /* Return value is # of bytes that we wrote to the buffer. */
9162                 if (temp == NULL) {
9163                     ret = get_errno(-1);
9164                 } else {
9165                     /* Don't worry about sign mismatch as earlier mapping
9166                      * logic would have thrown a bad address error. */
9167                     ret = MIN(strlen(real), arg3);
9168                     /* We cannot NUL terminate the string. */
9169                     memcpy(p2, real, ret);
9170                 }
9171             } else {
9172                 ret = get_errno(readlink(path(p), p2, arg3));
9173             }
9174             unlock_user(p2, arg2, ret);
9175             unlock_user(p, arg1, 0);
9176         }
9177         return ret;
9178 #endif
9179 #if defined(TARGET_NR_readlinkat)
9180     case TARGET_NR_readlinkat:
9181         {
9182             void *p2;
9183             p  = lock_user_string(arg2);
9184             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9185             if (!p || !p2) {
9186                 ret = -TARGET_EFAULT;
9187             } else if (is_proc_myself((const char *)p, "exe")) {
9188                 char real[PATH_MAX], *temp;
9189                 temp = realpath(exec_path, real);
9190             ret = temp == NULL ? get_errno(-1) : strlen(real);
9191                 snprintf((char *)p2, arg4, "%s", real);
9192             } else {
9193                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9194             }
9195             unlock_user(p2, arg3, ret);
9196             unlock_user(p, arg2, 0);
9197         }
9198         return ret;
9199 #endif
9200 #ifdef TARGET_NR_swapon
9201     case TARGET_NR_swapon:
9202         if (!(p = lock_user_string(arg1)))
9203             return -TARGET_EFAULT;
9204         ret = get_errno(swapon(p, arg2));
9205         unlock_user(p, arg1, 0);
9206         return ret;
9207 #endif
9208     case TARGET_NR_reboot:
9209         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9210            /* arg4 must be ignored in all other cases */
9211            p = lock_user_string(arg4);
9212            if (!p) {
9213                return -TARGET_EFAULT;
9214            }
9215            ret = get_errno(reboot(arg1, arg2, arg3, p));
9216            unlock_user(p, arg4, 0);
9217         } else {
9218            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9219         }
9220         return ret;
9221 #ifdef TARGET_NR_mmap
9222     case TARGET_NR_mmap:
9223 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9224     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9225     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9226     || defined(TARGET_S390X)
9227         {
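                 /*
                  * On these targets the old mmap syscall takes a single
                  * guest pointer to a block of six arguments, so fetch
                  * and byte-swap them before calling target_mmap().
                  */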
9228             abi_ulong *v;
9229             abi_ulong v1, v2, v3, v4, v5, v6;
9230             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9231                 return -TARGET_EFAULT;
9232             v1 = tswapal(v[0]);
9233             v2 = tswapal(v[1]);
9234             v3 = tswapal(v[2]);
9235             v4 = tswapal(v[3]);
9236             v5 = tswapal(v[4]);
9237             v6 = tswapal(v[5]);
9238             unlock_user(v, arg1, 0);
9239             ret = get_errno(target_mmap(v1, v2, v3,
9240                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9241                                         v5, v6));
9242         }
9243 #else
9244         ret = get_errno(target_mmap(arg1, arg2, arg3,
9245                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9246                                     arg5,
9247                                     arg6));
9248 #endif
9249         return ret;
9250 #endif
9251 #ifdef TARGET_NR_mmap2
9252     case TARGET_NR_mmap2:
9253 #ifndef MMAP_SHIFT
9254 #define MMAP_SHIFT 12
9255 #endif
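             /*
              * The guest passes the file offset in units of
              * (1 << MMAP_SHIFT) bytes, so scale it to a byte offset here.
              */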
9256         ret = target_mmap(arg1, arg2, arg3,
9257                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9258                           arg5, arg6 << MMAP_SHIFT);
9259         return get_errno(ret);
9260 #endif
9261     case TARGET_NR_munmap:
9262         return get_errno(target_munmap(arg1, arg2));
9263     case TARGET_NR_mprotect:
9264         {
9265             TaskState *ts = cpu->opaque;
9266             /* Special hack to detect libc making the stack executable.  */
9267             if ((arg3 & PROT_GROWSDOWN)
9268                 && arg1 >= ts->info->stack_limit
9269                 && arg1 <= ts->info->start_stack) {
9270                 arg3 &= ~PROT_GROWSDOWN;
9271                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9272                 arg1 = ts->info->stack_limit;
9273             }
9274         }
9275         return get_errno(target_mprotect(arg1, arg2, arg3));
9276 #ifdef TARGET_NR_mremap
9277     case TARGET_NR_mremap:
9278         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9279 #endif
9280         /* ??? msync/mlock/munlock are broken for softmmu.  */
9281 #ifdef TARGET_NR_msync
9282     case TARGET_NR_msync:
9283         return get_errno(msync(g2h(arg1), arg2, arg3));
9284 #endif
9285 #ifdef TARGET_NR_mlock
9286     case TARGET_NR_mlock:
9287         return get_errno(mlock(g2h(arg1), arg2));
9288 #endif
9289 #ifdef TARGET_NR_munlock
9290     case TARGET_NR_munlock:
9291         return get_errno(munlock(g2h(arg1), arg2));
9292 #endif
9293 #ifdef TARGET_NR_mlockall
9294     case TARGET_NR_mlockall:
9295         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9296 #endif
9297 #ifdef TARGET_NR_munlockall
9298     case TARGET_NR_munlockall:
9299         return get_errno(munlockall());
9300 #endif
9301 #ifdef TARGET_NR_truncate
9302     case TARGET_NR_truncate:
9303         if (!(p = lock_user_string(arg1)))
9304             return -TARGET_EFAULT;
9305         ret = get_errno(truncate(p, arg2));
9306         unlock_user(p, arg1, 0);
9307         return ret;
9308 #endif
9309 #ifdef TARGET_NR_ftruncate
9310     case TARGET_NR_ftruncate:
9311         return get_errno(ftruncate(arg1, arg2));
9312 #endif
9313     case TARGET_NR_fchmod:
9314         return get_errno(fchmod(arg1, arg2));
9315 #if defined(TARGET_NR_fchmodat)
9316     case TARGET_NR_fchmodat:
9317         if (!(p = lock_user_string(arg2)))
9318             return -TARGET_EFAULT;
9319         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9320         unlock_user(p, arg2, 0);
9321         return ret;
9322 #endif
9323     case TARGET_NR_getpriority:
9324         /* Note that negative values are valid for getpriority, so we must
9325            differentiate based on errno settings.  */
9326         errno = 0;
9327         ret = getpriority(arg1, arg2);
9328         if (ret == -1 && errno != 0) {
9329             return -host_to_target_errno(errno);
9330         }
9331 #ifdef TARGET_ALPHA
9332         /* Return value is the unbiased priority.  Signal no error.  */
9333         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9334 #else
9335         /* Return value is a biased priority to avoid negative numbers.  */
9336         ret = 20 - ret;
9337 #endif
9338         return ret;
9339     case TARGET_NR_setpriority:
9340         return get_errno(setpriority(arg1, arg2, arg3));
9341 #ifdef TARGET_NR_statfs
9342     case TARGET_NR_statfs:
9343         if (!(p = lock_user_string(arg1))) {
9344             return -TARGET_EFAULT;
9345         }
9346         ret = get_errno(statfs(path(p), &stfs));
9347         unlock_user(p, arg1, 0);
9348     convert_statfs:
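             /* TARGET_NR_fstatfs jumps here to share this conversion. */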
9349         if (!is_error(ret)) {
9350             struct target_statfs *target_stfs;
9351 
9352             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9353                 return -TARGET_EFAULT;
9354             __put_user(stfs.f_type, &target_stfs->f_type);
9355             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9356             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9357             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9358             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9359             __put_user(stfs.f_files, &target_stfs->f_files);
9360             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9361             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9362             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9363             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9364             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9365 #ifdef _STATFS_F_FLAGS
9366             __put_user(stfs.f_flags, &target_stfs->f_flags);
9367 #else
9368             __put_user(0, &target_stfs->f_flags);
9369 #endif
9370             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9371             unlock_user_struct(target_stfs, arg2, 1);
9372         }
9373         return ret;
9374 #endif
9375 #ifdef TARGET_NR_fstatfs
9376     case TARGET_NR_fstatfs:
9377         ret = get_errno(fstatfs(arg1, &stfs));
9378         goto convert_statfs;
9379 #endif
9380 #ifdef TARGET_NR_statfs64
9381     case TARGET_NR_statfs64:
9382         if (!(p = lock_user_string(arg1))) {
9383             return -TARGET_EFAULT;
9384         }
9385         ret = get_errno(statfs(path(p), &stfs));
9386         unlock_user(p, arg1, 0);
9387     convert_statfs64:
9388         if (!is_error(ret)) {
9389             struct target_statfs64 *target_stfs;
9390 
9391             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9392                 return -TARGET_EFAULT;
9393             __put_user(stfs.f_type, &target_stfs->f_type);
9394             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9395             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9396             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9397             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9398             __put_user(stfs.f_files, &target_stfs->f_files);
9399             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9400             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9401             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9402             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9403             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9404             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9405             unlock_user_struct(target_stfs, arg3, 1);
9406         }
9407         return ret;
9408     case TARGET_NR_fstatfs64:
9409         ret = get_errno(fstatfs(arg1, &stfs));
9410         goto convert_statfs64;
9411 #endif
9412 #ifdef TARGET_NR_socketcall
9413     case TARGET_NR_socketcall:
9414         return do_socketcall(arg1, arg2);
9415 #endif
9416 #ifdef TARGET_NR_accept
9417     case TARGET_NR_accept:
9418         return do_accept4(arg1, arg2, arg3, 0);
9419 #endif
9420 #ifdef TARGET_NR_accept4
9421     case TARGET_NR_accept4:
9422         return do_accept4(arg1, arg2, arg3, arg4);
9423 #endif
9424 #ifdef TARGET_NR_bind
9425     case TARGET_NR_bind:
9426         return do_bind(arg1, arg2, arg3);
9427 #endif
9428 #ifdef TARGET_NR_connect
9429     case TARGET_NR_connect:
9430         return do_connect(arg1, arg2, arg3);
9431 #endif
9432 #ifdef TARGET_NR_getpeername
9433     case TARGET_NR_getpeername:
9434         return do_getpeername(arg1, arg2, arg3);
9435 #endif
9436 #ifdef TARGET_NR_getsockname
9437     case TARGET_NR_getsockname:
9438         return do_getsockname(arg1, arg2, arg3);
9439 #endif
9440 #ifdef TARGET_NR_getsockopt
9441     case TARGET_NR_getsockopt:
9442         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9443 #endif
9444 #ifdef TARGET_NR_listen
9445     case TARGET_NR_listen:
9446         return get_errno(listen(arg1, arg2));
9447 #endif
9448 #ifdef TARGET_NR_recv
9449     case TARGET_NR_recv:
9450         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9451 #endif
9452 #ifdef TARGET_NR_recvfrom
9453     case TARGET_NR_recvfrom:
9454         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9455 #endif
9456 #ifdef TARGET_NR_recvmsg
9457     case TARGET_NR_recvmsg:
9458         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9459 #endif
9460 #ifdef TARGET_NR_send
9461     case TARGET_NR_send:
9462         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9463 #endif
9464 #ifdef TARGET_NR_sendmsg
9465     case TARGET_NR_sendmsg:
9466         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9467 #endif
9468 #ifdef TARGET_NR_sendmmsg
9469     case TARGET_NR_sendmmsg:
9470         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9471 #endif
9472 #ifdef TARGET_NR_recvmmsg
9473     case TARGET_NR_recvmmsg:
9474         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9475 #endif
9476 #ifdef TARGET_NR_sendto
9477     case TARGET_NR_sendto:
9478         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9479 #endif
9480 #ifdef TARGET_NR_shutdown
9481     case TARGET_NR_shutdown:
9482         return get_errno(shutdown(arg1, arg2));
9483 #endif
9484 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9485     case TARGET_NR_getrandom:
9486         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9487         if (!p) {
9488             return -TARGET_EFAULT;
9489         }
9490         ret = get_errno(getrandom(p, arg2, arg3));
9491         unlock_user(p, arg1, ret);
9492         return ret;
9493 #endif
9494 #ifdef TARGET_NR_socket
9495     case TARGET_NR_socket:
9496         return do_socket(arg1, arg2, arg3);
9497 #endif
9498 #ifdef TARGET_NR_socketpair
9499     case TARGET_NR_socketpair:
9500         return do_socketpair(arg1, arg2, arg3, arg4);
9501 #endif
9502 #ifdef TARGET_NR_setsockopt
9503     case TARGET_NR_setsockopt:
9504         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9505 #endif
9506 #if defined(TARGET_NR_syslog)
9507     case TARGET_NR_syslog:
9508         {
9509             int len = arg2;
9510 
9511             switch (arg1) {
9512             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9513             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9514             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9515             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9516             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9517             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9518             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9519             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9520                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9521             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9522             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9523             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9524                 {
9525                     if (len < 0) {
9526                         return -TARGET_EINVAL;
9527                     }
9528                     if (len == 0) {
9529                         return 0;
9530                     }
9531                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9532                     if (!p) {
9533                         return -TARGET_EFAULT;
9534                     }
9535                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9536                     unlock_user(p, arg2, arg3);
9537                 }
9538                 return ret;
9539             default:
9540                 return -TARGET_EINVAL;
9541             }
9542         }
9543         break;
9544 #endif
9545     case TARGET_NR_setitimer:
9546         {
9547             struct itimerval value, ovalue, *pvalue;
9548 
9549             if (arg2) {
9550                 pvalue = &value;
9551                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9552                     || copy_from_user_timeval(&pvalue->it_value,
9553                                               arg2 + sizeof(struct target_timeval)))
9554                     return -TARGET_EFAULT;
9555             } else {
9556                 pvalue = NULL;
9557             }
9558             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9559             if (!is_error(ret) && arg3) {
9560                 if (copy_to_user_timeval(arg3,
9561                                          &ovalue.it_interval)
9562                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9563                                             &ovalue.it_value))
9564                     return -TARGET_EFAULT;
9565             }
9566         }
9567         return ret;
9568     case TARGET_NR_getitimer:
9569         {
9570             struct itimerval value;
9571 
9572             ret = get_errno(getitimer(arg1, &value));
9573             if (!is_error(ret) && arg2) {
9574                 if (copy_to_user_timeval(arg2,
9575                                          &value.it_interval)
9576                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9577                                             &value.it_value))
9578                     return -TARGET_EFAULT;
9579             }
9580         }
9581         return ret;
9582 #ifdef TARGET_NR_stat
9583     case TARGET_NR_stat:
9584         if (!(p = lock_user_string(arg1))) {
9585             return -TARGET_EFAULT;
9586         }
9587         ret = get_errno(stat(path(p), &st));
9588         unlock_user(p, arg1, 0);
9589         goto do_stat;
9590 #endif
9591 #ifdef TARGET_NR_lstat
9592     case TARGET_NR_lstat:
9593         if (!(p = lock_user_string(arg1))) {
9594             return -TARGET_EFAULT;
9595         }
9596         ret = get_errno(lstat(path(p), &st));
9597         unlock_user(p, arg1, 0);
9598         goto do_stat;
9599 #endif
9600 #ifdef TARGET_NR_fstat
9601     case TARGET_NR_fstat:
9602         {
9603             ret = get_errno(fstat(arg1, &st));
9604 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9605         do_stat:
9606 #endif
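                 /* stat, lstat and fstat converge here to copy the host
                  * struct stat into the guest's target_stat layout. */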
9607             if (!is_error(ret)) {
9608                 struct target_stat *target_st;
9609 
9610                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9611                     return -TARGET_EFAULT;
9612                 memset(target_st, 0, sizeof(*target_st));
9613                 __put_user(st.st_dev, &target_st->st_dev);
9614                 __put_user(st.st_ino, &target_st->st_ino);
9615                 __put_user(st.st_mode, &target_st->st_mode);
9616                 __put_user(st.st_uid, &target_st->st_uid);
9617                 __put_user(st.st_gid, &target_st->st_gid);
9618                 __put_user(st.st_nlink, &target_st->st_nlink);
9619                 __put_user(st.st_rdev, &target_st->st_rdev);
9620                 __put_user(st.st_size, &target_st->st_size);
9621                 __put_user(st.st_blksize, &target_st->st_blksize);
9622                 __put_user(st.st_blocks, &target_st->st_blocks);
9623                 __put_user(st.st_atime, &target_st->target_st_atime);
9624                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9625                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9626 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9627     defined(TARGET_STAT_HAVE_NSEC)
9628                 __put_user(st.st_atim.tv_nsec,
9629                            &target_st->target_st_atime_nsec);
9630                 __put_user(st.st_mtim.tv_nsec,
9631                            &target_st->target_st_mtime_nsec);
9632                 __put_user(st.st_ctim.tv_nsec,
9633                            &target_st->target_st_ctime_nsec);
9634 #endif
9635                 unlock_user_struct(target_st, arg2, 1);
9636             }
9637         }
9638         return ret;
9639 #endif
9640     case TARGET_NR_vhangup:
9641         return get_errno(vhangup());
9642 #ifdef TARGET_NR_syscall
9643     case TARGET_NR_syscall:
9644         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9645                           arg6, arg7, arg8, 0);
9646 #endif
9647 #if defined(TARGET_NR_wait4)
9648     case TARGET_NR_wait4:
9649         {
9650             int status;
9651             abi_long status_ptr = arg2;
9652             struct rusage rusage, *rusage_ptr;
9653             abi_ulong target_rusage = arg4;
9654             abi_long rusage_err;
9655             if (target_rusage)
9656                 rusage_ptr = &rusage;
9657             else
9658                 rusage_ptr = NULL;
9659             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9660             if (!is_error(ret)) {
9661                 if (status_ptr && ret) {
9662                     status = host_to_target_waitstatus(status);
9663                     if (put_user_s32(status, status_ptr))
9664                         return -TARGET_EFAULT;
9665                 }
9666                 if (target_rusage) {
9667                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9668                     if (rusage_err) {
9669                         ret = rusage_err;
9670                     }
9671                 }
9672             }
9673         }
9674         return ret;
9675 #endif
9676 #ifdef TARGET_NR_swapoff
9677     case TARGET_NR_swapoff:
9678         if (!(p = lock_user_string(arg1)))
9679             return -TARGET_EFAULT;
9680         ret = get_errno(swapoff(p));
9681         unlock_user(p, arg1, 0);
9682         return ret;
9683 #endif
9684     case TARGET_NR_sysinfo:
9685         {
9686             struct target_sysinfo *target_value;
9687             struct sysinfo value;
9688             ret = get_errno(sysinfo(&value));
9689             if (!is_error(ret) && arg1)
9690             {
9691                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9692                     return -TARGET_EFAULT;
9693                 __put_user(value.uptime, &target_value->uptime);
9694                 __put_user(value.loads[0], &target_value->loads[0]);
9695                 __put_user(value.loads[1], &target_value->loads[1]);
9696                 __put_user(value.loads[2], &target_value->loads[2]);
9697                 __put_user(value.totalram, &target_value->totalram);
9698                 __put_user(value.freeram, &target_value->freeram);
9699                 __put_user(value.sharedram, &target_value->sharedram);
9700                 __put_user(value.bufferram, &target_value->bufferram);
9701                 __put_user(value.totalswap, &target_value->totalswap);
9702                 __put_user(value.freeswap, &target_value->freeswap);
9703                 __put_user(value.procs, &target_value->procs);
9704                 __put_user(value.totalhigh, &target_value->totalhigh);
9705                 __put_user(value.freehigh, &target_value->freehigh);
9706                 __put_user(value.mem_unit, &target_value->mem_unit);
9707                 unlock_user_struct(target_value, arg1, 1);
9708             }
9709         }
9710         return ret;
9711 #ifdef TARGET_NR_ipc
9712     case TARGET_NR_ipc:
9713         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9714 #endif
9715 #ifdef TARGET_NR_semget
9716     case TARGET_NR_semget:
9717         return get_errno(semget(arg1, arg2, arg3));
9718 #endif
9719 #ifdef TARGET_NR_semop
9720     case TARGET_NR_semop:
9721         return do_semop(arg1, arg2, arg3);
9722 #endif
9723 #ifdef TARGET_NR_semctl
9724     case TARGET_NR_semctl:
9725         return do_semctl(arg1, arg2, arg3, arg4);
9726 #endif
9727 #ifdef TARGET_NR_msgctl
9728     case TARGET_NR_msgctl:
9729         return do_msgctl(arg1, arg2, arg3);
9730 #endif
9731 #ifdef TARGET_NR_msgget
9732     case TARGET_NR_msgget:
9733         return get_errno(msgget(arg1, arg2));
9734 #endif
9735 #ifdef TARGET_NR_msgrcv
9736     case TARGET_NR_msgrcv:
9737         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9738 #endif
9739 #ifdef TARGET_NR_msgsnd
9740     case TARGET_NR_msgsnd:
9741         return do_msgsnd(arg1, arg2, arg3, arg4);
9742 #endif
9743 #ifdef TARGET_NR_shmget
9744     case TARGET_NR_shmget:
9745         return get_errno(shmget(arg1, arg2, arg3));
9746 #endif
9747 #ifdef TARGET_NR_shmctl
9748     case TARGET_NR_shmctl:
9749         return do_shmctl(arg1, arg2, arg3);
9750 #endif
9751 #ifdef TARGET_NR_shmat
9752     case TARGET_NR_shmat:
9753         return do_shmat(cpu_env, arg1, arg2, arg3);
9754 #endif
9755 #ifdef TARGET_NR_shmdt
9756     case TARGET_NR_shmdt:
9757         return do_shmdt(arg1);
9758 #endif
9759     case TARGET_NR_fsync:
9760         return get_errno(fsync(arg1));
9761     case TARGET_NR_clone:
9762         /* Linux manages to have three different orderings for its
9763          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9764          * match the kernel's CONFIG_CLONE_* settings.
9765          * Microblaze is further special in that it uses a sixth
9766          * implicit argument to clone for the TLS pointer.
9767          */
9768 #if defined(TARGET_MICROBLAZE)
9769         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9770 #elif defined(TARGET_CLONE_BACKWARDS)
9771         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9772 #elif defined(TARGET_CLONE_BACKWARDS2)
9773         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9774 #else
9775         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9776 #endif
9777         return ret;
9778 #ifdef __NR_exit_group
9779         /* new thread calls */
9780     case TARGET_NR_exit_group:
9781         preexit_cleanup(cpu_env, arg1);
9782         return get_errno(exit_group(arg1));
9783 #endif
9784     case TARGET_NR_setdomainname:
9785         if (!(p = lock_user_string(arg1)))
9786             return -TARGET_EFAULT;
9787         ret = get_errno(setdomainname(p, arg2));
9788         unlock_user(p, arg1, 0);
9789         return ret;
9790     case TARGET_NR_uname:
9791         /* no need to transcode because we use the linux syscall */
9792         {
9793             struct new_utsname * buf;
9794 
9795             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9796                 return -TARGET_EFAULT;
9797             ret = get_errno(sys_uname(buf));
9798             if (!is_error(ret)) {
9799                 /* Overwrite the native machine name with whatever is being
9800                    emulated. */
9801                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9802                           sizeof(buf->machine));
9803                 /* Allow the user to override the reported release.  */
9804                 if (qemu_uname_release && *qemu_uname_release) {
9805                     g_strlcpy(buf->release, qemu_uname_release,
9806                               sizeof(buf->release));
9807                 }
9808             }
9809             unlock_user_struct(buf, arg1, 1);
9810         }
9811         return ret;
9812 #ifdef TARGET_I386
9813     case TARGET_NR_modify_ldt:
9814         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9815 #if !defined(TARGET_X86_64)
9816     case TARGET_NR_vm86:
9817         return do_vm86(cpu_env, arg1, arg2);
9818 #endif
9819 #endif
9820 #if defined(TARGET_NR_adjtimex)
9821     case TARGET_NR_adjtimex:
9822         {
9823             struct timex host_buf;
9824 
9825             if (target_to_host_timex(&host_buf, arg1) != 0) {
9826                 return -TARGET_EFAULT;
9827             }
9828             ret = get_errno(adjtimex(&host_buf));
9829             if (!is_error(ret)) {
9830                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9831                     return -TARGET_EFAULT;
9832                 }
9833             }
9834         }
9835         return ret;
9836 #endif
9837 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9838     case TARGET_NR_clock_adjtime:
9839         {
9840             struct timex htx, *phtx = &htx;
9841 
9842             if (target_to_host_timex(phtx, arg2) != 0) {
9843                 return -TARGET_EFAULT;
9844             }
9845             ret = get_errno(clock_adjtime(arg1, phtx));
9846             if (!is_error(ret) && phtx) {
9847                 if (host_to_target_timex(arg2, phtx) != 0) {
9848                     return -TARGET_EFAULT;
9849                 }
9850             }
9851         }
9852         return ret;
9853 #endif
9854     case TARGET_NR_getpgid:
9855         return get_errno(getpgid(arg1));
9856     case TARGET_NR_fchdir:
9857         return get_errno(fchdir(arg1));
9858     case TARGET_NR_personality:
9859         return get_errno(personality(arg1));
9860 #ifdef TARGET_NR__llseek /* Not on alpha */
9861     case TARGET_NR__llseek:
9862         {
9863             int64_t res;
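                 /*
                  * arg2 and arg3 hold the high and low halves of the 64-bit
                  * offset; the result is written back through the guest
                  * pointer in arg4.
                  */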
9864 #if !defined(__NR_llseek)
9865             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9866             if (res == -1) {
9867                 ret = get_errno(res);
9868             } else {
9869                 ret = 0;
9870             }
9871 #else
9872             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9873 #endif
9874             if ((ret == 0) && put_user_s64(res, arg4)) {
9875                 return -TARGET_EFAULT;
9876             }
9877         }
9878         return ret;
9879 #endif
9880 #ifdef TARGET_NR_getdents
9881     case TARGET_NR_getdents:
9882 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9883 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9884         {
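                 /*
                  * The host linux_dirent layout differs from the 32-bit
                  * target_dirent, so read into a bounce buffer and repack
                  * each record, byte-swapped, into the guest buffer.
                  */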
9885             struct target_dirent *target_dirp;
9886             struct linux_dirent *dirp;
9887             abi_long count = arg3;
9888 
9889             dirp = g_try_malloc(count);
9890             if (!dirp) {
9891                 return -TARGET_ENOMEM;
9892             }
9893 
9894             ret = get_errno(sys_getdents(arg1, dirp, count));
9895             if (!is_error(ret)) {
9896                 struct linux_dirent *de;
9897                 struct target_dirent *tde;
9898                 int len = ret;
9899                 int reclen, treclen;
9900                 int count1, tnamelen;
9901 
9902                 count1 = 0;
9903                 de = dirp;
9904                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9905                     return -TARGET_EFAULT;
9906                 tde = target_dirp;
9907                 while (len > 0) {
9908                     reclen = de->d_reclen;
9909                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9910                     assert(tnamelen >= 0);
9911                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9912                     assert(count1 + treclen <= count);
9913                     tde->d_reclen = tswap16(treclen);
9914                     tde->d_ino = tswapal(de->d_ino);
9915                     tde->d_off = tswapal(de->d_off);
9916                     memcpy(tde->d_name, de->d_name, tnamelen);
9917                     de = (struct linux_dirent *)((char *)de + reclen);
9918                     len -= reclen;
9919                     tde = (struct target_dirent *)((char *)tde + treclen);
9920                     count1 += treclen;
9921                 }
9922                 ret = count1;
9923                 unlock_user(target_dirp, arg2, ret);
9924             }
9925             g_free(dirp);
9926         }
9927 #else
9928         {
9929             struct linux_dirent *dirp;
9930             abi_long count = arg3;
9931 
9932             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9933                 return -TARGET_EFAULT;
9934             ret = get_errno(sys_getdents(arg1, dirp, count));
9935             if (!is_error(ret)) {
9936                 struct linux_dirent *de;
9937                 int len = ret;
9938                 int reclen;
9939                 de = dirp;
9940                 while (len > 0) {
9941                     reclen = de->d_reclen;
9942                     if (reclen > len)
9943                         break;
9944                     de->d_reclen = tswap16(reclen);
9945                     tswapls(&de->d_ino);
9946                     tswapls(&de->d_off);
9947                     de = (struct linux_dirent *)((char *)de + reclen);
9948                     len -= reclen;
9949                 }
9950             }
9951             unlock_user(dirp, arg2, ret);
9952         }
9953 #endif
9954 #else
9955         /* Implement getdents in terms of getdents64 */
9956         {
9957             struct linux_dirent64 *dirp;
9958             abi_long count = arg3;
9959 
9960             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9961             if (!dirp) {
9962                 return -TARGET_EFAULT;
9963             }
9964             ret = get_errno(sys_getdents64(arg1, dirp, count));
9965             if (!is_error(ret)) {
9966                 /* Convert the dirent64 structs to target dirent.  We do this
9967                  * in-place, since we can guarantee that a target_dirent is no
9968                  * larger than a dirent64; however this means we have to be
9969                  * careful to read everything before writing in the new format.
9970                  */
9971                 struct linux_dirent64 *de;
9972                 struct target_dirent *tde;
9973                 int len = ret;
9974                 int tlen = 0;
9975 
9976                 de = dirp;
9977                 tde = (struct target_dirent *)dirp;
9978                 while (len > 0) {
9979                     int namelen, treclen;
9980                     int reclen = de->d_reclen;
9981                     uint64_t ino = de->d_ino;
9982                     int64_t off = de->d_off;
9983                     uint8_t type = de->d_type;
9984 
9985                     namelen = strlen(de->d_name);
9986                     treclen = offsetof(struct target_dirent, d_name)
9987                         + namelen + 2;
9988                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9989 
9990                     memmove(tde->d_name, de->d_name, namelen + 1);
9991                     tde->d_ino = tswapal(ino);
9992                     tde->d_off = tswapal(off);
9993                     tde->d_reclen = tswap16(treclen);
9994                     /* The target_dirent type is in what was formerly a padding
9995                      * byte at the end of the structure:
9996                      */
9997                     *(((char *)tde) + treclen - 1) = type;
9998 
9999                     de = (struct linux_dirent64 *)((char *)de + reclen);
10000                     tde = (struct target_dirent *)((char *)tde + treclen);
10001                     len -= reclen;
10002                     tlen += treclen;
10003                 }
10004                 ret = tlen;
10005             }
10006             unlock_user(dirp, arg2, ret);
10007         }
10008 #endif
10009         return ret;
10010 #endif /* TARGET_NR_getdents */
10011 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10012     case TARGET_NR_getdents64:
10013         {
10014             struct linux_dirent64 *dirp;
10015             abi_long count = arg3;
10016             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10017                 return -TARGET_EFAULT;
10018             ret = get_errno(sys_getdents64(arg1, dirp, count));
10019             if (!is_error(ret)) {
10020                 struct linux_dirent64 *de;
10021                 int len = ret;
10022                 int reclen;
10023                 de = dirp;
10024                 while (len > 0) {
10025                     reclen = de->d_reclen;
10026                     if (reclen > len)
10027                         break;
10028                     de->d_reclen = tswap16(reclen);
10029                     tswap64s((uint64_t *)&de->d_ino);
10030                     tswap64s((uint64_t *)&de->d_off);
10031                     de = (struct linux_dirent64 *)((char *)de + reclen);
10032                     len -= reclen;
10033                 }
10034             }
10035             unlock_user(dirp, arg2, ret);
10036         }
10037         return ret;
10038 #endif /* TARGET_NR_getdents64 */
10039 #if defined(TARGET_NR__newselect)
10040     case TARGET_NR__newselect:
10041         return do_select(arg1, arg2, arg3, arg4, arg5);
10042 #endif
10043 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10044 # ifdef TARGET_NR_poll
10045     case TARGET_NR_poll:
10046 # endif
10047 # ifdef TARGET_NR_ppoll
10048     case TARGET_NR_ppoll:
10049 # endif
10050         {
10051             struct target_pollfd *target_pfd;
10052             unsigned int nfds = arg2;
10053             struct pollfd *pfd;
10054             unsigned int i;
10055 
10056             pfd = NULL;
10057             target_pfd = NULL;
10058             if (nfds) {
10059                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10060                     return -TARGET_EINVAL;
10061                 }
10062 
10063                 target_pfd = lock_user(VERIFY_WRITE, arg1,
10064                                        sizeof(struct target_pollfd) * nfds, 1);
10065                 if (!target_pfd) {
10066                     return -TARGET_EFAULT;
10067                 }
10068 
10069                 pfd = alloca(sizeof(struct pollfd) * nfds);
10070                 for (i = 0; i < nfds; i++) {
10071                     pfd[i].fd = tswap32(target_pfd[i].fd);
10072                     pfd[i].events = tswap16(target_pfd[i].events);
10073                 }
10074             }
10075 
10076             switch (num) {
10077 # ifdef TARGET_NR_ppoll
10078             case TARGET_NR_ppoll:
10079             {
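                      /*
                       * Convert the guest timeout and signal mask (if any)
                       * to host format, call safe_ppoll(), then write the
                       * remaining time back to the guest.
                       */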
10080                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10081                 target_sigset_t *target_set;
10082                 sigset_t _set, *set = &_set;
10083 
10084                 if (arg3) {
10085                     if (target_to_host_timespec(timeout_ts, arg3)) {
10086                         unlock_user(target_pfd, arg1, 0);
10087                         return -TARGET_EFAULT;
10088                     }
10089                 } else {
10090                     timeout_ts = NULL;
10091                 }
10092 
10093                 if (arg4) {
10094                     if (arg5 != sizeof(target_sigset_t)) {
10095                         unlock_user(target_pfd, arg1, 0);
10096                         return -TARGET_EINVAL;
10097                     }
10098 
10099                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10100                     if (!target_set) {
10101                         unlock_user(target_pfd, arg1, 0);
10102                         return -TARGET_EFAULT;
10103                     }
10104                     target_to_host_sigset(set, target_set);
10105                 } else {
10106                     set = NULL;
10107                 }
10108 
10109                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10110                                            set, SIGSET_T_SIZE));
10111 
10112                 if (!is_error(ret) && arg3) {
10113                     host_to_target_timespec(arg3, timeout_ts);
10114                 }
10115                 if (arg4) {
10116                     unlock_user(target_set, arg4, 0);
10117                 }
10118                 break;
10119             }
10120 # endif
10121 # ifdef TARGET_NR_poll
10122             case TARGET_NR_poll:
10123             {
10124                 struct timespec ts, *pts;
10125 
10126                 if (arg3 >= 0) {
10127                     /* Convert ms to secs, ns */
10128                     ts.tv_sec = arg3 / 1000;
10129                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10130                     pts = &ts;
10131                 } else {
10132                     /* -ve poll() timeout means "infinite" */
10133                     pts = NULL;
10134                 }
10135                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10136                 break;
10137             }
10138 # endif
10139             default:
10140                 g_assert_not_reached();
10141             }
10142 
10143             if (!is_error(ret)) {
10144                 for(i = 0; i < nfds; i++) {
10145                     target_pfd[i].revents = tswap16(pfd[i].revents);
10146                 }
10147             }
10148             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10149         }
10150         return ret;
10151 #endif
10152     case TARGET_NR_flock:
10153         /* NOTE: the flock constant seems to be the same for every
10154            Linux platform */
10155         return get_errno(safe_flock(arg1, arg2));
10156     case TARGET_NR_readv:
10157         {
10158             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10159             if (vec != NULL) {
10160                 ret = get_errno(safe_readv(arg1, vec, arg3));
10161                 unlock_iovec(vec, arg2, arg3, 1);
10162             } else {
10163                 ret = -host_to_target_errno(errno);
10164             }
10165         }
10166         return ret;
10167     case TARGET_NR_writev:
10168         {
10169             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10170             if (vec != NULL) {
10171                 ret = get_errno(safe_writev(arg1, vec, arg3));
10172                 unlock_iovec(vec, arg2, arg3, 0);
10173             } else {
10174                 ret = -host_to_target_errno(errno);
10175             }
10176         }
10177         return ret;
10178 #if defined(TARGET_NR_preadv)
10179     case TARGET_NR_preadv:
10180         {
10181             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10182             if (vec != NULL) {
10183                 unsigned long low, high;
10184 
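                      /*
                       * The 64-bit file offset is split across two syscall
                       * arguments; reassemble it into the low/high words
                       * that safe_preadv() expects.
                       */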
10185                 target_to_host_low_high(arg4, arg5, &low, &high);
10186                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10187                 unlock_iovec(vec, arg2, arg3, 1);
10188             } else {
10189                 ret = -host_to_target_errno(errno);
10190             }
10191         }
10192         return ret;
10193 #endif
10194 #if defined(TARGET_NR_pwritev)
10195     case TARGET_NR_pwritev:
10196         {
10197             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10198             if (vec != NULL) {
10199                 unsigned long low, high;
10200 
10201                 target_to_host_low_high(arg4, arg5, &low, &high);
10202                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10203                 unlock_iovec(vec, arg2, arg3, 0);
10204             } else {
10205                 ret = -host_to_target_errno(errno);
10206             }
10207         }
10208         return ret;
10209 #endif
10210     case TARGET_NR_getsid:
10211         return get_errno(getsid(arg1));
10212 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10213     case TARGET_NR_fdatasync:
10214         return get_errno(fdatasync(arg1));
10215 #endif
10216 #ifdef TARGET_NR__sysctl
10217     case TARGET_NR__sysctl:
10218         /* We don't implement this, but ENOTDIR is always a safe
10219            return value. */
10220         return -TARGET_ENOTDIR;
10221 #endif
10222     case TARGET_NR_sched_getaffinity:
10223         {
10224             unsigned int mask_size;
10225             unsigned long *mask;
10226 
10227             /*
10228              * sched_getaffinity needs multiples of ulong, so need to take
10229              * care of mismatches between target ulong and host ulong sizes.
10230              */
10231             if (arg2 & (sizeof(abi_ulong) - 1)) {
10232                 return -TARGET_EINVAL;
10233             }
10234             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10235 
10236             mask = alloca(mask_size);
10237             memset(mask, 0, mask_size);
10238             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10239 
10240             if (!is_error(ret)) {
10241                 if (ret > arg2) {
10242                     /* More data returned than the caller's buffer will fit.
10243                      * This only happens if sizeof(abi_long) < sizeof(long)
10244                      * and the caller passed us a buffer holding an odd number
10245                      * of abi_longs. If the host kernel is actually using the
10246                      * extra 4 bytes then fail EINVAL; otherwise we can just
10247                      * ignore them and only copy the interesting part.
10248                      */
10249                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10250                     if (numcpus > arg2 * 8) {
10251                         return -TARGET_EINVAL;
10252                     }
10253                     ret = arg2;
10254                 }
10255 
10256                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10257                     return -TARGET_EFAULT;
10258                 }
10259             }
10260         }
10261         return ret;
10262     case TARGET_NR_sched_setaffinity:
10263         {
10264             unsigned int mask_size;
10265             unsigned long *mask;
10266 
10267             /*
10268              * sched_setaffinity needs multiples of ulong, so need to take
10269              * care of mismatches between target ulong and host ulong sizes.
10270              */
10271             if (arg2 & (sizeof(abi_ulong) - 1)) {
10272                 return -TARGET_EINVAL;
10273             }
10274             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10275             mask = alloca(mask_size);
10276 
10277             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10278             if (ret) {
10279                 return ret;
10280             }
10281 
10282             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10283         }
10284     case TARGET_NR_getcpu:
10285         {
10286             unsigned cpu, node;
10287             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10288                                        arg2 ? &node : NULL,
10289                                        NULL));
10290             if (is_error(ret)) {
10291                 return ret;
10292             }
10293             if (arg1 && put_user_u32(cpu, arg1)) {
10294                 return -TARGET_EFAULT;
10295             }
10296             if (arg2 && put_user_u32(node, arg2)) {
10297                 return -TARGET_EFAULT;
10298             }
10299         }
10300         return ret;
10301     case TARGET_NR_sched_setparam:
10302         {
10303             struct sched_param *target_schp;
10304             struct sched_param schp;
10305 
10306             if (arg2 == 0) {
10307                 return -TARGET_EINVAL;
10308             }
10309             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10310                 return -TARGET_EFAULT;
10311             schp.sched_priority = tswap32(target_schp->sched_priority);
10312             unlock_user_struct(target_schp, arg2, 0);
10313             return get_errno(sched_setparam(arg1, &schp));
10314         }
10315     case TARGET_NR_sched_getparam:
10316         {
10317             struct sched_param *target_schp;
10318             struct sched_param schp;
10319 
10320             if (arg2 == 0) {
10321                 return -TARGET_EINVAL;
10322             }
10323             ret = get_errno(sched_getparam(arg1, &schp));
10324             if (!is_error(ret)) {
10325                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10326                     return -TARGET_EFAULT;
10327                 target_schp->sched_priority = tswap32(schp.sched_priority);
10328                 unlock_user_struct(target_schp, arg2, 1);
10329             }
10330         }
10331         return ret;
10332     case TARGET_NR_sched_setscheduler:
10333         {
10334             struct sched_param *target_schp;
10335             struct sched_param schp;
10336             if (arg3 == 0) {
10337                 return -TARGET_EINVAL;
10338             }
10339             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10340                 return -TARGET_EFAULT;
10341             schp.sched_priority = tswap32(target_schp->sched_priority);
10342             unlock_user_struct(target_schp, arg3, 0);
10343             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10344         }
10345     case TARGET_NR_sched_getscheduler:
10346         return get_errno(sched_getscheduler(arg1));
10347     case TARGET_NR_sched_yield:
10348         return get_errno(sched_yield());
10349     case TARGET_NR_sched_get_priority_max:
10350         return get_errno(sched_get_priority_max(arg1));
10351     case TARGET_NR_sched_get_priority_min:
10352         return get_errno(sched_get_priority_min(arg1));
10353 #ifdef TARGET_NR_sched_rr_get_interval
10354     case TARGET_NR_sched_rr_get_interval:
10355         {
10356             struct timespec ts;
10357             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10358             if (!is_error(ret)) {
10359                 ret = host_to_target_timespec(arg2, &ts);
10360             }
10361         }
10362         return ret;
10363 #endif
10364 #if defined(TARGET_NR_nanosleep)
10365     case TARGET_NR_nanosleep:
10366         {
10367             struct timespec req, rem;
10368             if (target_to_host_timespec(&req, arg1)) {
                      return -TARGET_EFAULT;
                  }
10369             ret = get_errno(safe_nanosleep(&req, &rem));
10370             if (is_error(ret) && arg2) {
10371                 host_to_target_timespec(arg2, &rem);
10372             }
10373         }
10374         return ret;
10375 #endif
10376     case TARGET_NR_prctl:
10377         switch (arg1) {
10378         case PR_GET_PDEATHSIG:
10379         {
10380             int deathsig;
10381             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10382             if (!is_error(ret) && arg2
10383                 && put_user_ual(deathsig, arg2)) {
10384                 return -TARGET_EFAULT;
10385             }
10386             return ret;
10387         }
10388 #ifdef PR_GET_NAME
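              /* The task name (comm) is at most 16 bytes, including the NUL. */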
10389         case PR_GET_NAME:
10390         {
10391             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10392             if (!name) {
10393                 return -TARGET_EFAULT;
10394             }
10395             ret = get_errno(prctl(arg1, (unsigned long)name,
10396                                   arg3, arg4, arg5));
10397             unlock_user(name, arg2, 16);
10398             return ret;
10399         }
10400         case PR_SET_NAME:
10401         {
10402             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10403             if (!name) {
10404                 return -TARGET_EFAULT;
10405             }
10406             ret = get_errno(prctl(arg1, (unsigned long)name,
10407                                   arg3, arg4, arg5));
10408             unlock_user(name, arg2, 0);
10409             return ret;
10410         }
10411 #endif
10412 #ifdef TARGET_MIPS
10413         case TARGET_PR_GET_FP_MODE:
10414         {
10415             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10416             ret = 0;
10417             if (env->CP0_Status & (1 << CP0St_FR)) {
10418                 ret |= TARGET_PR_FP_MODE_FR;
10419             }
10420             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10421                 ret |= TARGET_PR_FP_MODE_FRE;
10422             }
10423             return ret;
10424         }
10425         case TARGET_PR_SET_FP_MODE:
10426         {
10427             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10428             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10429             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10430             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10431             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10432 
10433             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10434                                             TARGET_PR_FP_MODE_FRE;
10435 
10436             /* If nothing to change, return right away, successfully.  */
10437             if (old_fr == new_fr && old_fre == new_fre) {
10438                 return 0;
10439             }
10440             /* Check the value is valid */
10441             if (arg2 & ~known_bits) {
10442                 return -TARGET_EOPNOTSUPP;
10443             }
10444             /* Setting FRE without FR is not supported.  */
10445             if (new_fre && !new_fr) {
10446                 return -TARGET_EOPNOTSUPP;
10447             }
10448             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10449                 /* FR1 is not supported */
10450                 return -TARGET_EOPNOTSUPP;
10451             }
10452             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10453                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10454                 /* cannot set FR=0 */
10455                 return -TARGET_EOPNOTSUPP;
10456             }
10457             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10458                 /* Cannot set FRE=1 */
10459                 return -TARGET_EOPNOTSUPP;
10460             }
10461 
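                  /*
                   * In FR=0 mode the odd-numbered single registers alias the
                   * upper halves of the even-numbered doubles, so move those
                   * words when the FR bit changes.
                   */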
10462             int i;
10463             fpr_t *fpr = env->active_fpu.fpr;
10464             for (i = 0; i < 32 ; i += 2) {
10465                 if (!old_fr && new_fr) {
10466                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10467                 } else if (old_fr && !new_fr) {
10468                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10469                 }
10470             }
10471 
10472             if (new_fr) {
10473                 env->CP0_Status |= (1 << CP0St_FR);
10474                 env->hflags |= MIPS_HFLAG_F64;
10475             } else {
10476                 env->CP0_Status &= ~(1 << CP0St_FR);
10477                 env->hflags &= ~MIPS_HFLAG_F64;
10478             }
10479             if (new_fre) {
10480                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10481                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10482                     env->hflags |= MIPS_HFLAG_FRE;
10483                 }
10484             } else {
10485                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10486                 env->hflags &= ~MIPS_HFLAG_FRE;
10487             }
10488 
10489             return 0;
10490         }
10491 #endif /* MIPS */
10492 #ifdef TARGET_AARCH64
10493         case TARGET_PR_SVE_SET_VL:
10494             /*
10495              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10496              * PR_SVE_VL_INHERIT.  Note the kernel definition
10497              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10498              * even though the current architectural maximum is VQ=16.
10499              */
10500             ret = -TARGET_EINVAL;
10501             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10502                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10503                 CPUARMState *env = cpu_env;
10504                 ARMCPU *cpu = env_archcpu(env);
10505                 uint32_t vq, old_vq;
10506 
10507                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10508                 vq = MAX(arg2 / 16, 1);
10509                 vq = MIN(vq, cpu->sve_max_vq);
10510 
10511                 if (vq < old_vq) {
10512                     aarch64_sve_narrow_vq(env, vq);
10513                 }
10514                 env->vfp.zcr_el[1] = vq - 1;
10515                 arm_rebuild_hflags(env);
10516                 ret = vq * 16;
10517             }
10518             return ret;
10519         case TARGET_PR_SVE_GET_VL:
10520             ret = -TARGET_EINVAL;
10521             {
10522                 ARMCPU *cpu = env_archcpu(cpu_env);
10523                 if (cpu_isar_feature(aa64_sve, cpu)) {
10524                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10525                 }
10526             }
10527             return ret;
10528         case TARGET_PR_PAC_RESET_KEYS:
10529             {
10530                 CPUARMState *env = cpu_env;
10531                 ARMCPU *cpu = env_archcpu(env);
10532 
10533                 if (arg3 || arg4 || arg5) {
10534                     return -TARGET_EINVAL;
10535                 }
10536                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10537                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10538                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10539                                TARGET_PR_PAC_APGAKEY);
10540                     int ret = 0;
10541                     Error *err = NULL;
10542 
10543                     if (arg2 == 0) {
10544                         arg2 = all;
10545                     } else if (arg2 & ~all) {
10546                         return -TARGET_EINVAL;
10547                     }
10548                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10549                         ret |= qemu_guest_getrandom(&env->keys.apia,
10550                                                     sizeof(ARMPACKey), &err);
10551                     }
10552                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10553                         ret |= qemu_guest_getrandom(&env->keys.apib,
10554                                                     sizeof(ARMPACKey), &err);
10555                     }
10556                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10557                         ret |= qemu_guest_getrandom(&env->keys.apda,
10558                                                     sizeof(ARMPACKey), &err);
10559                     }
10560                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10561                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10562                                                     sizeof(ARMPACKey), &err);
10563                     }
10564                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10565                         ret |= qemu_guest_getrandom(&env->keys.apga,
10566                                                     sizeof(ARMPACKey), &err);
10567                     }
10568                     if (ret != 0) {
10569                         /*
10570                          * Some unknown failure in the crypto.  The best
10571                          * we can do is log it and fail the syscall.
10572                          * The real syscall cannot fail this way.
10573                          */
10574                         qemu_log_mask(LOG_UNIMP,
10575                                       "PR_PAC_RESET_KEYS: Crypto failure: %s\n",
10576                                       error_get_pretty(err));
10577                         error_free(err);
10578                         return -TARGET_EIO;
10579                     }
10580                     return 0;
10581                 }
10582             }
10583             return -TARGET_EINVAL;
10584 #endif /* AARCH64 */
10585         case PR_GET_SECCOMP:
10586         case PR_SET_SECCOMP:
10587             /* Disable seccomp to prevent the target from disabling the
10588              * syscalls that QEMU itself needs. */
10589             return -TARGET_EINVAL;
10590         default:
10591             /* Most prctl options have no pointer arguments */
10592             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10593         }
10594         break;
10595 #ifdef TARGET_NR_arch_prctl
10596     case TARGET_NR_arch_prctl:
10597         return do_arch_prctl(cpu_env, arg1, arg2);
10598 #endif
10599 #ifdef TARGET_NR_pread64
10600     case TARGET_NR_pread64:
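        /*
         * On 32-bit ABIs that pass 64-bit arguments in aligned register
         * pairs, the offset halves arrive one slot later, in arg5/arg6,
         * so shift them down before use.
         */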
10601         if (regpairs_aligned(cpu_env, num)) {
10602             arg4 = arg5;
10603             arg5 = arg6;
10604         }
10605         if (arg2 == 0 && arg3 == 0) {
10606             /* Special-case NULL buffer and zero length, which should succeed */
10607             p = 0;
10608         } else {
10609             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10610             if (!p) {
10611                 return -TARGET_EFAULT;
10612             }
10613         }
10614         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10615         unlock_user(p, arg2, ret);
10616         return ret;
10617     case TARGET_NR_pwrite64:
10618         if (regpairs_aligned(cpu_env, num)) {
10619             arg4 = arg5;
10620             arg5 = arg6;
10621         }
10622         if (arg2 == 0 && arg3 == 0) {
10623             /* Special-case NULL buffer and zero length, which should succeed */
10624             p = 0;
10625         } else {
10626             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10627             if (!p) {
10628                 return -TARGET_EFAULT;
10629             }
10630         }
10631         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10632         unlock_user(p, arg2, 0);
10633         return ret;
10634 #endif
10635     case TARGET_NR_getcwd:
10636         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10637             return -TARGET_EFAULT;
10638         ret = get_errno(sys_getcwd1(p, arg2));
10639         unlock_user(p, arg1, ret);
10640         return ret;
10641     case TARGET_NR_capget:
10642     case TARGET_NR_capset:
10643     {
10644         struct target_user_cap_header *target_header;
10645         struct target_user_cap_data *target_data = NULL;
10646         struct __user_cap_header_struct header;
10647         struct __user_cap_data_struct data[2];
10648         struct __user_cap_data_struct *dataptr = NULL;
10649         int i, target_datalen;
10650         int data_items = 1;
10651 
10652         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10653             return -TARGET_EFAULT;
10654         }
10655         header.version = tswap32(target_header->version);
10656         header.pid = tswap32(target_header->pid);
10657 
10658         if (header.version != _LINUX_CAPABILITY_VERSION) {
10659             /* Versions 2 and up take a pointer to two user_data structs */
10660             data_items = 2;
10661         }
10662 
10663         target_datalen = sizeof(*target_data) * data_items;
10664 
10665         if (arg2) {
10666             if (num == TARGET_NR_capget) {
10667                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10668             } else {
10669                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10670             }
10671             if (!target_data) {
10672                 unlock_user_struct(target_header, arg1, 0);
10673                 return -TARGET_EFAULT;
10674             }
10675 
10676             if (num == TARGET_NR_capset) {
10677                 for (i = 0; i < data_items; i++) {
10678                     data[i].effective = tswap32(target_data[i].effective);
10679                     data[i].permitted = tswap32(target_data[i].permitted);
10680                     data[i].inheritable = tswap32(target_data[i].inheritable);
10681                 }
10682             }
10683 
10684             dataptr = data;
10685         }
10686 
10687         if (num == TARGET_NR_capget) {
10688             ret = get_errno(capget(&header, dataptr));
10689         } else {
10690             ret = get_errno(capset(&header, dataptr));
10691         }
10692 
10693         /* The kernel always updates version for both capget and capset */
10694         target_header->version = tswap32(header.version);
10695         unlock_user_struct(target_header, arg1, 1);
10696 
10697         if (arg2) {
10698             if (num == TARGET_NR_capget) {
10699                 for (i = 0; i < data_items; i++) {
10700                     target_data[i].effective = tswap32(data[i].effective);
10701                     target_data[i].permitted = tswap32(data[i].permitted);
10702                     target_data[i].inheritable = tswap32(data[i].inheritable);
10703                 }
10704                 unlock_user(target_data, arg2, target_datalen);
10705             } else {
10706                 unlock_user(target_data, arg2, 0);
10707             }
10708         }
10709         return ret;
10710     }
10711     case TARGET_NR_sigaltstack:
10712         return do_sigaltstack(arg1, arg2,
10713                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10714 
10715 #ifdef CONFIG_SENDFILE
10716 #ifdef TARGET_NR_sendfile
10717     case TARGET_NR_sendfile:
10718     {
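        /*
         * The guest offset is an abi_long; get_user_sal()/put_user_sal()
         * copy it to and from a host off_t so the host sendfile() can
         * advance it in place.
         */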
10719         off_t *offp = NULL;
10720         off_t off;
10721         if (arg3) {
10722             ret = get_user_sal(off, arg3);
10723             if (is_error(ret)) {
10724                 return ret;
10725             }
10726             offp = &off;
10727         }
10728         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10729         if (!is_error(ret) && arg3) {
10730             abi_long ret2 = put_user_sal(off, arg3);
10731             if (is_error(ret2)) {
10732                 ret = ret2;
10733             }
10734         }
10735         return ret;
10736     }
10737 #endif
10738 #ifdef TARGET_NR_sendfile64
10739     case TARGET_NR_sendfile64:
10740     {
10741         off_t *offp = NULL;
10742         off_t off;
10743         if (arg3) {
10744             ret = get_user_s64(off, arg3);
10745             if (is_error(ret)) {
10746                 return ret;
10747             }
10748             offp = &off;
10749         }
10750         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10751         if (!is_error(ret) && arg3) {
10752             abi_long ret2 = put_user_s64(off, arg3);
10753             if (is_error(ret2)) {
10754                 ret = ret2;
10755             }
10756         }
10757         return ret;
10758     }
10759 #endif
10760 #endif
10761 #ifdef TARGET_NR_vfork
10762     case TARGET_NR_vfork:
10763         return get_errno(do_fork(cpu_env,
10764                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10765                          0, 0, 0, 0));
10766 #endif
10767 #ifdef TARGET_NR_ugetrlimit
10768     case TARGET_NR_ugetrlimit:
10769     {
10770         struct rlimit rlim;
10771         int resource = target_to_host_resource(arg1);
10772         ret = get_errno(getrlimit(resource, &rlim));
10773         if (!is_error(ret)) {
10774             struct target_rlimit *target_rlim;
10775             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10776                 return -TARGET_EFAULT;
10777             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10778             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10779             unlock_user_struct(target_rlim, arg2, 1);
10780         }
10781         return ret;
10782     }
10783 #endif
10784 #ifdef TARGET_NR_truncate64
10785     case TARGET_NR_truncate64:
10786         if (!(p = lock_user_string(arg1)))
10787             return -TARGET_EFAULT;
10788         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10789         unlock_user(p, arg1, 0);
10790         return ret;
10791 #endif
10792 #ifdef TARGET_NR_ftruncate64
10793     case TARGET_NR_ftruncate64:
10794         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10795 #endif
10796 #ifdef TARGET_NR_stat64
10797     case TARGET_NR_stat64:
10798         if (!(p = lock_user_string(arg1))) {
10799             return -TARGET_EFAULT;
10800         }
10801         ret = get_errno(stat(path(p), &st));
10802         unlock_user(p, arg1, 0);
10803         if (!is_error(ret))
10804             ret = host_to_target_stat64(cpu_env, arg2, &st);
10805         return ret;
10806 #endif
10807 #ifdef TARGET_NR_lstat64
10808     case TARGET_NR_lstat64:
10809         if (!(p = lock_user_string(arg1))) {
10810             return -TARGET_EFAULT;
10811         }
10812         ret = get_errno(lstat(path(p), &st));
10813         unlock_user(p, arg1, 0);
10814         if (!is_error(ret))
10815             ret = host_to_target_stat64(cpu_env, arg2, &st);
10816         return ret;
10817 #endif
10818 #ifdef TARGET_NR_fstat64
10819     case TARGET_NR_fstat64:
10820         ret = get_errno(fstat(arg1, &st));
10821         if (!is_error(ret))
10822             ret = host_to_target_stat64(cpu_env, arg2, &st);
10823         return ret;
10824 #endif
10825 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10826 #ifdef TARGET_NR_fstatat64
10827     case TARGET_NR_fstatat64:
10828 #endif
10829 #ifdef TARGET_NR_newfstatat
10830     case TARGET_NR_newfstatat:
10831 #endif
10832         if (!(p = lock_user_string(arg2))) {
10833             return -TARGET_EFAULT;
10834         }
10835         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10836         unlock_user(p, arg2, 0);
10837         if (!is_error(ret))
10838             ret = host_to_target_stat64(cpu_env, arg3, &st);
10839         return ret;
10840 #endif
10841 #if defined(TARGET_NR_statx)
10842     case TARGET_NR_statx:
10843         {
10844             struct target_statx *target_stx;
10845             int dirfd = arg1;
10846             int flags = arg3;
10847 
10848             p = lock_user_string(arg2);
10849             if (p == NULL) {
10850                 return -TARGET_EFAULT;
10851             }
10852 #if defined(__NR_statx)
10853             {
10854                 /*
10855                  * It is assumed that struct statx is architecture independent.
10856                  */
10857                 struct target_statx host_stx;
10858                 int mask = arg4;
10859 
10860                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10861                 if (!is_error(ret)) {
10862                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10863                         unlock_user(p, arg2, 0);
10864                         return -TARGET_EFAULT;
10865                     }
10866                 }
10867 
10868                 if (ret != -TARGET_ENOSYS) {
10869                     unlock_user(p, arg2, 0);
10870                     return ret;
10871                 }
10872             }
10873 #endif
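            /*
             * Either the host has no statx() or it returned ENOSYS: fall
             * back to fstatat() and fill in the subset of struct
             * target_statx fields that a plain struct stat provides.
             */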
10874             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10875             unlock_user(p, arg2, 0);
10876 
10877             if (!is_error(ret)) {
10878                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10879                     return -TARGET_EFAULT;
10880                 }
10881                 memset(target_stx, 0, sizeof(*target_stx));
10882                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10883                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10884                 __put_user(st.st_ino, &target_stx->stx_ino);
10885                 __put_user(st.st_mode, &target_stx->stx_mode);
10886                 __put_user(st.st_uid, &target_stx->stx_uid);
10887                 __put_user(st.st_gid, &target_stx->stx_gid);
10888                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10889                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10890                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10891                 __put_user(st.st_size, &target_stx->stx_size);
10892                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10893                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10894                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10895                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10896                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10897                 unlock_user_struct(target_stx, arg5, 1);
10898             }
10899         }
10900         return ret;
10901 #endif
10902 #ifdef TARGET_NR_lchown
10903     case TARGET_NR_lchown:
10904         if (!(p = lock_user_string(arg1)))
10905             return -TARGET_EFAULT;
10906         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10907         unlock_user(p, arg1, 0);
10908         return ret;
10909 #endif
10910 #ifdef TARGET_NR_getuid
10911     case TARGET_NR_getuid:
10912         return get_errno(high2lowuid(getuid()));
10913 #endif
10914 #ifdef TARGET_NR_getgid
10915     case TARGET_NR_getgid:
10916         return get_errno(high2lowgid(getgid()));
10917 #endif
10918 #ifdef TARGET_NR_geteuid
10919     case TARGET_NR_geteuid:
10920         return get_errno(high2lowuid(geteuid()));
10921 #endif
10922 #ifdef TARGET_NR_getegid
10923     case TARGET_NR_getegid:
10924         return get_errno(high2lowgid(getegid()));
10925 #endif
10926     case TARGET_NR_setreuid:
10927         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10928     case TARGET_NR_setregid:
10929         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10930     case TARGET_NR_getgroups:
10931         {
10932             int gidsetsize = arg1;
10933             target_id *target_grouplist;
10934             gid_t *grouplist;
10935             int i;
10936 
10937             grouplist = alloca(gidsetsize * sizeof(gid_t));
10938             ret = get_errno(getgroups(gidsetsize, grouplist));
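            /* A zero gidsetsize only queries the number of groups. */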
10939             if (gidsetsize == 0)
10940                 return ret;
10941             if (!is_error(ret)) {
10942                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10943                 if (!target_grouplist)
10944                     return -TARGET_EFAULT;
10945                 for (i = 0; i < ret; i++)
10946                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10947                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10948             }
10949         }
10950         return ret;
10951     case TARGET_NR_setgroups:
10952         {
10953             int gidsetsize = arg1;
10954             target_id *target_grouplist;
10955             gid_t *grouplist = NULL;
10956             int i;
10957             if (gidsetsize) {
10958                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10959                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10960                 if (!target_grouplist) {
10961                     return -TARGET_EFAULT;
10962                 }
10963                 for (i = 0; i < gidsetsize; i++) {
10964                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10965                 }
10966                 unlock_user(target_grouplist, arg2, 0);
10967             }
10968             return get_errno(setgroups(gidsetsize, grouplist));
10969         }
10970     case TARGET_NR_fchown:
10971         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10972 #if defined(TARGET_NR_fchownat)
10973     case TARGET_NR_fchownat:
10974         if (!(p = lock_user_string(arg2)))
10975             return -TARGET_EFAULT;
10976         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10977                                  low2highgid(arg4), arg5));
10978         unlock_user(p, arg2, 0);
10979         return ret;
10980 #endif
10981 #ifdef TARGET_NR_setresuid
10982     case TARGET_NR_setresuid:
10983         return get_errno(sys_setresuid(low2highuid(arg1),
10984                                        low2highuid(arg2),
10985                                        low2highuid(arg3)));
10986 #endif
10987 #ifdef TARGET_NR_getresuid
10988     case TARGET_NR_getresuid:
10989         {
10990             uid_t ruid, euid, suid;
10991             ret = get_errno(getresuid(&ruid, &euid, &suid));
10992             if (!is_error(ret)) {
10993                 if (put_user_id(high2lowuid(ruid), arg1)
10994                     || put_user_id(high2lowuid(euid), arg2)
10995                     || put_user_id(high2lowuid(suid), arg3))
10996                     return -TARGET_EFAULT;
10997             }
10998         }
10999         return ret;
11000 #endif
11001 #ifdef TARGET_NR_setresgid
11002     case TARGET_NR_setresgid:
11003         return get_errno(sys_setresgid(low2highgid(arg1),
11004                                        low2highgid(arg2),
11005                                        low2highgid(arg3)));
11006 #endif
11007 #ifdef TARGET_NR_getresgid
11008     case TARGET_NR_getresgid:
11009         {
11010             gid_t rgid, egid, sgid;
11011             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11012             if (!is_error(ret)) {
11013                 if (put_user_id(high2lowgid(rgid), arg1)
11014                     || put_user_id(high2lowgid(egid), arg2)
11015                     || put_user_id(high2lowgid(sgid), arg3))
11016                     return -TARGET_EFAULT;
11017             }
11018         }
11019         return ret;
11020 #endif
11021 #ifdef TARGET_NR_chown
11022     case TARGET_NR_chown:
11023         if (!(p = lock_user_string(arg1)))
11024             return -TARGET_EFAULT;
11025         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11026         unlock_user(p, arg1, 0);
11027         return ret;
11028 #endif
11029     case TARGET_NR_setuid:
11030         return get_errno(sys_setuid(low2highuid(arg1)));
11031     case TARGET_NR_setgid:
11032         return get_errno(sys_setgid(low2highgid(arg1)));
11033     case TARGET_NR_setfsuid:
11034         return get_errno(setfsuid(arg1));
11035     case TARGET_NR_setfsgid:
11036         return get_errno(setfsgid(arg1));
11037 
11038 #ifdef TARGET_NR_lchown32
11039     case TARGET_NR_lchown32:
11040         if (!(p = lock_user_string(arg1)))
11041             return -TARGET_EFAULT;
11042         ret = get_errno(lchown(p, arg2, arg3));
11043         unlock_user(p, arg1, 0);
11044         return ret;
11045 #endif
11046 #ifdef TARGET_NR_getuid32
11047     case TARGET_NR_getuid32:
11048         return get_errno(getuid());
11049 #endif
11050 
11051 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11052    /* Alpha specific */
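    /*
     * getxuid returns the real uid in v0 and, using the Alpha convention
     * of a second syscall return value, the effective uid in a4.
     */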
11053     case TARGET_NR_getxuid:
11054         {
11055             uid_t euid;
11056             euid = geteuid();
11057             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11058         }
11059         return get_errno(getuid());
11060 #endif
11061 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11062    /* Alpha specific */
11063     case TARGET_NR_getxgid:
11064         {
11065             gid_t egid;
11066             egid = getegid();
11067             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11068         }
11069         return get_errno(getgid());
11070 #endif
11071 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11072     /* Alpha specific */
11073     case TARGET_NR_osf_getsysinfo:
11074         ret = -TARGET_EOPNOTSUPP;
11075         switch (arg1) {
11076           case TARGET_GSI_IEEE_FP_CONTROL:
11077             {
11078                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11079                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11080 
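                /*
                 * Return the guest's software FP control word, refreshing
                 * its accrued exception status bits from the live FPCR.
                 */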
11081                 swcr &= ~SWCR_STATUS_MASK;
11082                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11083 
11084                 if (put_user_u64(swcr, arg2))
11085                     return -TARGET_EFAULT;
11086                 ret = 0;
11087             }
11088             break;
11089 
11090           /* case GSI_IEEE_STATE_AT_SIGNAL:
11091              -- Not implemented in linux kernel.
11092              case GSI_UACPROC:
11093              -- Retrieves current unaligned access state; not much used.
11094              case GSI_PROC_TYPE:
11095              -- Retrieves implver information; surely not used.
11096              case GSI_GET_HWRPB:
11097              -- Grabs a copy of the HWRPB; surely not used.
11098           */
11099         }
11100         return ret;
11101 #endif
11102 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11103     /* Alpha specific */
11104     case TARGET_NR_osf_setsysinfo:
11105         ret = -TARGET_EOPNOTSUPP;
11106         switch (arg1) {
11107           case TARGET_SSI_IEEE_FP_CONTROL:
11108             {
11109                 uint64_t swcr, fpcr;
11110 
11111                 if (get_user_u64 (swcr, arg2)) {
11112                     return -TARGET_EFAULT;
11113                 }
11114 
11115                 /*
11116                  * The kernel calls swcr_update_status to update the
11117                  * status bits from the fpcr at every point that it
11118                  * could be queried.  Therefore, we store the status
11119                  * bits only in FPCR.
11120                  */
11121                 ((CPUAlphaState *)cpu_env)->swcr
11122                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11123 
11124                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11125                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11126                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11127                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11128                 ret = 0;
11129             }
11130             break;
11131 
11132           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11133             {
11134                 uint64_t exc, fpcr, fex;
11135 
11136                 if (get_user_u64(exc, arg2)) {
11137                     return -TARGET_EFAULT;
11138                 }
11139                 exc &= SWCR_STATUS_MASK;
11140                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11141 
11142                 /* Old exceptions are not signaled.  */
11143                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11144                 fex = exc & ~fex;
11145                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11146                 fex &= ((CPUArchState *)cpu_env)->swcr;
11147 
11148                 /* Update the hardware fpcr.  */
11149                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11150                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11151 
11152                 if (fex) {
11153                     int si_code = TARGET_FPE_FLTUNK;
11154                     target_siginfo_t info;
11155 
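                    /*
                     * Pick an si_code for the newly raised exceptions;
                     * each later test overrides the earlier ones, so
                     * invalid-operation takes precedence when several
                     * bits are set.
                     */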
11156                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11157                         si_code = TARGET_FPE_FLTUND;
11158                     }
11159                     if (fex & SWCR_TRAP_ENABLE_INE) {
11160                         si_code = TARGET_FPE_FLTRES;
11161                     }
11162                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11163                         si_code = TARGET_FPE_FLTUND;
11164                     }
11165                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11166                         si_code = TARGET_FPE_FLTOVF;
11167                     }
11168                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11169                         si_code = TARGET_FPE_FLTDIV;
11170                     }
11171                     if (fex & SWCR_TRAP_ENABLE_INV) {
11172                         si_code = TARGET_FPE_FLTINV;
11173                     }
11174 
11175                     info.si_signo = SIGFPE;
11176                     info.si_errno = 0;
11177                     info.si_code = si_code;
11178                     info._sifields._sigfault._addr
11179                         = ((CPUArchState *)cpu_env)->pc;
11180                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11181                                  QEMU_SI_FAULT, &info);
11182                 }
11183                 ret = 0;
11184             }
11185             break;
11186 
11187           /* case SSI_NVPAIRS:
11188              -- Used with SSIN_UACPROC to enable unaligned accesses.
11189              case SSI_IEEE_STATE_AT_SIGNAL:
11190              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11191              -- Not implemented in linux kernel
11192           */
11193         }
11194         return ret;
11195 #endif
11196 #ifdef TARGET_NR_osf_sigprocmask
11197     /* Alpha specific.  */
11198     case TARGET_NR_osf_sigprocmask:
11199         {
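            /*
             * Unlike sigprocmask(2), this OSF/1 variant returns the old
             * mask as the syscall result rather than through a pointer.
             */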
11200             abi_ulong mask;
11201             int how;
11202             sigset_t set, oldset;
11203 
11204             switch(arg1) {
11205             case TARGET_SIG_BLOCK:
11206                 how = SIG_BLOCK;
11207                 break;
11208             case TARGET_SIG_UNBLOCK:
11209                 how = SIG_UNBLOCK;
11210                 break;
11211             case TARGET_SIG_SETMASK:
11212                 how = SIG_SETMASK;
11213                 break;
11214             default:
11215                 return -TARGET_EINVAL;
11216             }
11217             mask = arg2;
11218             target_to_host_old_sigset(&set, &mask);
11219             ret = do_sigprocmask(how, &set, &oldset);
11220             if (!ret) {
11221                 host_to_target_old_sigset(&mask, &oldset);
11222                 ret = mask;
11223             }
11224         }
11225         return ret;
11226 #endif
11227 
11228 #ifdef TARGET_NR_getgid32
11229     case TARGET_NR_getgid32:
11230         return get_errno(getgid());
11231 #endif
11232 #ifdef TARGET_NR_geteuid32
11233     case TARGET_NR_geteuid32:
11234         return get_errno(geteuid());
11235 #endif
11236 #ifdef TARGET_NR_getegid32
11237     case TARGET_NR_getegid32:
11238         return get_errno(getegid());
11239 #endif
11240 #ifdef TARGET_NR_setreuid32
11241     case TARGET_NR_setreuid32:
11242         return get_errno(setreuid(arg1, arg2));
11243 #endif
11244 #ifdef TARGET_NR_setregid32
11245     case TARGET_NR_setregid32:
11246         return get_errno(setregid(arg1, arg2));
11247 #endif
11248 #ifdef TARGET_NR_getgroups32
11249     case TARGET_NR_getgroups32:
11250         {
11251             int gidsetsize = arg1;
11252             uint32_t *target_grouplist;
11253             gid_t *grouplist;
11254             int i;
11255 
11256             grouplist = alloca(gidsetsize * sizeof(gid_t));
11257             ret = get_errno(getgroups(gidsetsize, grouplist));
11258             if (gidsetsize == 0)
11259                 return ret;
11260             if (!is_error(ret)) {
11261                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11262                 if (!target_grouplist) {
11263                     return -TARGET_EFAULT;
11264                 }
11265                 for (i = 0; i < ret; i++)
11266                     target_grouplist[i] = tswap32(grouplist[i]);
11267                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11268             }
11269         }
11270         return ret;
11271 #endif
11272 #ifdef TARGET_NR_setgroups32
11273     case TARGET_NR_setgroups32:
11274         {
11275             int gidsetsize = arg1;
11276             uint32_t *target_grouplist;
11277             gid_t *grouplist;
11278             int i;
11279 
11280             grouplist = alloca(gidsetsize * sizeof(gid_t));
11281             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11282             if (!target_grouplist) {
11283                 return -TARGET_EFAULT;
11284             }
11285             for (i = 0; i < gidsetsize; i++)
11286                 grouplist[i] = tswap32(target_grouplist[i]);
11287             unlock_user(target_grouplist, arg2, 0);
11288             return get_errno(setgroups(gidsetsize, grouplist));
11289         }
11290 #endif
11291 #ifdef TARGET_NR_fchown32
11292     case TARGET_NR_fchown32:
11293         return get_errno(fchown(arg1, arg2, arg3));
11294 #endif
11295 #ifdef TARGET_NR_setresuid32
11296     case TARGET_NR_setresuid32:
11297         return get_errno(sys_setresuid(arg1, arg2, arg3));
11298 #endif
11299 #ifdef TARGET_NR_getresuid32
11300     case TARGET_NR_getresuid32:
11301         {
11302             uid_t ruid, euid, suid;
11303             ret = get_errno(getresuid(&ruid, &euid, &suid));
11304             if (!is_error(ret)) {
11305                 if (put_user_u32(ruid, arg1)
11306                     || put_user_u32(euid, arg2)
11307                     || put_user_u32(suid, arg3))
11308                     return -TARGET_EFAULT;
11309             }
11310         }
11311         return ret;
11312 #endif
11313 #ifdef TARGET_NR_setresgid32
11314     case TARGET_NR_setresgid32:
11315         return get_errno(sys_setresgid(arg1, arg2, arg3));
11316 #endif
11317 #ifdef TARGET_NR_getresgid32
11318     case TARGET_NR_getresgid32:
11319         {
11320             gid_t rgid, egid, sgid;
11321             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11322             if (!is_error(ret)) {
11323                 if (put_user_u32(rgid, arg1)
11324                     || put_user_u32(egid, arg2)
11325                     || put_user_u32(sgid, arg3))
11326                     return -TARGET_EFAULT;
11327             }
11328         }
11329         return ret;
11330 #endif
11331 #ifdef TARGET_NR_chown32
11332     case TARGET_NR_chown32:
11333         if (!(p = lock_user_string(arg1)))
11334             return -TARGET_EFAULT;
11335         ret = get_errno(chown(p, arg2, arg3));
11336         unlock_user(p, arg1, 0);
11337         return ret;
11338 #endif
11339 #ifdef TARGET_NR_setuid32
11340     case TARGET_NR_setuid32:
11341         return get_errno(sys_setuid(arg1));
11342 #endif
11343 #ifdef TARGET_NR_setgid32
11344     case TARGET_NR_setgid32:
11345         return get_errno(sys_setgid(arg1));
11346 #endif
11347 #ifdef TARGET_NR_setfsuid32
11348     case TARGET_NR_setfsuid32:
11349         return get_errno(setfsuid(arg1));
11350 #endif
11351 #ifdef TARGET_NR_setfsgid32
11352     case TARGET_NR_setfsgid32:
11353         return get_errno(setfsgid(arg1));
11354 #endif
11355 #ifdef TARGET_NR_mincore
11356     case TARGET_NR_mincore:
11357         {
11358             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11359             if (!a) {
11360                 return -TARGET_ENOMEM;
11361             }
11362             p = lock_user_string(arg3);
11363             if (!p) {
11364                 ret = -TARGET_EFAULT;
11365             } else {
11366                 ret = get_errno(mincore(a, arg2, p));
11367                 unlock_user(p, arg3, ret);
11368             }
11369             unlock_user(a, arg1, 0);
11370         }
11371         return ret;
11372 #endif
11373 #ifdef TARGET_NR_arm_fadvise64_64
11374     case TARGET_NR_arm_fadvise64_64:
11375         /* arm_fadvise64_64 looks like fadvise64_64 but
11376          * with different argument order: fd, advice, offset, len
11377          * rather than the usual fd, offset, len, advice.
11378          * Note that offset and len are both 64-bit so appear as
11379          * pairs of 32-bit registers.
11380          */
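        /*
         * posix_fadvise() returns the error number directly rather than
         * setting errno, so convert it without get_errno().
         */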
11381         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11382                             target_offset64(arg5, arg6), arg2);
11383         return -host_to_target_errno(ret);
11384 #endif
11385 
11386 #if TARGET_ABI_BITS == 32
11387 
11388 #ifdef TARGET_NR_fadvise64_64
11389     case TARGET_NR_fadvise64_64:
11390 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11391         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11392         ret = arg2;
11393         arg2 = arg3;
11394         arg3 = arg4;
11395         arg4 = arg5;
11396         arg5 = arg6;
11397         arg6 = ret;
11398 #else
11399         /* 6 args: fd, offset (high, low), len (high, low), advice */
11400         if (regpairs_aligned(cpu_env, num)) {
11401             /* offset is in (3,4), len in (5,6) and advice in 7 */
11402             arg2 = arg3;
11403             arg3 = arg4;
11404             arg4 = arg5;
11405             arg5 = arg6;
11406             arg6 = arg7;
11407         }
11408 #endif
11409         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11410                             target_offset64(arg4, arg5), arg6);
11411         return -host_to_target_errno(ret);
11412 #endif
11413 
11414 #ifdef TARGET_NR_fadvise64
11415     case TARGET_NR_fadvise64:
11416         /* 5 args: fd, offset (high, low), len, advice */
11417         if (regpairs_aligned(cpu_env, num)) {
11418             /* offset is in (3,4), len in 5 and advice in 6 */
11419             arg2 = arg3;
11420             arg3 = arg4;
11421             arg4 = arg5;
11422             arg5 = arg6;
11423         }
11424         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11425         return -host_to_target_errno(ret);
11426 #endif
11427 
11428 #else /* not a 32-bit ABI */
11429 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11430 #ifdef TARGET_NR_fadvise64_64
11431     case TARGET_NR_fadvise64_64:
11432 #endif
11433 #ifdef TARGET_NR_fadvise64
11434     case TARGET_NR_fadvise64:
11435 #endif
11436 #ifdef TARGET_S390X
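        /*
         * The s390x ABI uses 6 and 7 for POSIX_FADV_DONTNEED and
         * POSIX_FADV_NOREUSE, so remap those, and turn the guest's 4 and
         * 5, which s390x does not define, into invalid host values.
         */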
11437         switch (arg4) {
11438         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11439         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11440         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11441         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11442         default: break;
11443         }
11444 #endif
11445         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11446 #endif
11447 #endif /* end of 64-bit ABI fadvise handling */
11448 
11449 #ifdef TARGET_NR_madvise
11450     case TARGET_NR_madvise:
11451         /* A straight passthrough may not be safe because QEMU sometimes
11452            turns private file-backed mappings into anonymous mappings,
11453            which would break MADV_DONTNEED.  Since madvise() is only a
11454            hint, ignoring it and returning success is OK.  */
11455         return 0;
11456 #endif
11457 #ifdef TARGET_NR_fcntl64
11458     case TARGET_NR_fcntl64:
11459     {
11460         int cmd;
11461         struct flock64 fl;
11462         from_flock64_fn *copyfrom = copy_from_user_flock64;
11463         to_flock64_fn *copyto = copy_to_user_flock64;
11464 
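        /*
         * ARM OABI tasks lay out struct flock64 differently (64-bit
         * members are not 8-byte aligned), so old-ABI guests need their
         * own copy helpers.
         */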
11465 #ifdef TARGET_ARM
11466         if (!((CPUARMState *)cpu_env)->eabi) {
11467             copyfrom = copy_from_user_oabi_flock64;
11468             copyto = copy_to_user_oabi_flock64;
11469         }
11470 #endif
11471 
11472         cmd = target_to_host_fcntl_cmd(arg2);
11473         if (cmd == -TARGET_EINVAL) {
11474             return cmd;
11475         }
11476 
11477         switch(arg2) {
11478         case TARGET_F_GETLK64:
11479             ret = copyfrom(&fl, arg3);
11480             if (ret) {
11481                 break;
11482             }
11483             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11484             if (ret == 0) {
11485                 ret = copyto(arg3, &fl);
11486             }
11487             break;
11488 
11489         case TARGET_F_SETLK64:
11490         case TARGET_F_SETLKW64:
11491             ret = copyfrom(&fl, arg3);
11492             if (ret) {
11493                 break;
11494             }
11495             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11496             break;
11497         default:
11498             ret = do_fcntl(arg1, arg2, arg3);
11499             break;
11500         }
11501         return ret;
11502     }
11503 #endif
11504 #ifdef TARGET_NR_cacheflush
11505     case TARGET_NR_cacheflush:
11506         /* self-modifying code is handled automatically, so nothing needed */
11507         return 0;
11508 #endif
11509 #ifdef TARGET_NR_getpagesize
11510     case TARGET_NR_getpagesize:
11511         return TARGET_PAGE_SIZE;
11512 #endif
11513     case TARGET_NR_gettid:
11514         return get_errno(sys_gettid());
11515 #ifdef TARGET_NR_readahead
11516     case TARGET_NR_readahead:
11517 #if TARGET_ABI_BITS == 32
11518         if (regpairs_aligned(cpu_env, num)) {
11519             arg2 = arg3;
11520             arg3 = arg4;
11521             arg4 = arg5;
11522         }
11523         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11524 #else
11525         ret = get_errno(readahead(arg1, arg2, arg3));
11526 #endif
11527         return ret;
11528 #endif
11529 #ifdef CONFIG_ATTR
11530 #ifdef TARGET_NR_setxattr
11531     case TARGET_NR_listxattr:
11532     case TARGET_NR_llistxattr:
11533     {
11534         void *p, *b = 0;
11535         if (arg2) {
11536             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11537             if (!b) {
11538                 return -TARGET_EFAULT;
11539             }
11540         }
11541         p = lock_user_string(arg1);
11542         if (p) {
11543             if (num == TARGET_NR_listxattr) {
11544                 ret = get_errno(listxattr(p, b, arg3));
11545             } else {
11546                 ret = get_errno(llistxattr(p, b, arg3));
11547             }
11548         } else {
11549             ret = -TARGET_EFAULT;
11550         }
11551         unlock_user(p, arg1, 0);
11552         unlock_user(b, arg2, arg3);
11553         return ret;
11554     }
11555     case TARGET_NR_flistxattr:
11556     {
11557         void *b = 0;
11558         if (arg2) {
11559             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11560             if (!b) {
11561                 return -TARGET_EFAULT;
11562             }
11563         }
11564         ret = get_errno(flistxattr(arg1, b, arg3));
11565         unlock_user(b, arg2, arg3);
11566         return ret;
11567     }
11568     case TARGET_NR_setxattr:
11569     case TARGET_NR_lsetxattr:
11570         {
11571             void *p, *n, *v = 0;
11572             if (arg3) {
11573                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11574                 if (!v) {
11575                     return -TARGET_EFAULT;
11576                 }
11577             }
11578             p = lock_user_string(arg1);
11579             n = lock_user_string(arg2);
11580             if (p && n) {
11581                 if (num == TARGET_NR_setxattr) {
11582                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11583                 } else {
11584                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11585                 }
11586             } else {
11587                 ret = -TARGET_EFAULT;
11588             }
11589             unlock_user(p, arg1, 0);
11590             unlock_user(n, arg2, 0);
11591             unlock_user(v, arg3, 0);
11592         }
11593         return ret;
11594     case TARGET_NR_fsetxattr:
11595         {
11596             void *n, *v = 0;
11597             if (arg3) {
11598                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11599                 if (!v) {
11600                     return -TARGET_EFAULT;
11601                 }
11602             }
11603             n = lock_user_string(arg2);
11604             if (n) {
11605                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11606             } else {
11607                 ret = -TARGET_EFAULT;
11608             }
11609             unlock_user(n, arg2, 0);
11610             unlock_user(v, arg3, 0);
11611         }
11612         return ret;
11613     case TARGET_NR_getxattr:
11614     case TARGET_NR_lgetxattr:
11615         {
11616             void *p, *n, *v = 0;
11617             if (arg3) {
11618                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11619                 if (!v) {
11620                     return -TARGET_EFAULT;
11621                 }
11622             }
11623             p = lock_user_string(arg1);
11624             n = lock_user_string(arg2);
11625             if (p && n) {
11626                 if (num == TARGET_NR_getxattr) {
11627                     ret = get_errno(getxattr(p, n, v, arg4));
11628                 } else {
11629                     ret = get_errno(lgetxattr(p, n, v, arg4));
11630                 }
11631             } else {
11632                 ret = -TARGET_EFAULT;
11633             }
11634             unlock_user(p, arg1, 0);
11635             unlock_user(n, arg2, 0);
11636             unlock_user(v, arg3, arg4);
11637         }
11638         return ret;
11639     case TARGET_NR_fgetxattr:
11640         {
11641             void *n, *v = 0;
11642             if (arg3) {
11643                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11644                 if (!v) {
11645                     return -TARGET_EFAULT;
11646                 }
11647             }
11648             n = lock_user_string(arg2);
11649             if (n) {
11650                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11651             } else {
11652                 ret = -TARGET_EFAULT;
11653             }
11654             unlock_user(n, arg2, 0);
11655             unlock_user(v, arg3, arg4);
11656         }
11657         return ret;
11658     case TARGET_NR_removexattr:
11659     case TARGET_NR_lremovexattr:
11660         {
11661             void *p, *n;
11662             p = lock_user_string(arg1);
11663             n = lock_user_string(arg2);
11664             if (p && n) {
11665                 if (num == TARGET_NR_removexattr) {
11666                     ret = get_errno(removexattr(p, n));
11667                 } else {
11668                     ret = get_errno(lremovexattr(p, n));
11669                 }
11670             } else {
11671                 ret = -TARGET_EFAULT;
11672             }
11673             unlock_user(p, arg1, 0);
11674             unlock_user(n, arg2, 0);
11675         }
11676         return ret;
11677     case TARGET_NR_fremovexattr:
11678         {
11679             void *n;
11680             n = lock_user_string(arg2);
11681             if (n) {
11682                 ret = get_errno(fremovexattr(arg1, n));
11683             } else {
11684                 ret = -TARGET_EFAULT;
11685             }
11686             unlock_user(n, arg2, 0);
11687         }
11688         return ret;
11689 #endif
11690 #endif /* CONFIG_ATTR */
11691 #ifdef TARGET_NR_set_thread_area
11692     case TARGET_NR_set_thread_area:
11693 #if defined(TARGET_MIPS)
11694       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11695       return 0;
11696 #elif defined(TARGET_CRIS)
11697       if (arg1 & 0xff)
11698           ret = -TARGET_EINVAL;
11699       else {
11700           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11701           ret = 0;
11702       }
11703       return ret;
11704 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11705       return do_set_thread_area(cpu_env, arg1);
11706 #elif defined(TARGET_M68K)
11707       {
11708           TaskState *ts = cpu->opaque;
11709           ts->tp_value = arg1;
11710           return 0;
11711       }
11712 #else
11713       return -TARGET_ENOSYS;
11714 #endif
11715 #endif
11716 #ifdef TARGET_NR_get_thread_area
11717     case TARGET_NR_get_thread_area:
11718 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11719         return do_get_thread_area(cpu_env, arg1);
11720 #elif defined(TARGET_M68K)
11721         {
11722             TaskState *ts = cpu->opaque;
11723             return ts->tp_value;
11724         }
11725 #else
11726         return -TARGET_ENOSYS;
11727 #endif
11728 #endif
11729 #ifdef TARGET_NR_getdomainname
11730     case TARGET_NR_getdomainname:
11731         return -TARGET_ENOSYS;
11732 #endif
11733 
11734 #ifdef TARGET_NR_clock_settime
11735     case TARGET_NR_clock_settime:
11736     {
11737         struct timespec ts;
11738 
11739         ret = target_to_host_timespec(&ts, arg2);
11740         if (!is_error(ret)) {
11741             ret = get_errno(clock_settime(arg1, &ts));
11742         }
11743         return ret;
11744     }
11745 #endif
11746 #ifdef TARGET_NR_clock_settime64
11747     case TARGET_NR_clock_settime64:
11748     {
11749         struct timespec ts;
11750 
11751         ret = target_to_host_timespec64(&ts, arg2);
11752         if (!is_error(ret)) {
11753             ret = get_errno(clock_settime(arg1, &ts));
11754         }
11755         return ret;
11756     }
11757 #endif
11758 #ifdef TARGET_NR_clock_gettime
11759     case TARGET_NR_clock_gettime:
11760     {
11761         struct timespec ts;
11762         ret = get_errno(clock_gettime(arg1, &ts));
11763         if (!is_error(ret)) {
11764             ret = host_to_target_timespec(arg2, &ts);
11765         }
11766         return ret;
11767     }
11768 #endif
11769 #ifdef TARGET_NR_clock_gettime64
11770     case TARGET_NR_clock_gettime64:
11771     {
11772         struct timespec ts;
11773         ret = get_errno(clock_gettime(arg1, &ts));
11774         if (!is_error(ret)) {
11775             ret = host_to_target_timespec64(arg2, &ts);
11776         }
11777         return ret;
11778     }
11779 #endif
11780 #ifdef TARGET_NR_clock_getres
11781     case TARGET_NR_clock_getres:
11782     {
11783         struct timespec ts;
11784         ret = get_errno(clock_getres(arg1, &ts));
11785         if (!is_error(ret)) {
11786             host_to_target_timespec(arg2, &ts);
11787         }
11788         return ret;
11789     }
11790 #endif
11791 #ifdef TARGET_NR_clock_nanosleep
11792     case TARGET_NR_clock_nanosleep:
11793     {
11794         struct timespec ts;
11795         target_to_host_timespec(&ts, arg3);
11796         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11797                                              &ts, arg4 ? &ts : NULL));
11798         if (arg4)
11799             host_to_target_timespec(arg4, &ts);
11800 
11801 #if defined(TARGET_PPC)
11802         /* clock_nanosleep is odd in that it returns positive errno values.
11803          * On PPC, CR0 bit 3 should be set in such a situation. */
11804         if (ret && ret != -TARGET_ERESTARTSYS) {
11805             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11806         }
11807 #endif
11808         return ret;
11809     }
11810 #endif
11811 
11812 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11813     case TARGET_NR_set_tid_address:
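        /*
         * The kernel stores this pointer and writes to it when the thread
         * exits, so it must be given the host view of the guest address.
         */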
11814         return get_errno(set_tid_address((int *)g2h(arg1)));
11815 #endif
11816 
11817     case TARGET_NR_tkill:
11818         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11819 
11820     case TARGET_NR_tgkill:
11821         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11822                          target_to_host_signal(arg3)));
11823 
11824 #ifdef TARGET_NR_set_robust_list
11825     case TARGET_NR_set_robust_list:
11826     case TARGET_NR_get_robust_list:
11827         /* The ABI for supporting robust futexes has userspace pass
11828          * the kernel a pointer to a linked list which is updated by
11829          * userspace after the syscall; the list is walked by the kernel
11830          * when the thread exits. Since the linked list in QEMU guest
11831          * memory isn't a valid linked list for the host and we have
11832          * no way to reliably intercept the thread-death event, we can't
11833          * support these. Silently return ENOSYS so that guest userspace
11834          * falls back to a non-robust futex implementation (which should
11835          * be OK except in the corner case of the guest crashing while
11836          * holding a mutex that is shared with another process via
11837          * shared memory).
11838          */
11839         return -TARGET_ENOSYS;
11840 #endif
11841 
11842 #if defined(TARGET_NR_utimensat)
11843     case TARGET_NR_utimensat:
11844         {
11845             struct timespec *tsp, ts[2];
11846             if (!arg3) {
11847                 tsp = NULL;
11848             } else {
11849                 target_to_host_timespec(ts, arg3);
11850                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11851                 tsp = ts;
11852             }
11853             if (!arg2)
11854                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11855             else {
11856                 if (!(p = lock_user_string(arg2))) {
11857                     return -TARGET_EFAULT;
11858                 }
11859                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11860                 unlock_user(p, arg2, 0);
11861             }
11862         }
11863         return ret;
11864 #endif
11865 #ifdef TARGET_NR_futex
11866     case TARGET_NR_futex:
11867         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11868 #endif
11869 #ifdef TARGET_NR_futex_time64
11870     case TARGET_NR_futex_time64:
11871         return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
11872 #endif
11873 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11874     case TARGET_NR_inotify_init:
11875         ret = get_errno(sys_inotify_init());
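        /*
         * Register a translator so struct inotify_event data read from
         * this descriptor is converted to the guest's layout.
         */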
11876         if (ret >= 0) {
11877             fd_trans_register(ret, &target_inotify_trans);
11878         }
11879         return ret;
11880 #endif
11881 #ifdef CONFIG_INOTIFY1
11882 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11883     case TARGET_NR_inotify_init1:
11884         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11885                                           fcntl_flags_tbl)));
11886         if (ret >= 0) {
11887             fd_trans_register(ret, &target_inotify_trans);
11888         }
11889         return ret;
11890 #endif
11891 #endif
11892 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11893     case TARGET_NR_inotify_add_watch:
11894         p = lock_user_string(arg2);
11895         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11896         unlock_user(p, arg2, 0);
11897         return ret;
11898 #endif
11899 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11900     case TARGET_NR_inotify_rm_watch:
11901         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11902 #endif
11903 
11904 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11905     case TARGET_NR_mq_open:
11906         {
11907             struct mq_attr posix_mq_attr;
11908             struct mq_attr *pposix_mq_attr;
11909             int host_flags;
11910 
11911             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11912             pposix_mq_attr = NULL;
11913             if (arg4) {
11914                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11915                     return -TARGET_EFAULT;
11916                 }
11917                 pposix_mq_attr = &posix_mq_attr;
11918             }
11919             p = lock_user_string(arg1 - 1);
11920             if (!p) {
11921                 return -TARGET_EFAULT;
11922             }
11923             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11924             unlock_user (p, arg1, 0);
11925         }
11926         return ret;
11927 
11928     case TARGET_NR_mq_unlink:
11929         p = lock_user_string(arg1 - 1);
11930         if (!p) {
11931             return -TARGET_EFAULT;
11932         }
11933         ret = get_errno(mq_unlink(p));
11934         unlock_user (p, arg1, 0);
11935         return ret;
11936 
11937 #ifdef TARGET_NR_mq_timedsend
11938     case TARGET_NR_mq_timedsend:
11939         {
11940             struct timespec ts;
11941 
11942             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11943             if (arg5 != 0) {
11944                 target_to_host_timespec(&ts, arg5);
11945                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11946                 host_to_target_timespec(arg5, &ts);
11947             } else {
11948                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11949             }
11950             unlock_user (p, arg2, arg3);
11951         }
11952         return ret;
11953 #endif
11954 
11955 #ifdef TARGET_NR_mq_timedreceive
11956     case TARGET_NR_mq_timedreceive:
11957         {
11958             struct timespec ts;
11959             unsigned int prio;
11960 
11961             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11962             if (arg5 != 0) {
11963                 target_to_host_timespec(&ts, arg5);
11964                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11965                                                      &prio, &ts));
11966                 host_to_target_timespec(arg5, &ts);
11967             } else {
11968                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11969                                                      &prio, NULL));
11970             }
11971             unlock_user (p, arg2, arg3);
11972             if (arg4 != 0)
11973                 put_user_u32(prio, arg4);
11974         }
11975         return ret;
11976 #endif
11977 
11978     /* Not implemented for now... */
11979 /*     case TARGET_NR_mq_notify: */
11980 /*         break; */
11981 
11982     case TARGET_NR_mq_getsetattr:
11983         {
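            /*
             * mq_setattr() also reports the previous attributes, so a
             * single output struct serves both the set and get paths.
             */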
11984             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11985             ret = 0;
11986             if (arg2 != 0) {
11987                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11988                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11989                                            &posix_mq_attr_out));
11990             } else if (arg3 != 0) {
11991                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11992             }
11993             if (ret == 0 && arg3 != 0) {
11994                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11995             }
11996         }
11997         return ret;
11998 #endif
11999 
12000 #ifdef CONFIG_SPLICE
12001 #ifdef TARGET_NR_tee
12002     case TARGET_NR_tee:
12003         {
12004             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12005         }
12006         return ret;
12007 #endif
12008 #ifdef TARGET_NR_splice
12009     case TARGET_NR_splice:
12010         {
12011             loff_t loff_in, loff_out;
12012             loff_t *ploff_in = NULL, *ploff_out = NULL;
12013             if (arg2) {
12014                 if (get_user_u64(loff_in, arg2)) {
12015                     return -TARGET_EFAULT;
12016                 }
12017                 ploff_in = &loff_in;
12018             }
12019             if (arg4) {
12020                 if (get_user_u64(loff_out, arg4)) {
12021                     return -TARGET_EFAULT;
12022                 }
12023                 ploff_out = &loff_out;
12024             }
12025             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12026             if (arg2) {
12027                 if (put_user_u64(loff_in, arg2)) {
12028                     return -TARGET_EFAULT;
12029                 }
12030             }
12031             if (arg4) {
12032                 if (put_user_u64(loff_out, arg4)) {
12033                     return -TARGET_EFAULT;
12034                 }
12035             }
12036         }
12037         return ret;
12038 #endif
12039 #ifdef TARGET_NR_vmsplice
12040     case TARGET_NR_vmsplice:
12041         {
12042             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12043             if (vec != NULL) {
12044                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12045                 unlock_iovec(vec, arg2, arg3, 0);
12046             } else {
12047                 ret = -host_to_target_errno(errno);
12048             }
12049         }
12050         return ret;
12051 #endif
12052 #endif /* CONFIG_SPLICE */
12053 #ifdef CONFIG_EVENTFD
12054 #if defined(TARGET_NR_eventfd)
12055     case TARGET_NR_eventfd:
12056         ret = get_errno(eventfd(arg1, 0));
12057         if (ret >= 0) {
12058             fd_trans_register(ret, &target_eventfd_trans);
12059         }
12060         return ret;
12061 #endif
12062 #if defined(TARGET_NR_eventfd2)
12063     case TARGET_NR_eventfd2:
12064     {
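              /* EFD_NONBLOCK/EFD_CLOEXEC share their values with O_NONBLOCK/
               * O_CLOEXEC, which may differ between target and host, so those
               * two bits are translated explicitly and anything else is left
               * for the host kernel to reject. */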
12065         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12066         if (arg2 & TARGET_O_NONBLOCK) {
12067             host_flags |= O_NONBLOCK;
12068         }
12069         if (arg2 & TARGET_O_CLOEXEC) {
12070             host_flags |= O_CLOEXEC;
12071         }
12072         ret = get_errno(eventfd(arg1, host_flags));
12073         if (ret >= 0) {
12074             fd_trans_register(ret, &target_eventfd_trans);
12075         }
12076         return ret;
12077     }
12078 #endif
12079 #endif /* CONFIG_EVENTFD  */
12080 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12081     case TARGET_NR_fallocate:
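              /* On 32-bit ABIs the 64-bit offset and length each arrive split
               * across two registers and are reassembled by target_offset64()
               * before the host call. */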
12082 #if TARGET_ABI_BITS == 32
12083         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12084                                   target_offset64(arg5, arg6)));
12085 #else
12086         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12087 #endif
12088         return ret;
12089 #endif
12090 #if defined(CONFIG_SYNC_FILE_RANGE)
12091 #if defined(TARGET_NR_sync_file_range)
12092     case TARGET_NR_sync_file_range:
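              /* 32-bit MIPS aligns 64-bit syscall arguments to even register
               * pairs, so a padding register pushes offset/nbytes out to
               * arg3..arg6; other 32-bit ABIs take them starting at arg2. */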
12093 #if TARGET_ABI_BITS == 32
12094 #if defined(TARGET_MIPS)
12095         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12096                                         target_offset64(arg5, arg6), arg7));
12097 #else
12098         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12099                                         target_offset64(arg4, arg5), arg6));
12100 #endif /* !TARGET_MIPS */
12101 #else
12102         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12103 #endif
12104         return ret;
12105 #endif
12106 #if defined(TARGET_NR_sync_file_range2) || \
12107     defined(TARGET_NR_arm_sync_file_range)
12108 #if defined(TARGET_NR_sync_file_range2)
12109     case TARGET_NR_sync_file_range2:
12110 #endif
12111 #if defined(TARGET_NR_arm_sync_file_range)
12112     case TARGET_NR_arm_sync_file_range:
12113 #endif
12114         /* This is like sync_file_range but the arguments are reordered */
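              /* flags moves up to arg2 so that the two 64-bit values land in
               * naturally aligned register pairs on 32-bit ABIs. */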
12115 #if TARGET_ABI_BITS == 32
12116         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12117                                         target_offset64(arg5, arg6), arg2));
12118 #else
12119         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12120 #endif
12121         return ret;
12122 #endif
12123 #endif
12124 #if defined(TARGET_NR_signalfd4)
12125     case TARGET_NR_signalfd4:
12126         return do_signalfd4(arg1, arg2, arg4);
12127 #endif
12128 #if defined(TARGET_NR_signalfd)
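          /* Legacy signalfd is signalfd4 with no flags; both variants leave
           * the sigset conversion and fd bookkeeping to do_signalfd4(). */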
12129     case TARGET_NR_signalfd:
12130         return do_signalfd4(arg1, arg2, 0);
12131 #endif
12132 #if defined(CONFIG_EPOLL)
12133 #if defined(TARGET_NR_epoll_create)
12134     case TARGET_NR_epoll_create:
12135         return get_errno(epoll_create(arg1));
12136 #endif
12137 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
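          /* EPOLL_CLOEXEC shares its value with O_CLOEXEC, so the generic
           * fcntl flag table is sufficient to translate the flags. */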
12138     case TARGET_NR_epoll_create1:
12139         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12140 #endif
12141 #if defined(TARGET_NR_epoll_ctl)
12142     case TARGET_NR_epoll_ctl:
12143     {
12144         struct epoll_event ep;
12145         struct epoll_event *epp = NULL;
12146         if (arg4) {
12147             struct target_epoll_event *target_ep;
12148             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12149                 return -TARGET_EFAULT;
12150             }
12151             ep.events = tswap32(target_ep->events);
12152             /* The epoll_data_t union is just opaque data to the kernel,
12153              * so we transfer all 64 bits across and need not worry what
12154              * actual data type it is.
12155              */
12156             ep.data.u64 = tswap64(target_ep->data.u64);
12157             unlock_user_struct(target_ep, arg4, 0);
12158             epp = &ep;
12159         }
12160         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12161     }
12162 #endif
12163 
12164 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12165 #if defined(TARGET_NR_epoll_wait)
12166     case TARGET_NR_epoll_wait:
12167 #endif
12168 #if defined(TARGET_NR_epoll_pwait)
12169     case TARGET_NR_epoll_pwait:
12170 #endif
12171     {
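              /*
               * Bound maxevents, map the guest's event array for writing and
               * run the wait against a temporary host-side array; on success
               * the returned events are byte-swapped back into guest memory.
               */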
12172         struct target_epoll_event *target_ep;
12173         struct epoll_event *ep;
12174         int epfd = arg1;
12175         int maxevents = arg3;
12176         int timeout = arg4;
12177 
12178         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12179             return -TARGET_EINVAL;
12180         }
12181 
12182         target_ep = lock_user(VERIFY_WRITE, arg2,
12183                               maxevents * sizeof(struct target_epoll_event), 1);
12184         if (!target_ep) {
12185             return -TARGET_EFAULT;
12186         }
12187 
12188         ep = g_try_new(struct epoll_event, maxevents);
12189         if (!ep) {
12190             unlock_user(target_ep, arg2, 0);
12191             return -TARGET_ENOMEM;
12192         }
12193 
12194         switch (num) {
12195 #if defined(TARGET_NR_epoll_pwait)
12196         case TARGET_NR_epoll_pwait:
12197         {
12198             target_sigset_t *target_set;
12199             sigset_t _set, *set = &_set;
12200 
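                  /* An optional guest sigset is converted to host layout; the
                   * raw syscall also needs the host kernel's sigset size. */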
12201             if (arg5) {
12202                 if (arg6 != sizeof(target_sigset_t)) {
12203                     ret = -TARGET_EINVAL;
12204                     break;
12205                 }
12206 
12207                 target_set = lock_user(VERIFY_READ, arg5,
12208                                        sizeof(target_sigset_t), 1);
12209                 if (!target_set) {
12210                     ret = -TARGET_EFAULT;
12211                     break;
12212                 }
12213                 target_to_host_sigset(set, target_set);
12214                 unlock_user(target_set, arg5, 0);
12215             } else {
12216                 set = NULL;
12217             }
12218 
12219             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12220                                              set, SIGSET_T_SIZE));
12221             break;
12222         }
12223 #endif
12224 #if defined(TARGET_NR_epoll_wait)
12225         case TARGET_NR_epoll_wait:
12226             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12227                                              NULL, 0));
12228             break;
12229 #endif
12230         default:
12231             ret = -TARGET_ENOSYS;
12232         }
12233         if (!is_error(ret)) {
12234             int i;
12235             for (i = 0; i < ret; i++) {
12236                 target_ep[i].events = tswap32(ep[i].events);
12237                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12238             }
12239             unlock_user(target_ep, arg2,
12240                         ret * sizeof(struct target_epoll_event));
12241         } else {
12242             unlock_user(target_ep, arg2, 0);
12243         }
12244         g_free(ep);
12245         return ret;
12246     }
12247 #endif
12248 #endif
12249 #ifdef TARGET_NR_prlimit64
12250     case TARGET_NR_prlimit64:
12251     {
12252         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
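              /*
               * New limits for RLIMIT_AS/DATA/STACK are deliberately not
               * forwarded, presumably because applying them to the host
               * process would constrain QEMU itself rather than just the
               * guest.  Queries via arg4 are always passed through.
               */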
12253         struct target_rlimit64 *target_rnew, *target_rold;
12254         struct host_rlimit64 rnew, rold, *rnewp = NULL;
12255         int resource = target_to_host_resource(arg2);
12256 
12257         if (arg3 && (resource != RLIMIT_AS &&
12258                      resource != RLIMIT_DATA &&
12259                      resource != RLIMIT_STACK)) {
12260             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12261                 return -TARGET_EFAULT;
12262             }
12263             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12264             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12265             unlock_user_struct(target_rnew, arg3, 0);
12266             rnewp = &rnew;
12267         }
12268 
12269         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12270         if (!is_error(ret) && arg4) {
12271             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12272                 return -TARGET_EFAULT;
12273             }
12274             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12275             target_rold->rlim_max = tswap64(rold.rlim_max);
12276             unlock_user_struct(target_rold, arg4, 1);
12277         }
12278         return ret;
12279     }
12280 #endif
12281 #ifdef TARGET_NR_gethostname
12282     case TARGET_NR_gethostname:
12283     {
12284         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12285         if (name) {
12286             ret = get_errno(gethostname(name, arg2));
12287             unlock_user(name, arg1, arg2);
12288         } else {
12289             ret = -TARGET_EFAULT;
12290         }
12291         return ret;
12292     }
12293 #endif
12294 #ifdef TARGET_NR_atomic_cmpxchg_32
12295     case TARGET_NR_atomic_cmpxchg_32:
12296     {
12297         /* should use start_exclusive from main.c */
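              /* m68k kernel cmpxchg helper: read *arg6, store arg1 there only
               * if the value read equals arg2, and return what was read.
               * This is not atomic with respect to other guest threads, hence
               * the note above. */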
12298         abi_ulong mem_value;
12299         if (get_user_u32(mem_value, arg6)) {
12300             target_siginfo_t info;
12301             info.si_signo = SIGSEGV;
12302             info.si_errno = 0;
12303             info.si_code = TARGET_SEGV_MAPERR;
12304             info._sifields._sigfault._addr = arg6;
12305             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12306                          QEMU_SI_FAULT, &info);
12307             return 0xdeadbeef;
12308 
12309         }
12310         if (mem_value == arg2)
12311             put_user_u32(arg1, arg6);
12312         return mem_value;
12313     }
12314 #endif
12315 #ifdef TARGET_NR_atomic_barrier
12316     case TARGET_NR_atomic_barrier:
12317         /* Like the kernel implementation and the
12318            qemu arm barrier, treat this as a no-op. */
12319         return 0;
12320 #endif
12321 
12322 #ifdef TARGET_NR_timer_create
12323     case TARGET_NR_timer_create:
12324     {
12325         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
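              /*
               * Host timer_t handles live in the g_posix_timers[] table; the
               * id handed back to the guest is the table index tagged with
               * TIMER_MAGIC, which get_timer_id() validates and strips in the
               * other timer_* syscalls below.
               */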
12326 
12327         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12328 
12329         int clkid = arg1;
12330         int timer_index = next_free_host_timer();
12331 
12332         if (timer_index < 0) {
12333             ret = -TARGET_EAGAIN;
12334         } else {
12335             timer_t *phtimer = g_posix_timers + timer_index;
12336 
12337             if (arg2) {
12338                 phost_sevp = &host_sevp;
12339                 ret = target_to_host_sigevent(phost_sevp, arg2);
12340                 if (ret != 0) {
12341                     return ret;
12342                 }
12343             }
12344 
12345             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12346             if (ret) {
12347                 phtimer = NULL;
12348             } else {
12349                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12350                     return -TARGET_EFAULT;
12351                 }
12352             }
12353         }
12354         return ret;
12355     }
12356 #endif
12357 
12358 #ifdef TARGET_NR_timer_settime
12359     case TARGET_NR_timer_settime:
12360     {
12361         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12362          * struct itimerspec *old_value */
12363         target_timer_t timerid = get_timer_id(arg1);
12364 
12365         if (timerid < 0) {
12366             ret = timerid;
12367         } else if (arg3 == 0) {
12368             ret = -TARGET_EINVAL;
12369         } else {
12370             timer_t htimer = g_posix_timers[timerid];
12371             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12372 
12373             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12374                 return -TARGET_EFAULT;
12375             }
12376             ret = get_errno(
12377                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12378             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12379                 return -TARGET_EFAULT;
12380             }
12381         }
12382         return ret;
12383     }
12384 #endif
12385 
12386 #ifdef TARGET_NR_timer_gettime
12387     case TARGET_NR_timer_gettime:
12388     {
12389         /* args: timer_t timerid, struct itimerspec *curr_value */
12390         target_timer_t timerid = get_timer_id(arg1);
12391 
12392         if (timerid < 0) {
12393             ret = timerid;
12394         } else if (!arg2) {
12395             ret = -TARGET_EFAULT;
12396         } else {
12397             timer_t htimer = g_posix_timers[timerid];
12398             struct itimerspec hspec;
12399             ret = get_errno(timer_gettime(htimer, &hspec));
12400 
12401             if (host_to_target_itimerspec(arg2, &hspec)) {
12402                 ret = -TARGET_EFAULT;
12403             }
12404         }
12405         return ret;
12406     }
12407 #endif
12408 
12409 #ifdef TARGET_NR_timer_getoverrun
12410     case TARGET_NR_timer_getoverrun:
12411     {
12412         /* args: timer_t timerid */
12413         target_timer_t timerid = get_timer_id(arg1);
12414 
12415         if (timerid < 0) {
12416             ret = timerid;
12417         } else {
12418             timer_t htimer = g_posix_timers[timerid];
12419             ret = get_errno(timer_getoverrun(htimer));
12420         }
12421         return ret;
12422     }
12423 #endif
12424 
12425 #ifdef TARGET_NR_timer_delete
12426     case TARGET_NR_timer_delete:
12427     {
12428         /* args: timer_t timerid */
12429         target_timer_t timerid = get_timer_id(arg1);
12430 
12431         if (timerid < 0) {
12432             ret = timerid;
12433         } else {
12434             timer_t htimer = g_posix_timers[timerid];
12435             ret = get_errno(timer_delete(htimer));
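                  /* Clear the slot so next_free_host_timer() can reuse it. */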
12436             g_posix_timers[timerid] = 0;
12437         }
12438         return ret;
12439     }
12440 #endif
12441 
12442 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12443     case TARGET_NR_timerfd_create:
12444         return get_errno(timerfd_create(arg1,
12445                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12446 #endif
12447 
12448 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12449     case TARGET_NR_timerfd_gettime:
12450         {
12451             struct itimerspec its_curr;
12452 
12453             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12454 
12455             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12456                 return -TARGET_EFAULT;
12457             }
12458         }
12459         return ret;
12460 #endif
12461 
12462 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12463     case TARGET_NR_timerfd_settime:
12464         {
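                  /* Convert the guest's new itimerspec if one was given; the
                   * previous setting is always collected from the host and
                   * copied back only when the guest passed an old_value
                   * pointer. */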
12465             struct itimerspec its_new, its_old, *p_new;
12466 
12467             if (arg3) {
12468                 if (target_to_host_itimerspec(&its_new, arg3)) {
12469                     return -TARGET_EFAULT;
12470                 }
12471                 p_new = &its_new;
12472             } else {
12473                 p_new = NULL;
12474             }
12475 
12476             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12477 
12478             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12479                 return -TARGET_EFAULT;
12480             }
12481         }
12482         return ret;
12483 #endif
12484 
12485 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12486     case TARGET_NR_ioprio_get:
12487         return get_errno(ioprio_get(arg1, arg2));
12488 #endif
12489 
12490 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12491     case TARGET_NR_ioprio_set:
12492         return get_errno(ioprio_set(arg1, arg2, arg3));
12493 #endif
12494 
12495 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12496     case TARGET_NR_setns:
12497         return get_errno(setns(arg1, arg2));
12498 #endif
12499 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12500     case TARGET_NR_unshare:
12501         return get_errno(unshare(arg1));
12502 #endif
12503 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12504     case TARGET_NR_kcmp:
12505         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12506 #endif
12507 #ifdef TARGET_NR_swapcontext
12508     case TARGET_NR_swapcontext:
12509         /* PowerPC specific.  */
12510         return do_swapcontext(cpu_env, arg1, arg2, arg3);
12511 #endif
12512 #ifdef TARGET_NR_memfd_create
12513     case TARGET_NR_memfd_create:
12514         p = lock_user_string(arg1);
12515         if (!p) {
12516             return -TARGET_EFAULT;
12517         }
12518         ret = get_errno(memfd_create(p, arg2));
12519         fd_trans_unregister(ret);
12520         unlock_user(p, arg1, 0);
12521         return ret;
12522 #endif
12523 #if defined TARGET_NR_membarrier && defined __NR_membarrier
12524     case TARGET_NR_membarrier:
12525         return get_errno(membarrier(arg1, arg2));
12526 #endif
12527 
12528     default:
12529         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12530         return -TARGET_ENOSYS;
12531     }
12532     return ret;
12533 }
12534 
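      /*
       * Entry point used by the per-target cpu loops: wraps do_syscall1()
       * (the big switch above) with the optional -strace logging and the
       * syscall trace hooks, leaving all argument conversion to the
       * per-syscall code.
       */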
12535 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12536                     abi_long arg2, abi_long arg3, abi_long arg4,
12537                     abi_long arg5, abi_long arg6, abi_long arg7,
12538                     abi_long arg8)
12539 {
12540     CPUState *cpu = env_cpu(cpu_env);
12541     abi_long ret;
12542 
12543 #ifdef DEBUG_ERESTARTSYS
12544     /* Debug-only code for exercising the syscall-restart code paths
12545      * in the per-architecture cpu main loops: restart every syscall
12546      * the guest makes once before letting it through.
12547      */
12548     {
12549         static bool flag;
12550         flag = !flag;
12551         if (flag) {
12552             return -TARGET_ERESTARTSYS;
12553         }
12554     }
12555 #endif
12556 
12557     record_syscall_start(cpu, num, arg1,
12558                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
12559 
12560     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12561         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
12562     }
12563 
12564     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12565                       arg5, arg6, arg7, arg8);
12566 
12567     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12568         print_syscall_ret(num, ret);
12569     }
12570 
12571     record_syscall_return(cpu, num, ret);
12572     return ret;
12573 }
12574